From 523351dce10f9147b2f91486a6ec069b4dde3956 Mon Sep 17 00:00:00 2001 From: liujinyu Date: Mon, 29 Apr 2024 17:29:23 +0800 Subject: [PATCH 001/347] =?UTF-8?q?alter=20trigger=E6=96=B9=E5=BC=8F?= =?UTF-8?q?=E5=90=AF=E7=94=A8=EF=BC=88=E7=A6=81=E7=94=A8=EF=BC=89=E5=8D=95?= =?UTF-8?q?=E4=B8=AA=E8=A7=A6=E5=8F=91=E5=99=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/copyfuncs.cpp | 13 ++++ src/common/backend/nodes/equalfuncs.cpp | 11 ++++ src/common/backend/nodes/nodes.cpp | 1 + src/common/backend/nodes/outfuncs.cpp | 10 +++ src/common/backend/nodes/readfuncs.cpp | 9 +++ src/common/backend/parser/gram.y | 14 +++++ .../optimizer/commands/trigger.cpp | 19 +++++- src/gausskernel/process/tcop/auditfuncs.cpp | 4 ++ src/gausskernel/process/tcop/utility.cpp | 11 ++++ src/include/commands/trigger.h | 2 + src/include/nodes/nodes.h | 1 + src/include/nodes/parsenodes_common.h | 5 ++ src/test/regress/expected/alter_trigger.out | 61 +++++++++++++++++++ src/test/regress/parallel_schedule0B | 1 + src/test/regress/sql/alter_trigger.sql | 33 ++++++++++ 15 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 src/test/regress/expected/alter_trigger.out create mode 100644 src/test/regress/sql/alter_trigger.sql diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 655d89b166..47ce926f5e 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -5146,6 +5146,16 @@ static AlterTableStmt* _copyAlterTableStmt(const AlterTableStmt* from) return newnode; } +static AlterTriggerStmt *_copyAlterTriggerStmt(const AlterTriggerStmt *from) +{ + AlterTriggerStmt *newnode = makeNode(AlterTriggerStmt); + + COPY_STRING_FIELD(trigname); + COPY_SCALAR_FIELD(tgenabled); + + return newnode; +} + static AlterTableCmd* _copyAlterTableCmd(const AlterTableCmd* from) { AlterTableCmd* newnode = makeNode(AlterTableCmd); @@ -8408,6 +8418,9 @@ 
void* copyObject(const void* from) case T_AlterTableStmt: retval = _copyAlterTableStmt((AlterTableStmt*)from); break; + case T_AlterTriggerStmt: + retval = _copyAlterTriggerStmt((AlterTriggerStmt *)from); + break; case T_AlterTableCmd: retval = _copyAlterTableCmd((AlterTableCmd*)from); break; diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index c2b4440e8e..8e48359b3a 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -1120,6 +1120,14 @@ static bool _equalAlterTableStmt(const AlterTableStmt* a, const AlterTableStmt* return true; } +static bool _equalAlterTriggerStmt(const AlterTriggerStmt *a, const AlterTriggerStmt *b) +{ + COMPARE_STRING_FIELD(trigname); + COMPARE_SCALAR_FIELD(tgenabled); + + return true; +} + static bool _equalAlterTableCmd(const AlterTableCmd* a, const AlterTableCmd* b) { COMPARE_SCALAR_FIELD(subtype); @@ -3909,6 +3917,9 @@ bool equal(const void* a, const void* b) case T_AlterTableStmt: retval = _equalAlterTableStmt((AlterTableStmt*)a, (AlterTableStmt*)b); break; + case T_AlterTriggerStmt: + retval = _equalAlterTriggerStmt((AlterTriggerStmt*)a, (AlterTriggerStmt*)b); + break; case T_AlterTableCmd: retval = _equalAlterTableCmd((AlterTableCmd*)a, (AlterTableCmd*)b); break; diff --git a/src/common/backend/nodes/nodes.cpp b/src/common/backend/nodes/nodes.cpp index 16e21982e0..f01dc2c83d 100755 --- a/src/common/backend/nodes/nodes.cpp +++ b/src/common/backend/nodes/nodes.cpp @@ -318,6 +318,7 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, {T_SelectStmt, "SelectStmt"}, {T_SelectIntoVarList, "SelectIntoVarList"}, {T_AlterTableStmt, "AlterTableStmt"}, + {T_AlterTriggerStmt, "AlterTriggerStmt"}, {T_AlterTableCmd, "AlterTableCmd"}, {T_AlterDomainStmt, "AlterDomainStmt"}, {T_SetOperationStmt, "SetOperationStmt"}, diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 5e4ef26ebd..7c4096165c 100755 --- 
a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -4102,6 +4102,13 @@ static void _outAlterTableStmt(StringInfo str, AlterTableStmt* node) WRITE_BOOL_FIELD(fromReplace); } +static void _outAlterTriggerStmt(StringInfo str, AlterTriggerStmt *node) +{ + WRITE_NODE_TYPE("ALTERTRIGGER"); + WRITE_STRING_FIELD(trigname); + WRITE_CHAR_FIELD(tgenabled); +} + static void _outCopyStmt(StringInfo str, CopyStmt* node) { WRITE_NODE_TYPE("COPY"); @@ -6877,6 +6884,9 @@ static void _outNode(StringInfo str, const void* obj) case T_AlterTableStmt: _outAlterTableStmt(str, (AlterTableStmt*)obj); break; + case T_AlterTriggerStmt: + _outAlterTriggerStmt(str, (AlterTriggerStmt*)obj); + break; case T_SelectStmt: _outSelectStmt(str, (SelectStmt*)obj); break; diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index eb15a3ee1c..815eb8d4a8 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -1723,6 +1723,15 @@ static AlterTableStmt* _readAlterTableStmt(void) READ_DONE(); } +static AlterTriggerStmt* _readAlterTriggerStmt(void) +{ + READ_LOCALS(AlterTriggerStmt); + READ_STRING_FIELD(trigname); + READ_CHAR_FIELD(tgenabled); + + READ_DONE(); +} + static PLDebug_variable* _readPLDebug_variable(void) { READ_LOCALS(PLDebug_variable); diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 9620c5ae40..43f787911c 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -367,6 +367,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); AlterExtensionStmt AlterExtensionContentsStmt AlterForeignTableStmt AlterCompositeTypeStmt AlterUserStmt AlterUserMappingStmt AlterUserSetStmt AlterSystemStmt + AlterTriggerStmt AlterRoleStmt AlterRoleSetStmt AlterRlsPolicyStmt AlterDefaultPrivilegesStmt DefACLAction AlterSessionStmt AnalyzeStmt CleanConnStmt ClosePortalStmt ClusterStmt CommentStmt @@ -1208,6 
+1209,7 @@ stmt : | AlterSubscriptionStmt | AlterTableStmt | AlterSystemStmt + | AlterTriggerStmt | AlterCompositeTypeStmt | AlterRoleSetStmt | AlterRoleStmt @@ -2068,6 +2070,18 @@ altersys_option: | {/* empty */} ; +AlterTriggerStmt: + ALTER TRIGGER name enable_trigger + { + if( u_sess->attr.attr_sql.sql_compatibility != A_FORMAT ) + ereport(ERROR,(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ALTER TRIGGER ... ENABLE/DISABLE is supported only in A_FORMAT database."))); + AlterTriggerStmt *n = makeNode(AlterTriggerStmt); + n->trigname = $3; + n->tgenabled = $4; + $$ = (Node *)n; + } + ; /***************************************************************************** * * Drop a postgresql group diff --git a/src/gausskernel/optimizer/commands/trigger.cpp b/src/gausskernel/optimizer/commands/trigger.cpp index a6fafdc0da..a7aef124c8 100644 --- a/src/gausskernel/optimizer/commands/trigger.cpp +++ b/src/gausskernel/optimizer/commands/trigger.cpp @@ -6350,6 +6350,23 @@ void ResetTrigShipFlag() } } + +ObjectAddress AlterTrigger(AlterTriggerStmt* stmt) +{ + ObjectAddress address; + Oid trigoid; + Oid reloid; + trigoid = get_trigger_oid_b(stmt->trigname, &reloid, false); + if (OidIsValid(trigoid) && OidIsValid(reloid)) { + Relation rel; + rel = relation_open(reloid, AccessExclusiveLock); + EnableDisableTrigger(rel, stmt->trigname, stmt->tgenabled, false); + relation_close(rel, NoLock); + ObjectAddressSet(address, TriggerRelationId, trigoid); + } + return address; +} + /* build a function name for b format trigger */ static char* rebuild_funcname_for_b_trigger(char* trigname, char* relname) { @@ -6400,4 +6417,4 @@ static char* rebuild_funcname_for_b_trigger(char* trigname, char* relname) } while (true); return funcname; -} \ No newline at end of file +} diff --git a/src/gausskernel/process/tcop/auditfuncs.cpp b/src/gausskernel/process/tcop/auditfuncs.cpp index c3f8127ae6..1e141cf571 100644 --- a/src/gausskernel/process/tcop/auditfuncs.cpp +++ 
b/src/gausskernel/process/tcop/auditfuncs.cpp @@ -1624,6 +1624,10 @@ static void pgaudit_ProcessUtility(processutility_context* processutility_cxt, CreateTrigStmt* createstmt = (CreateTrigStmt*)(parsetree); pgaudit_ddl_trigger(createstmt->trigname, queryString); } break; + case T_AlterTriggerStmt: { /* Audit alter trigger */ + AlterTriggerStmt *alterstmt = (AlterTriggerStmt *)(parsetree); + pgaudit_ddl_trigger(alterstmt->trigname, queryString); + } break; case T_AlterDefaultPrivilegesStmt: { /* ALTER DEFAULT PRIVILEGES statement */ AlterDefaultPrivilegesStmt* alterprivilegesstmt = (AlterDefaultPrivilegesStmt*)(parsetree); pgaudit_grant_or_revoke_role(alterprivilegesstmt->action->is_grant, NULL, queryString); diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index 012faef8df..811ccec86e 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -6889,6 +6889,9 @@ ProcessUtilitySlow(Node *parse_tree, case T_CreateAmStmt: address = CreateAccessMethod((CreateAmStmt *) parse_tree); break; + case T_AlterTriggerStmt: + address = AlterTrigger((AlterTriggerStmt *) parse_tree); + break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(parse_tree)); @@ -9480,6 +9483,10 @@ const char* CreateCommandTag(Node* parse_tree) tag = "CREATE ACCESS METHOD"; break; + case T_AlterTriggerStmt: + tag = "ALTER TRIGGER"; + break; + case T_PrepareStmt: tag = "PREPARE"; break; @@ -10390,6 +10397,10 @@ LogStmtLevel GetCommandLogLevel(Node* parse_tree) lev = LOGSTMT_DDL; break; + case T_AlterTriggerStmt: + lev = LOGSTMT_DDL; + break; + /* already-planned queries */ case T_PlannedStmt: { PlannedStmt* stmt = (PlannedStmt*)parse_tree; diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h index 5ec192602a..7d32044c33 100644 --- a/src/include/commands/trigger.h +++ b/src/include/commands/trigger.h @@ -204,4 +204,6 @@ extern HeapTuple GetTupleForTrigger(EState* estate, 
EPQState* epqstate, ResultRe int2 bucketid, ItemPointer tid, LockTupleMode lockmode, TupleTableSlot** newSlot, TM_Result* result = NULL, TM_FailureData* tmfd = NULL); +extern ObjectAddress AlterTrigger(AlterTriggerStmt *stmt); + #endif /* TRIGGER_H */ diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index a29b9c0e85..4ab9f8e727 100755 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -575,6 +575,7 @@ typedef enum NodeTag { T_AddTableIntoCBIState, T_CreateAmStmt, + T_AlterTriggerStmt, T_CreatePublicationStmt, T_AlterPublicationStmt, T_CreateSubscriptionStmt, diff --git a/src/include/nodes/parsenodes_common.h b/src/include/nodes/parsenodes_common.h index fd13f75aa8..13236a1559 100644 --- a/src/include/nodes/parsenodes_common.h +++ b/src/include/nodes/parsenodes_common.h @@ -2588,4 +2588,9 @@ typedef struct IndexHintRelationData{ IndexHintType index_type; }IndexHintRelationData; +typedef struct AlterTriggerStmt { + NodeTag type; + char *trigname; /* TRIGGER's name */ + char tgenabled; /* trigger's firing configuration WRT session_replication_role */ +} AlterTriggerStmt; #endif /* PARSENODES_COMMONH */ diff --git a/src/test/regress/expected/alter_trigger.out b/src/test/regress/expected/alter_trigger.out new file mode 100644 index 0000000000..6ac1e1ec0b --- /dev/null +++ b/src/test/regress/expected/alter_trigger.out @@ -0,0 +1,61 @@ +create table trigtest(i serial primary key); +NOTICE: CREATE TABLE will create implicit sequence "trigtest_i_seq" for serial column "trigtest.i" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "trigtest_pkey" for table "trigtest" +create function trigtestfunc() returns trigger as $$ + begin + raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL; + return new; + end;$$ language plpgsql; +create trigger trigtest_b_row_tg before insert or update or delete on trigtest + for each row execute procedure trigtestfunc(); +create trigger trigtest_a_row_tg after insert or update or delete on 
trigtest + for each row execute procedure trigtestfunc(); +create table trigtest2 (i serial primary key); +NOTICE: CREATE TABLE will create implicit sequence "trigtest2_i_seq" for serial column "trigtest2.i" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "trigtest2_pkey" for table "trigtest2" +create trigger trigtest_a_row_tg after insert or update or delete on trigtest2 + for each row execute procedure trigtestfunc(); + +select tgname, tgtype, tgenabled from pg_trigger where tgname like 'trigtest%'; + tgname | tgtype | tgenabled +-------------------+--------+----------- + trigtest_a_row_tg | 29 | O + trigtest_a_row_tg | 29 | O + trigtest_b_row_tg | 31 | O +(3 rows) + +alter trigger trigtest_b_row_tg disable; +select tgname, tgtype, tgenabled from pg_trigger where tgname = 'trigtest_b_row_tg'; + tgname | tgtype | tgenabled +-------------------+--------+----------- + trigtest_b_row_tg | 31 | D +(1 row) + +insert into trigtest values(1); +NOTICE: trigtest INSERT AFTER ROW +alter trigger trigtest_b_row_tg enable; +select tgname, tgtype, tgenabled from pg_trigger where tgname = 'trigtest_b_row_tg'; + tgname | tgtype | tgenabled +-------------------+--------+----------- + trigtest_b_row_tg | 31 | O +(1 row) + +insert into trigtest values(2); +NOTICE: trigtest INSERT BEFORE ROW +NOTICE: trigtest INSERT AFTER ROW +alter trigger trigtest_a_row_tg disable; +ERROR: trigger named "trigtest_a_row_tg" has more than one trigger, please use drop trigger on syntax +select tgname, tgtype, tgenabled from pg_trigger where tgname like 'trigtest%'; + tgname | tgtype | tgenabled +-------------------+--------+----------- + trigtest_a_row_tg | 29 | O + trigtest_a_row_tg | 29 | O + trigtest_b_row_tg | 31 | O +(3 rows) + +alter trigger trigtest_err disable; +ERROR: trigger "trigtest_err" does not exist +alter trigger trigtest_err enable; +ERROR: trigger "trigtest_err" does not exist +drop table trigtest cascade; +drop table trigtest2 cascade; diff --git 
a/src/test/regress/parallel_schedule0B b/src/test/regress/parallel_schedule0B index fdbd2e30b6..3e68a035a8 100644 --- a/src/test/regress/parallel_schedule0B +++ b/src/test/regress/parallel_schedule0B @@ -459,3 +459,4 @@ test: udf_crem create_c_function test: create_function test: pg_compatibility +test: alter_trigger diff --git a/src/test/regress/sql/alter_trigger.sql b/src/test/regress/sql/alter_trigger.sql new file mode 100644 index 0000000000..eb7e1f0750 --- /dev/null +++ b/src/test/regress/sql/alter_trigger.sql @@ -0,0 +1,33 @@ +create table trigtest(i serial primary key); +create function trigtestfunc() returns trigger as $$ + begin + raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL; + return new; + end;$$ language plpgsql; +create trigger trigtest_b_row_tg before insert or update or delete on trigtest + for each row execute procedure trigtestfunc(); +create trigger trigtest_a_row_tg after insert or update or delete on trigtest + for each row execute procedure trigtestfunc(); + +create table trigtest2 (i serial primary key); +create trigger trigtest_a_row_tg after insert or update or delete on trigtest2 + for each row execute procedure trigtestfunc(); + +select tgname, tgtype, tgenabled from pg_trigger where tgname like 'trigtest%'; + +alter trigger trigtest_b_row_tg disable; +select tgname, tgtype, tgenabled from pg_trigger where tgname = 'trigtest_b_row_tg'; +insert into trigtest values(1); + +alter trigger trigtest_b_row_tg enable; +select tgname, tgtype, tgenabled from pg_trigger where tgname = 'trigtest_b_row_tg'; +insert into trigtest values(2); + +alter trigger trigtest_a_row_tg disable; +select tgname, tgtype, tgenabled from pg_trigger where tgname like 'trigtest%'; + +alter trigger trigtest_err disable; +alter trigger trigtest_err enable; + +drop table trigtest cascade; +drop table trigtest2 cascade; \ No newline at end of file -- Gitee From 0a84716f30dbf72424e615c07a656bccd228ccce Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> 
Date: Fri, 28 Jun 2024 11:46:03 +0800 Subject: [PATCH 002/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3timescaledb=E6=8F=92?= =?UTF-8?q?=E4=BB=B6,=E6=97=B6=E9=97=B4=E9=97=B4=E9=9A=94=E4=B8=BAweek?= =?UTF-8?q?=EF=BC=8C=E4=BD=BF=E7=94=A8time=5Fbucket=E5=88=86=E7=BB=84?= =?UTF-8?q?=E8=81=9A=E5=90=88=E6=8A=A5=E9=94=99=E8=B6=85=E8=8C=83=E5=9B=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/datetime.cpp | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/common/backend/utils/adt/datetime.cpp b/src/common/backend/utils/adt/datetime.cpp index e8f924cf67..e6f654471a 100644 --- a/src/common/backend/utils/adt/datetime.cpp +++ b/src/common/backend/utils/adt/datetime.cpp @@ -322,14 +322,25 @@ void j2date(int jd, int* year, int* month, int* day) */ int j2day(int date) { - unsigned int day; - - day = date; + if (!DB_IS_CMPT(PG_FORMAT)) + { + unsigned int day; - day += 1; - day %= 7; + day = date; + day += 1; + day %= 7; + return (int)day; + } + else + { + date += 1; + date %= 7; + if(date<0) + date+=7; + return date; + } + return date; - return (int)day; } /* j2day() */ /* -- Gitee From 89691977b5122756b71fe758368305e1c5715f01 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Fri, 28 Jun 2024 12:14:28 +0800 Subject: [PATCH 003/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3timescaledb=E6=8F=92?= =?UTF-8?q?=E4=BB=B6,=E5=88=9B=E5=BB=BA=E8=B6=85=E8=A1=A8=E6=97=B6?= =?UTF-8?q?=E4=BD=BF=E7=94=A8=E4=BA=86=E9=9D=9Eplpgsql=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E8=A7=A6=E5=8F=91=E5=99=A8=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/plan/planner.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/gausskernel/optimizer/plan/planner.cpp b/src/gausskernel/optimizer/plan/planner.cpp index 67e5f053f4..7bceb5c5bd 100755 --- 
a/src/gausskernel/optimizer/plan/planner.cpp +++ b/src/gausskernel/optimizer/plan/planner.cpp @@ -4846,9 +4846,8 @@ static Plan* internal_grouping_planner(PlannerInfo* root, double tuple_fraction) } #endif (void)MemoryContextSwitchTo(oldcontext); - if(u_sess->hook_cxt.forTsdbHook && parse->commandType == CMD_INSERT) { + if(u_sess->hook_cxt.forTsdbHook && (parse->commandType == CMD_INSERT || parse->commandType == CMD_MERGE)) { - for_plugin_rel->reltarget->exprs = tlist; List* newList = NIL; List* returningLists = NIL; -- Gitee From 11c41c967b27bb40a04eea0c8177627ae86de4e2 Mon Sep 17 00:00:00 2001 From: lukeman Date: Fri, 10 May 2024 15:41:58 +0800 Subject: [PATCH 004/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9AThreadP?= =?UTF-8?q?oolListener=20readySession=E6=B3=84=E9=9C=B2=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../process/threadpool/threadpool_listener.cpp | 15 ++++++++++++--- src/include/threadpool/threadpool_listener.h | 1 + 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/gausskernel/process/threadpool/threadpool_listener.cpp b/src/gausskernel/process/threadpool/threadpool_listener.cpp index eff445d47e..09a21aa496 100644 --- a/src/gausskernel/process/threadpool/threadpool_listener.cpp +++ b/src/gausskernel/process/threadpool/threadpool_listener.cpp @@ -533,17 +533,26 @@ void ThreadPoolListener::WakeupForHang() { gs_signal_send(m_tid, SIGUSR2); } +Dlelem* ThreadPoolListener::TryRemoveReadySessListHead() +{ + Dlelem *elem = NULL; + if (m_group->m_idleWorkerNum > 0) { + elem = m_readySessionList->RemoveHead(); + } + return elem; +} + void ThreadPoolListener::WakeupReadySessionList() { - Dlelem *elem = m_readySessionList->RemoveHead(); + Dlelem *elem = TryRemoveReadySessListHead(); knl_session_context *sess = NULL; // last time WakeupReadySession() is not finished, but m_isHang is set again - while (elem != NULL && m_group->m_idleWorkerNum > 0) { + while (elem != NULL) { 
sess = (knl_session_context *)DLE_VAL(elem); ereport(DEBUG2, (errmodule(MOD_THREAD_POOL), errmsg("WakeupReadySessionList remove a session:%lu from m_readySessionList", sess->session_id))); DispatchSession(sess); - elem = m_readySessionList->RemoveHead(); + elem = TryRemoveReadySessListHead(); } // m_isHang maybe set true when we do checkGroupHang again before it, now we will miss one time. // But if group is actually hang, m_isHang will be set true again. diff --git a/src/include/threadpool/threadpool_listener.h b/src/include/threadpool/threadpool_listener.h index cc8ad06767..fbacaae439 100644 --- a/src/include/threadpool/threadpool_listener.h +++ b/src/include/threadpool/threadpool_listener.h @@ -78,6 +78,7 @@ private: Dlelem *GetSessFromReadySessionList(ThreadPoolWorker *worker); void AddIdleSessionToTail(knl_session_context* session); void AddIdleSessionToHead(knl_session_context* session); + Dlelem *TryRemoveReadySessListHead(); private: ThreadId m_tid; -- Gitee From 4b51be8e3ba26ca0464a53c31cb694c71cc4a14d Mon Sep 17 00:00:00 2001 From: zhangwh Date: Mon, 1 Jul 2024 10:26:04 +0800 Subject: [PATCH 005/347] iuds opt --- src/gausskernel/runtime/executor/execScan.cpp | 2 +- .../runtime/executor/nodeIndexonlyscan.cpp | 6 +- .../runtime/executor/nodeIndexscan.cpp | 2 +- .../storage/access/heap/heapam.cpp | 7 +- .../storage/access/nbtree/nbtsearch.cpp | 66 ++++++++++++------- .../storage/access/transam/xloginsert.cpp | 12 ++-- src/include/pgstat.h | 41 ++++++------ 7 files changed, 80 insertions(+), 56 deletions(-) diff --git a/src/gausskernel/runtime/executor/execScan.cpp b/src/gausskernel/runtime/executor/execScan.cpp index 8c9a8aa7e0..dc2862c555 100755 --- a/src/gausskernel/runtime/executor/execScan.cpp +++ b/src/gausskernel/runtime/executor/execScan.cpp @@ -121,8 +121,8 @@ TupleTableSlot* ExecScan(ScanState* node, ExecScanAccessMtd access_mtd, /* funct * Fetch data from node */ qual = node->ps.qual; - proj_info = node->ps.ps_ProjInfo; econtext = 
node->ps.ps_ExprContext; + proj_info = node->ps.ps_ProjInfo; /* * If we have neither a qual to check nor a projection to do, just skip diff --git a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp index db613fd211..9f3c6200ed 100644 --- a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp +++ b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp @@ -120,8 +120,8 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) TupleTableSlot* slot = NULL; TupleTableSlot* tmpslot = NULL; ItemPointer tid; - bool isVersionScan = node->ss.isVersionScan; bool isUHeap = false; + bool isVersionScan; /* * extract necessary information from index scan node @@ -135,9 +135,10 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) else if (ScanDirectionIsBackward(direction)) direction = ForwardScanDirection; } - scandesc = node->ioss_ScanDesc; econtext = node->ss.ps.ps_ExprContext; slot = node->ss.ss_ScanTupleSlot; + isVersionScan = node->ss.isVersionScan; + scandesc = node->ioss_ScanDesc; isUHeap = RelationIsUstoreFormat(node->ss.ss_currentRelation); if (isUHeap) { tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation), @@ -239,7 +240,6 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) * Fill the scan tuple slot with data from the index. */ StoreIndexTuple(slot, indexScan->xs_itup, indexScan->xs_itupdesc); - /* * If the index was lossy, we have to recheck the index quals. 
* (Currently, this can never happen, but we should support the case diff --git a/src/gausskernel/runtime/executor/nodeIndexscan.cpp b/src/gausskernel/runtime/executor/nodeIndexscan.cpp index bb562a54de..0d5684ea1b 100644 --- a/src/gausskernel/runtime/executor/nodeIndexscan.cpp +++ b/src/gausskernel/runtime/executor/nodeIndexscan.cpp @@ -77,9 +77,9 @@ static TupleTableSlot* IndexNext(IndexScanState* node) else if (ScanDirectionIsBackward(direction)) direction = ForwardScanDirection; } - scandesc = node->iss_ScanDesc; econtext = node->ss.ps.ps_ExprContext; slot = node->ss.ss_ScanTupleSlot; + scandesc = node->iss_ScanDesc; isUstore = RelationIsUstoreFormat(node->ss.ss_currentRelation); diff --git a/src/gausskernel/storage/access/heap/heapam.cpp b/src/gausskernel/storage/access/heap/heapam.cpp index 6f905dfb8b..1f0f55f651 100755 --- a/src/gausskernel/storage/access/heap/heapam.cpp +++ b/src/gausskernel/storage/access/heap/heapam.cpp @@ -2481,17 +2481,18 @@ bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, S * because the SSI checks and the *Satisfies routine for historical * MVCC snapshots need the correct tid to decide about the visibility. */ - heap_tuple->t_data = (HeapTupleHeader)PageGetItem(dp, lp); heap_tuple->t_len = ItemIdGetLength(lp); - heap_tuple->t_tableOid = RelationGetRelid(relation); heap_tuple->t_bucketId = RelationGetBktid(relation); + ItemPointerSet(&heap_tuple->t_self, blkno, offnum); + heap_tuple->t_tableOid = RelationGetRelid(relation); HeapTupleCopyBaseFromPage(heap_tuple, dp); #ifdef PGXC heap_tuple->t_xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; #endif - ItemPointerSet(&heap_tuple->t_self, blkno, offnum); + heap_tuple->t_data = (HeapTupleHeader)PageGetItem(dp, lp); /* + * Shouldn't see a HEAP_ONLY tuple at chain start. 
*/ if (at_chain_start && HeapTupleIsHeapOnly(heap_tuple)) { diff --git a/src/gausskernel/storage/access/nbtree/nbtsearch.cpp b/src/gausskernel/storage/access/nbtree/nbtsearch.cpp index 8d76bde79d..3576785aa7 100644 --- a/src/gausskernel/storage/access/nbtree/nbtsearch.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtsearch.cpp @@ -442,7 +442,6 @@ int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum * Check tuple has correct number of attributes. */ _bt_check_natts_correct(rel, key->heapkeyspace, page, offnum); - /* * Force result ">" if target item is first data item on an internal page * --- see NOTE above. @@ -475,30 +474,51 @@ int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum datum = index_getattr(itup, scankey->sk_attno, itupdesc, &isNull); if (likely((!(scankey->sk_flags & SK_ISNULL)) && !isNull)) { - /* btint4cmp */ - if (scankey->sk_func.fn_oid == F_BTINT4CMP) { - result = (int32)datum == (int32)scankey->sk_argument - ? 0 - : ((int32)datum > (int32)scankey->sk_argument ? 1 : -1); - } else if (scankey->sk_func.fn_oid == F_BTINT8CMP) { - result = (int64)datum == (int64)scankey->sk_argument - ? 0 - : ((int64)datum > (int64)scankey->sk_argument ? 1 : -1); - } else if (scankey->sk_func.fn_oid == F_BTINT84CMP) { - result = (int64)datum == (int64)(int32)scankey->sk_argument - ? 0 - : ((int64)datum > (int64)(int32)scankey->sk_argument ? 1 : -1); - } else if (scankey->sk_func.fn_oid == F_BTINT48CMP) { - result = (int64)(int32)datum == (int64)scankey->sk_argument - ? 0 - : ((int64)(int32)datum > (int64)scankey->sk_argument ? 1 : -1); + if (scankey->sk_flags & SK_BT_DESC) { + /* btint4cmp */ + if (scankey->sk_func.fn_oid == F_BTINT4CMP) { + result = (int32)datum == (int32)scankey->sk_argument + ? 0 + : ((int32)datum > (int32)scankey->sk_argument ? 1 : -1); + } else if (scankey->sk_func.fn_oid == F_BTINT8CMP) { + result = (int64)datum == (int64)scankey->sk_argument + ? 
0 + : ((int64)datum > (int64)scankey->sk_argument ? 1 : -1); + } else if (scankey->sk_func.fn_oid == F_BTINT84CMP) { + result = (int64)datum == (int64)(int32)scankey->sk_argument + ? 0 + : ((int64)datum > (int64)(int32)scankey->sk_argument ? 1 : -1); + } else if (scankey->sk_func.fn_oid == F_BTINT48CMP) { + result = (int64)(int32)datum == (int64)scankey->sk_argument + ? 0 + : ((int64)(int32)datum > (int64)scankey->sk_argument ? 1 : -1); + } else { + result = DatumGetInt32( + FunctionCall2Coll(&scankey->sk_func, scankey->sk_collation, datum, scankey->sk_argument)); + } } else { - result = DatumGetInt32( - FunctionCall2Coll(&scankey->sk_func, scankey->sk_collation, datum, scankey->sk_argument)); + /* btint4cmp */ + if (scankey->sk_func.fn_oid == F_BTINT4CMP) { + result = (int32)datum == (int32)scankey->sk_argument + ? 0 + : ((int32)datum > (int32)scankey->sk_argument ? -1 : 1); + } else if (scankey->sk_func.fn_oid == F_BTINT8CMP) { + result = (int64)datum == (int64)scankey->sk_argument + ? 0 + : ((int64)datum > (int64)scankey->sk_argument ? -1 : 1); + } else if (scankey->sk_func.fn_oid == F_BTINT84CMP) { + result = (int64)datum == (int64)(int32)scankey->sk_argument + ? 0 + : ((int64)datum > (int64)(int32)scankey->sk_argument ? -1 : 1); + } else if (scankey->sk_func.fn_oid == F_BTINT48CMP) { + result = (int64)(int32)datum == (int64)scankey->sk_argument + ? 0 + : ((int64)(int32)datum > (int64)scankey->sk_argument ? 
-1 : 1); + } else { + result = -(DatumGetInt32( + FunctionCall2Coll(&scankey->sk_func, scankey->sk_collation, datum, scankey->sk_argument))); + } } - - if (!(scankey->sk_flags & SK_BT_DESC)) - result = -result; } else { if (scankey->sk_flags & SK_ISNULL) { /* key is NULL */ if (isNull) diff --git a/src/gausskernel/storage/access/transam/xloginsert.cpp b/src/gausskernel/storage/access/transam/xloginsert.cpp index e4c162b798..e3d6309daf 100755 --- a/src/gausskernel/storage/access/transam/xloginsert.cpp +++ b/src/gausskernel/storage/access/transam/xloginsert.cpp @@ -1028,13 +1028,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ xid = GetCurrentTransactionIdIfAny(); } - rechdr->xl_xid = xid; rechdr->xl_tot_len = total_len; - rechdr->xl_info = info; - rechdr->xl_rmid = rmid; - rechdr->xl_prev = InvalidXLogRecPtr; - rechdr->xl_crc = rdata_crc; - Assert(hashbucket_flag == false || no_hashbucket_flag == false); rechdr->xl_term = Max(g_instance.comm_cxt.localinfo_cxt.term_from_file, g_instance.comm_cxt.localinfo_cxt.term_from_xlog); if ((rechdr->xl_term & XLOG_CONTAIN_CSN) != 0) { @@ -1047,7 +1041,13 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ if (t_thrd.proc->workingVersionNum >= PARALLEL_DECODE_VERSION_NUM && XLogLogicalInfoActive()) { rechdr->xl_term |= XLOG_CONTAIN_CSN; } + rechdr->xl_xid = xid; + rechdr->xl_prev = InvalidXLogRecPtr; + rechdr->xl_info = info; + rechdr->xl_rmid = rmid; + Assert(hashbucket_flag == false || no_hashbucket_flag == false); rechdr->xl_bucket_id = (uint2)(bucket_id + 1); + rechdr->xl_crc = rdata_crc; return t_thrd.xlog_cxt.ptr_hdr_rdt; } diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 9ce999361e..ec6264b522 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -1856,12 +1856,12 @@ void pgstate_update_percentile_responsetime(void); */ static inline WaitState pgstat_report_waitstatus(WaitState waitstatus, bool isOnlyFetch = false) { - volatile 
PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - WaitState oldwaitstatus; - if (IS_PGSTATE_TRACK_UNDEFINE) return STATE_WAIT_UNDEFINED; + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + WaitState oldwaitstatus; + WaitState oldStatus = beentry->st_waitstatus; if (isOnlyFetch) @@ -1919,10 +1919,11 @@ static inline WaitState pgstat_report_waitstatus(WaitState waitstatus, bool isOn */ static inline WaitState pgstat_report_waitstatus_xid(WaitState waitstatus, uint64 xid, bool isOnlyFetch = false) { - volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - if (IS_PGSTATE_TRACK_UNDEFINE) return STATE_WAIT_UNDEFINED; + + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + WaitState oldStatus = beentry->st_waitstatus; if (isOnlyFetch) @@ -1949,11 +1950,12 @@ static inline WaitState pgstat_report_waitstatus_xid(WaitState waitstatus, uint6 */ static inline WaitState pgstat_report_waitstatus_relname(WaitState waitstatus, char* relname, bool isOnlyFetch = false) { + if (IS_PGSTATE_TRACK_UNDEFINE) + return STATE_WAIT_UNDEFINED; + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; int len = 0; - if (IS_PGSTATE_TRACK_UNDEFINE) - return STATE_WAIT_UNDEFINED; WaitState oldStatus = beentry->st_waitstatus; if (isOnlyFetch) @@ -1992,10 +1994,11 @@ static inline WaitState pgstat_report_waitstatus_relname(WaitState waitstatus, c static inline WaitState pgstat_report_waitstatus_comm(WaitState waitstatus, int nodeId = -1, int waitnode_count = -1, int plannodeid = -1, int numnodes = -1, bool isOnlyFetch = false) { - volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - if (IS_PGSTATE_TRACK_UNDEFINE) return STATE_WAIT_UNDEFINED; + + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + WaitState oldStatus = beentry->st_waitstatus; if (isOnlyFetch) @@ -2024,11 +2027,11 @@ static inline WaitState pgstat_report_waitstatus_comm(WaitState waitstatus, int */ static inline 
WaitStatePhase pgstat_report_waitstatus_phase(WaitStatePhase waitstatus_phase, bool isOnlyFetch = false) { - volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - if (IS_PGSTATE_TRACK_UNDEFINE) return PHASE_NONE; + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + WaitStatePhase oldPhase = beentry->st_waitstatus_phase; if (isOnlyFetch) @@ -2045,10 +2048,10 @@ static inline WaitStatePhase pgstat_report_waitstatus_phase(WaitStatePhase waits static inline void pgstat_report_wait_lock_failed(uint32 wait_event_info) { - volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; if (!u_sess->attr.attr_common.pgstat_track_activities || !u_sess->attr.attr_common.enable_instr_track_wait || - !beentry) + !t_thrd.shemem_ptr_cxt.MyBEEntry) return; + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; pgstat_increment_changecount_before(beentry); uint32 old_wait_event_info = beentry->st_waitevent; UpdateWaitEventFaildStat(&beentry->waitInfo, old_wait_event_info); @@ -2070,11 +2073,11 @@ static inline void pgstat_report_wait_lock_failed(uint32 wait_event_info) */ static inline void pgstat_report_waitevent(uint32 wait_event_info) { - PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - if (IS_PGSTATE_TRACK_UNDEFINE) return; + PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + pgstat_increment_changecount_before(beentry); /* * Since this is a four-byte field which is always read and written as @@ -2099,11 +2102,11 @@ static inline void pgstat_report_waitevent(uint32 wait_event_info) static inline void pgstat_report_waitevent_count(uint32 wait_event_info) { - PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - if (IS_PGSTATE_TRACK_UNDEFINE) return; + PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + pgstat_increment_changecount_before(beentry); /* * Since this is a four-byte field which is always read and written as @@ -2120,11 +2123,11 @@ static inline void 
pgstat_report_waitevent_count(uint32 wait_event_info) static inline void pgstat_reset_waitStatePhase(WaitState waitstatus, WaitStatePhase waitstatus_phase) { - volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - if (IS_PGSTATE_TRACK_UNDEFINE) return; + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + beentry->st_waitstatus = waitstatus; beentry->st_waitstatus_phase = waitstatus_phase; -- Gitee From f8ca1af762e2e6c4d231aa483b28edc5fd7a9505 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 1 Jul 2024 11:57:24 +0800 Subject: [PATCH 006/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Don=20update=E5=9C=BA?= =?UTF-8?q?=E6=99=AF=E5=9C=A8sqlbypass=E4=B8=8B=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../runtime/opfusion/opfusion_update.cpp | 2 ++ src/test/regress/expected/single_node_update.out | 14 ++++++++++++++ src/test/regress/sql/single_node_update.sql | 7 +++++++ 3 files changed, 23 insertions(+) diff --git a/src/gausskernel/runtime/opfusion/opfusion_update.cpp b/src/gausskernel/runtime/opfusion/opfusion_update.cpp index 70b82de19f..2420e7c327 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_update.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_update.cpp @@ -368,6 +368,7 @@ lreplace: ItemPointer tupleid = NULL; bool *temp_isnull = NULL; Datum *temp_values; + int temp_nvalid = m_local.m_reslot->tts_nvalid; relkind = result_rel_info->ri_RelationDesc->rd_rel->relkind; result_rel_info = result_rel_info + m_c_local.m_estate->result_rel_index; if (relkind == RELKIND_RELATION || RELKIND_IS_SEQUENCE(relkind)) { @@ -392,6 +393,7 @@ lreplace: } m_local.m_reslot->tts_isnull = temp_isnull; m_local.m_reslot->tts_values = temp_values; + m_local.m_reslot->tts_nvalid = temp_nvalid; } if (rel->rd_att->constr) { diff --git a/src/test/regress/expected/single_node_update.out b/src/test/regress/expected/single_node_update.out index 
662753fad7..0322498475 100644 --- a/src/test/regress/expected/single_node_update.out +++ b/src/test/regress/expected/single_node_update.out @@ -784,6 +784,20 @@ select * from bypass_pt_update order by a; 9 | 2 (8 rows) +drop table bypass_pt_update; +create table bypass_pt_update (a int primary key, c text, d timestamp(0) with time zone default current_timestamp(0) on update current_timestamp(0)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "bypass_pt_update_pkey" for table "bypass_pt_update" +insert into bypass_pt_update select 1, 'sdawa'; +explain (costs off) update bypass_pt_update set c = 'sdawa' where a = 1; + QUERY PLAN +------------------------------------------------------------------ + [Bypass] + Update on bypass_pt_update + -> Index Scan using bypass_pt_update_pkey on bypass_pt_update + Index Cond: (a = 1) +(4 rows) + +update bypass_pt_update set c = 'sdawa' where a = 1; drop table bypass_pt_update; set sql_beta_feature='a_style_coerce'; set enable_partition_opfusion = off; diff --git a/src/test/regress/sql/single_node_update.sql b/src/test/regress/sql/single_node_update.sql index 30dc1409f8..2479200ebf 100644 --- a/src/test/regress/sql/single_node_update.sql +++ b/src/test/regress/sql/single_node_update.sql @@ -328,6 +328,13 @@ update bypass_pt_update set a = 9 where a = 2; select * from bypass_pt_update order by a; drop table bypass_pt_update; + +create table bypass_pt_update (a int primary key, c text, d timestamp(0) with time zone default current_timestamp(0) on update current_timestamp(0)); +insert into bypass_pt_update select 1, 'sdawa'; +explain (costs off) update bypass_pt_update set c = 'sdawa' where a = 1; +update bypass_pt_update set c = 'sdawa' where a = 1; +drop table bypass_pt_update; + set sql_beta_feature='a_style_coerce'; set enable_partition_opfusion = off; set enable_opfusion = off; -- Gitee From ee4fc886887c07560327bacc14c1825d70d2b717 Mon Sep 17 00:00:00 2001 From: l1azzzy Date: Tue, 18 Jun 2024 10:51:06 +0800 
Subject: [PATCH 007/347] =?UTF-8?q?tsdb=E9=80=82=E9=85=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- GNUmakefile.in | 1 + build/script/aarch64_opengauss_list | 5 +++++ build/script/opengauss_release_list_ubuntu_single | 5 +++++ build/script/utils/make_compile.sh | 5 +++++ build/script/x86_64_opengauss_list | 5 +++++ .../backend/utils/misc/postgresql_distribute.conf.sample | 2 +- src/common/backend/utils/misc/postgresql_single.conf.sample | 5 ++++- src/gausskernel/runtime/vecexecutor/vecexpression.cpp | 6 +++--- 8 files changed, 29 insertions(+), 5 deletions(-) diff --git a/GNUmakefile.in b/GNUmakefile.in index 1727663507..2e95bfab4d 100644 --- a/GNUmakefile.in +++ b/GNUmakefile.in @@ -102,6 +102,7 @@ install: @if test -d contrib/gms_stats; then $(MAKE) -C contrib/gms_stats $@; fi @if test -d contrib/gms_profiler; then $(MAKE) -C contrib/gms_profiler $@; fi @if test -d contrib/gms_output; then $(MAKE) -C contrib/gms_output $@; fi + @if test -d contrib/timescaledb; then export CC=${GCC_PATH}/gcc/bin/gcc; export CXX=${GCC_PATH}/gcc/bin/g++; (cd contrib/timescaledb && ./run_to_build.sh && cd build && $(MAKE) $@); fi +@echo "openGauss installation complete." 
endif endif diff --git a/build/script/aarch64_opengauss_list b/build/script/aarch64_opengauss_list index 9f86873960..dc5d52fa5c 100644 --- a/build/script/aarch64_opengauss_list +++ b/build/script/aarch64_opengauss_list @@ -75,6 +75,8 @@ ./share/postgresql/extension/ndpplugin--1.0.sql ./share/postgresql/extension/spqplugin.control ./share/postgresql/extension/spqplugin--1.0.sql +./share/postgresql/extension/timescaledb.control +./share/postgresql/extension/timescaledb--1.7.4.sql ./share/postgresql/extension/dolphin.control ./share/postgresql/extension/dolphin--4.0.sql ./share/postgresql/extension/dolphin--1.0--1.1.sql @@ -803,6 +805,9 @@ ./lib/postgresql/pg_plugin ./lib/postgresql/proc_srclib ./lib/postgresql/security_plugin.so +./lib/postgresql/timescaledb-1.7.4.so +./lib/postgresql/timescaledb.so +./lib/postgresql/timescaledb-tsl-1.7.4.so ./lib/postgresql/dolphin.so ./lib/postgresql/age.so ./lib/postgresql/datavec.so diff --git a/build/script/opengauss_release_list_ubuntu_single b/build/script/opengauss_release_list_ubuntu_single index 87b3530615..6172210264 100644 --- a/build/script/opengauss_release_list_ubuntu_single +++ b/build/script/opengauss_release_list_ubuntu_single @@ -61,6 +61,8 @@ ./share/postgresql/extension/hstore.control ./share/postgresql/extension/security_plugin.control ./share/postgresql/extension/security_plugin--1.0.sql +./share/postgresql/extension/timescaledb.control +./share/postgresql/extension/timescaledb--1.7.4.sql ./share/postgresql/extension/dolphin.control ./share/postgresql/extension/dolphin--4.0.sql ./share/postgresql/extension/dolphin--1.0--1.1.sql @@ -776,6 +778,9 @@ ./lib/postgresql/pg_plugin ./lib/postgresql/proc_srclib ./lib/postgresql/security_plugin.so +./lib/postgresql/timescaledb-1.7.4.so +./lib/postgresql/timescaledb.so +./lib/postgresql/timescaledb-tsl-1.7.4.so ./lib/postgresql/dolphin.so ./lib/postgresql/age.so ./lib/postgresql/datavec.so diff --git a/build/script/utils/make_compile.sh 
b/build/script/utils/make_compile.sh index cf93697277..c4513f54fe 100644 --- a/build/script/utils/make_compile.sh +++ b/build/script/utils/make_compile.sh @@ -238,6 +238,11 @@ function install_gaussdb() fi fi +#tsdb prepare + if [ -d "$CODE_BASE/contrib/timescaledb" ]; then + cp $CODE_BASE/contrib/timescaledb/og-timescaledb1.7.4.sql ${GAUSSHOME}/share/postgresql/extension/timescaledb--1.7.4.sql + fi + cd "$ROOT_DIR/contrib/pg_upgrade_support" make clean >> "$LOG_FILE" 2>&1 make -sj >> "$LOG_FILE" 2>&1 diff --git a/build/script/x86_64_opengauss_list b/build/script/x86_64_opengauss_list index fafad621d0..a59ef549a1 100644 --- a/build/script/x86_64_opengauss_list +++ b/build/script/x86_64_opengauss_list @@ -90,6 +90,8 @@ ./share/postgresql/extension/openGauss_expr_dolphin.ir ./share/postgresql/extension/age--1.0.0.sql ./share/postgresql/extension/age.control +./share/postgresql/extension/timescaledb.control +./share/postgresql/extension/timescaledb--1.7.4.sql ./share/postgresql/extension/datavec--0.4.4.sql ./share/postgresql/extension/datavec.control ./share/postgresql/extension/assessment--1.0.sql @@ -803,6 +805,9 @@ ./lib/postgresql/pg_plugin ./lib/postgresql/proc_srclib ./lib/postgresql/security_plugin.so +./lib/postgresql/timescaledb-1.7.4.so +./lib/postgresql/timescaledb.so +./lib/postgresql/timescaledb-tsl-1.7.4.so ./lib/postgresql/dolphin.so ./lib/postgresql/age.so ./lib/postgresql/datavec.so diff --git a/src/common/backend/utils/misc/postgresql_distribute.conf.sample b/src/common/backend/utils/misc/postgresql_distribute.conf.sample index 7d7d324928..0a0ecfad0f 100644 --- a/src/common/backend/utils/misc/postgresql_distribute.conf.sample +++ b/src/common/backend/utils/misc/postgresql_distribute.conf.sample @@ -175,7 +175,7 @@ cstore_buffers = 512MB #min 16MB #max_files_per_process = 1000 # min 25 # (change requires restart) -#shared_preload_libraries = '' # (change requires restart) +#shared_preload_libraries = '' # (change requires restart. 
if timescaledb is used, add $libdir/timescaledb) # - Cost-Based Vacuum Delay - #vacuum_cost_delay = 0ms # 0-100 milliseconds diff --git a/src/common/backend/utils/misc/postgresql_single.conf.sample b/src/common/backend/utils/misc/postgresql_single.conf.sample index f23618c82c..7c28d54ac2 100644 --- a/src/common/backend/utils/misc/postgresql_single.conf.sample +++ b/src/common/backend/utils/misc/postgresql_single.conf.sample @@ -171,7 +171,10 @@ cstore_buffers = 512MB #min 16MB #max_files_per_process = 1000 # min 25 # (change requires restart) -#shared_preload_libraries = '' # (change requires restart) + + +#shared_preload_libraries = '' # (change requires restart. if timescaledb is used, add $libdir/timescaledb) + # - Cost-Based Vacuum Delay - #vacuum_cost_delay = 0ms # 0-100 milliseconds diff --git a/src/gausskernel/runtime/vecexecutor/vecexpression.cpp b/src/gausskernel/runtime/vecexecutor/vecexpression.cpp index f069e1824b..c803c8f4c4 100644 --- a/src/gausskernel/runtime/vecexecutor/vecexpression.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecexpression.cpp @@ -1996,9 +1996,9 @@ void InitVectorFunction(FunctionCallInfo finfo, MemoryContext fcacheCxt) if (u_sess->attr.attr_sql.dolphin && g_instance.plugin_vec_func_cxt.vec_func_plugin[DOLPHIN_VEC] != NULL) { vec_func_hash = g_instance.plugin_vec_func_cxt.vec_func_plugin[DOLPHIN_VEC]; - } else if (u_sess->attr.attr_sql.whale && - g_instance.plugin_vec_func_cxt.vec_func_plugin[WHALE_VEC] != NULL) { - vec_func_hash = g_instance.plugin_vec_func_cxt.vec_func_plugin[WHALE_VEC]; + } else if (u_sess->attr.attr_sql.whale && + g_instance.plugin_vec_func_cxt.vec_func_plugin[WHALE_VEC] != NULL) { + vec_func_hash = g_instance.plugin_vec_func_cxt.vec_func_plugin[WHALE_VEC]; } else #endif vec_func_hash = g_instance.vec_func_hash; -- Gitee From 66948ff5ba6a2253887909964d863a3a0566fcdb Mon Sep 17 00:00:00 2001 From: lexin184383 Date: Tue, 2 Jul 2024 11:21:09 +0800 Subject: [PATCH 008/347] 
=?UTF-8?q?asan=E5=9B=9E=E5=BD=92=E6=B5=8B?= =?UTF-8?q?=E8=AF=95=E7=94=A8=E4=BE=8B=E5=8F=91=E7=8E=B0pagehack=E5=B7=A5?= =?UTF-8?q?=E5=85=B7=E6=9C=89=E5=86=85=E5=AD=98=E8=B6=8A=E7=95=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/pagehack/pagehack.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/pagehack/pagehack.cpp b/contrib/pagehack/pagehack.cpp index 28718c1638..294d4b79bd 100644 --- a/contrib/pagehack/pagehack.cpp +++ b/contrib/pagehack/pagehack.cpp @@ -3963,7 +3963,7 @@ static int parse_pg_control_file(char* filename) FILE* fd = NULL; ControlFileData ControlFile; size_t readBytes = 0; - char* wal_level_str[] = {"minimal", "archive", "hot_standby"}; + char* wal_level_str[] = {"minimal", "archive", "hot_standby", "logical"}; char* db_state_str[] = {"starting up", "shut down", "shut down in recovery", -- Gitee From cb0359b9818e82b1818d885e30bf341e69bf8088 Mon Sep 17 00:00:00 2001 From: lexin184383 Date: Tue, 2 Jul 2024 11:24:28 +0800 Subject: [PATCH 009/347] =?UTF-8?q?select=20'a'@@=20repeat('3@n',96000);?= =?UTF-8?q?=E8=AF=AD=E5=8F=A5=E6=89=A7=E8=A1=8C=E6=97=B6=E9=97=B4=E8=BE=83?= =?UTF-8?q?=E9=95=BF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/tsearch/wparser_def.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/common/backend/tsearch/wparser_def.cpp b/src/common/backend/tsearch/wparser_def.cpp index f122331165..df0f60ee37 100644 --- a/src/common/backend/tsearch/wparser_def.cpp +++ b/src/common/backend/tsearch/wparser_def.cpp @@ -22,6 +22,7 @@ #include "tsearch/ts_type.h" #include "tsearch/ts_utils.h" #include "utils/builtins.h" +#include "miscadmin.h" /* Define me to enable tracing of parser behavior */ /* Output token categories */ @@ -920,6 +921,8 @@ static int p_ishost(TParser* prs) tmpprs->wanthost = true; + check_stack_depth(); + if (TParserGet(tmpprs) && tmpprs->type == HOST) { 
prs->state->posbyte += tmpprs->lenbytetoken; prs->state->poschar += tmpprs->lenchartoken; -- Gitee From 3bdceb6f403aa0d61a890f2a489ab974c5b3e4d2 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Tue, 2 Jul 2024 19:36:36 +0800 Subject: [PATCH 010/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=80=E4=B8=AAcur?= =?UTF-8?q?sor=20expression=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_expr.cpp | 5 +- src/gausskernel/runtime/executor/execQual.cpp | 9 ++ .../regress/expected/cursor_expression.out | 100 ++++++++++++++++++ src/test/regress/sql/cursor_expression.sql | 44 ++++++++ 4 files changed, 155 insertions(+), 3 deletions(-) diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 681cf44c5c..7a55479be9 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -3831,9 +3831,8 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur int nParamExec = 0; parse_state_temp = parse_state_parent; - while (parse_state_temp != NULL) { - nParamExec += list_length(parse_state_temp->cursor_expression_para_var); - parse_state_temp = parse_state_temp->parentParseState; + if (parse_state_temp != NULL) { + nParamExec = list_length(parse_state_temp->cursor_expression_para_var); } plan_tree->nParamExec = nParamExec; diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index 69e6312474..ba8a50db3a 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -5705,6 +5705,15 @@ static Datum ExecEvalCursorExpression(CursorExpressionState* state, ExprContext* PortalStart(portal, econtext->ecxt_param_list_info, 0, GetActiveSnapshot()); int plan_param_number = ((PlannedStmt*)(cursor_expression->plan))->nParamExec; + int state_param_number = list_length(state->param); + + if 
(unlikely(plan_param_number > state_param_number)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmodule(MOD_EXECUTOR), + errmsg("The expected number of parameters is %d, but actual is %d.", plan_param_number, state_param_number))); + } + for (int i = 0; i < plan_param_number; i++) { bool expr_is_null = false; ParamExecData* prm = &(portal->queryDesc->estate->es_param_exec_vals[i]); diff --git a/src/test/regress/expected/cursor_expression.out b/src/test/regress/expected/cursor_expression.out index 6b3a59b675..576fa27896 100644 --- a/src/test/regress/expected/cursor_expression.out +++ b/src/test/regress/expected/cursor_expression.out @@ -804,7 +804,107 @@ NOTICE: last_name : heliu (1 row) drop procedure test_cursor_2; +-- +create table t_cursor_0011_01(department_id int, department_name varchar(50)); +create table t_cursor_0011_02(employees_id int, department_id int, first_name varchar(50), last_name varchar(50), email varchar(50), phone_number int); +insert into t_cursor_0011_01 values (1, 'sale'),(2, 'rd'),(3, 'pro'); +insert into t_cursor_0011_01 values (1, 'sale'),(2, 'rd'),(3, 'pro');; +insert into t_cursor_0011_02 values (1, 1, 'zhang', 'san', '1@abc.com', 123),(2, 1, 'li', 'si', '2@abc.com', 124); +insert into t_cursor_0011_02 values (1, 2, 'wang', 'wu', '3@abc.com', 321),(2, 2, 'he', 'liu', '4@abc.com', 421); +create or replace procedure pro_cursor_0011_02 +as + company_name varchar(100); + department_name varchar(100); + employees_id_var int; + name_var varchar(100); + type ref_cur_type is ref cursor; + my_cur ref_cur_type; + my_cur2 ref_cur_type; + cursor c1 is select department_id, cursor(select department_name, cursor(select department_id ) from t_cursor_0011_02) from t_cursor_0011_01 d order by department_name; +begin + open c1; + loop + fetch c1 into department_name, my_cur; + exit when c1%notfound; + raise notice 'department_name : % %',department_name, my_cur; + loop + fetch my_cur into name_var, my_cur2; + exit when 
my_cur%notfound; + raise notice ' department employees info : % %',name_var, my_cur2; + loop + fetch my_cur2 into employees_id_var; + exit when my_cur2%notfound; + raise notice ' employees_id_var : %',employees_id_var; + end loop; + end loop; + close my_cur; + end loop; +end; +/ +call pro_cursor_0011_02(); +NOTICE: department_name : 3 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 1 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 1 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 2 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 2 +NOTICE: department_name : 3 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 1 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 1 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 2 +NOTICE: department employees info : pro +NOTICE: employees_id_var : 2 +NOTICE: department_name : 2 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 1 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 1 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 2 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 2 +NOTICE: department_name : 2 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 1 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 1 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 2 +NOTICE: department employees info : rd +NOTICE: employees_id_var : 2 +NOTICE: department_name : 1 +NOTICE: department employees info : sale +NOTICE: employees_id_var : 1 +NOTICE: department employees info : sale +NOTICE: employees_id_var : 1 +NOTICE: department employees info : sale +NOTICE: employees_id_var : 2 +NOTICE: department employees info : sale +NOTICE: employees_id_var : 2 +NOTICE: department_name : 1 +NOTICE: department employees info : sale +NOTICE: employees_id_var : 1 +NOTICE: department employees 
info : sale +NOTICE: employees_id_var : 1 +NOTICE: department employees info : sale +NOTICE: employees_id_var : 2 +NOTICE: department employees info : sale +NOTICE: employees_id_var : 2 + pro_cursor_0011_02 +-------------------- + +(1 row) + -- clean +drop procedure pro_cursor_0011_02; +drop table t_cursor_0011_01; +drop table t_cursor_0011_02; drop table res_count1; drop table employees; drop table departments; diff --git a/src/test/regress/sql/cursor_expression.sql b/src/test/regress/sql/cursor_expression.sql index 6e5b1447ac..6412a9e299 100644 --- a/src/test/regress/sql/cursor_expression.sql +++ b/src/test/regress/sql/cursor_expression.sql @@ -481,7 +481,51 @@ call test_cursor_2(); drop procedure test_cursor_2; +-- +create table t_cursor_0011_01(department_id int, department_name varchar(50)); +create table t_cursor_0011_02(employees_id int, department_id int, first_name varchar(50), last_name varchar(50), email varchar(50), phone_number int); + +insert into t_cursor_0011_01 values (1, 'sale'),(2, 'rd'),(3, 'pro'); +insert into t_cursor_0011_01 values (1, 'sale'),(2, 'rd'),(3, 'pro');; +insert into t_cursor_0011_02 values (1, 1, 'zhang', 'san', '1@abc.com', 123),(2, 1, 'li', 'si', '2@abc.com', 124); +insert into t_cursor_0011_02 values (1, 2, 'wang', 'wu', '3@abc.com', 321),(2, 2, 'he', 'liu', '4@abc.com', 421); + +create or replace procedure pro_cursor_0011_02 +as + company_name varchar(100); + department_name varchar(100); + employees_id_var int; + name_var varchar(100); + type ref_cur_type is ref cursor; + my_cur ref_cur_type; + my_cur2 ref_cur_type; + cursor c1 is select department_id, cursor(select department_name, cursor(select department_id ) from t_cursor_0011_02) from t_cursor_0011_01 d order by department_name; +begin + open c1; + loop + fetch c1 into department_name, my_cur; + exit when c1%notfound; + raise notice 'department_name : % %',department_name, my_cur; + loop + fetch my_cur into name_var, my_cur2; + exit when my_cur%notfound; + raise notice 
' department employees info : % %',name_var, my_cur2; + loop + fetch my_cur2 into employees_id_var; + exit when my_cur2%notfound; + raise notice ' employees_id_var : %',employees_id_var; + end loop; + end loop; + close my_cur; + end loop; +end; +/ +call pro_cursor_0011_02(); + -- clean +drop procedure pro_cursor_0011_02; +drop table t_cursor_0011_01; +drop table t_cursor_0011_02; drop table res_count1; drop table employees; drop table departments; -- Gitee From a0fbf45eb53ce7be20d10c7d78f2e7d5604fed40 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Wed, 3 Jul 2024 19:08:20 +0800 Subject: [PATCH 011/347] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91:?= =?UTF-8?q?=20=E4=BF=AE=E5=A4=8DIA92ZU=E6=89=80=E7=A4=BA=E7=9A=84=E6=AD=A3?= =?UTF-8?q?=E5=88=99=E8=A1=A8=E8=BE=BE=E5=BC=8F=E4=B8=BA[[:digit:]]?= =?UTF-8?q?=E6=97=B6core=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=BF=AE=E5=A4=8DIA92Z?= =?UTF-8?q?U=E6=89=80=E7=A4=BA=E7=9A=84=E6=AD=A3=E5=88=99=E8=A1=A8?= =?UTF-8?q?=E8=BE=BE=E5=BC=8F=E4=B8=BA[[:digit:]]=E6=97=B6core=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98=E3=80=82=20=E3=80=90=E6=A0=B9=E5=9B=A0?= =?UTF-8?q?=E5=88=86=E6=9E=90=E3=80=91:=20=E8=AE=BF=E9=97=AE=E7=A9=BA?= =?UTF-8?q?=E6=8C=87=E9=92=88=E4=BA=86=E3=80=82=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20=E8=AE=BF=E9=97=AE?= =?UTF-8?q?=E5=89=8D=E5=A2=9E=E5=8A=A0=E6=8C=87=E9=92=88=E5=88=A4=E6=96=AD?= =?UTF-8?q?=E3=80=82=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82?= =?UTF-8?q?=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengaussorg/das?= =?UTF-8?q?hboard=3Fissue=3DIA92ZU?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/regex/regprefix.cpp | 4 +- src/test/regress/expected/test_regexp.out | 247 ++++++++++++++++++++++ src/test/regress/parallel_schedule0A | 2 +- src/test/regress/sql/test_regexp.sql | 105 +++++++++ 4 files changed, 356 insertions(+), 2 deletions(-) 
create mode 100644 src/test/regress/expected/test_regexp.out create mode 100644 src/test/regress/sql/test_regexp.sql diff --git a/src/common/backend/regex/regprefix.cpp b/src/common/backend/regex/regprefix.cpp index 55d6354ce4..4991f3a17c 100644 --- a/src/common/backend/regex/regprefix.cpp +++ b/src/common/backend/regex/regprefix.cpp @@ -192,8 +192,10 @@ static int findprefix(struct cnfa* cnfa, struct colormap* cm, chr* string, size_ */ if (cm->cd[thiscolor].nschrs == 1 || thiscolor == 0) { c = cm->cd[thiscolor].firstchr; - } else { + } else if (cm->cmranges != NULL && thiscolor > 0 && thiscolor <= cm->numcmranges) { c = cm->cmranges[thiscolor - 1].cmin; + } else { + break; } if (GETCOLOR(cm, c) != thiscolor) break; diff --git a/src/test/regress/expected/test_regexp.out b/src/test/regress/expected/test_regexp.out new file mode 100644 index 0000000000..b8309dc387 --- /dev/null +++ b/src/test/regress/expected/test_regexp.out @@ -0,0 +1,247 @@ +-- create new schema -- +drop schema if exists test_regexp; +NOTICE: schema "test_regexp" does not exist, skipping +create schema test_regexp; +set search_path=test_regexp; +create table t1 +( + f_int1 integer default 0 not null, + f_int2 integer, + f_int3 integer, + f_bigint1 bigint, + f_bigint2 bigint, + f_bigint3 bigint, + f_bool1 bool, + f_bool2 bool, + f_num1 number(38, 0), + f_num2 number(38, 0), + f_dec1 DECIMAL(38, 0), + f_dec2 DECIMAL(38, 0), + f_num10 number(38, 10), + f_dec10 decimal(38, 10), + f_float float, + f_double double precision, + f_real real, + f_char1 char(128), + f_char2 char(128), + f_varchar1 varchar(512), + f_varchar2 varchar2(512), + f_date1 date, + f_date2 date, + f_time date, + f_timestamp timestamp, + f_tp_tz timestamp, + f_tp_ltz timestamp, + f_binary bytea, + f_varbinary bytea, + f_blob blob, + f_clob clob +); +create index idx_1 on t1(f_int1); +delete from t1; +insert into t1(f_int1, f_varchar1) values (1,'1234560'); +insert into t1(f_int1, f_varchar1) values (2,'1234560'); +insert into 
t1(f_int1, f_varchar1) values (3,'1b3b560'); +insert into t1(f_int1, f_varchar1) values (4,'abc'); +insert into t1(f_int1, f_varchar1) values (5,'abcde'); +insert into t1(f_int1, f_varchar1) values (6,'ADREasx'); +insert into t1(f_int1, f_varchar1) values (7,'123 45'); +insert into t1(f_int1, f_varchar1) values (8,'adc de'); +insert into t1(f_int1, f_varchar1) values (9,'adc,.de'); +insert into t1(f_int1, f_varchar1) values (10,'1B'); +insert into t1(f_int1, f_varchar1) values (11,'abcbvbnb'); +insert into t1(f_int1, f_varchar1) values (12,'11114560'); +insert into t1(f_int1, f_varchar1) values (13,'11124560'); +insert into t1(f_int1, f_varchar1) values(14, 'abc'||chr(10)||'DEF'||chr(10)||'hij'); +insert into t1(f_int1, f_varchar1) values(15, '1abc2abc3abc4abc5abc6abc7abc8abc9abc9abcAabcBabc'); +insert into t1(f_int1, f_varchar1) values(16, '1abc2abc3abc4abc5abc6abc7abc8abc9abcAabcAabcBabc'); +insert into t1(f_int1, f_varchar1) values(17, 'oltp100'); +insert into t1(f_int1, f_varchar1) values(18, 'oltp 100'); +insert into t1(f_int1, f_char1, f_varchar2) values(19,'Fluffy','Fluffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(20,'Buffy','Buffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(21,'fluffy','fluffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(22,'buffy','buffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(23,'桂林山水abc高山流水','桂林山水abc高山流水'); +insert into t1(f_int1, f_char1, f_varchar2) values(24,'aa abc zzzz','aa abc zzzz'); +insert into t1(f_int1, f_char1, f_varchar2) values(25,'我的的的的 abcabcabcabcabcabcabcabcabcabcabcabc','我的的的的 abcabcabcabcabcabcabcabcabcabcabcabc'); +insert into t1(f_int1, f_char1, f_varchar2) values(26,'abcbvbnb +efgh +ijjkkkkkkk','abcbvbnb +efgh +ijjkkkkkkk123'); +insert into t1(f_int1, f_char1, f_varchar2) values(27,'abc efg','hgj khln'); +insert into t1(f_int1, f_char1, f_varchar2) values(28,'abc\efg','hgj(khln'); +insert into t1(f_int1, f_char1, f_varchar2) values(29,'*+?|^${}.','*+?|^${}.'); 
+select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'1....60'); + f_int1 | f_varchar1 +--------+------------ + 1 | 1234560 + 2 | 1234560 + 3 | 1b3b560 + 12 | 11114560 + 13 | 11124560 +(5 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'1[0-9]{4}60'); + f_int1 | f_varchar1 +--------+------------ + 1 | 1234560 + 2 | 1234560 + 12 | 11114560 + 13 | 11124560 +(4 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'1[[:digit:]]{4}60'); + f_int1 | f_varchar1 +--------+------------ + 1 | 1234560 + 2 | 1234560 + 12 | 11114560 + 13 | 11124560 +(4 rows) + +select f_int1,f_varchar1 from t1 where not regexp_like(f_varchar1,'^[[:digit:]]+$'); + f_int1 | f_varchar1 +--------+-------------------------------------------------- + 3 | 1b3b560 + 4 | abc + 5 | abcde + 6 | ADREasx + 7 | 123 45 + 8 | adc de + 9 | adc,.de + 10 | 1B + 11 | abcbvbnb + 14 | abc + + | DEF + + | hij + 15 | 1abc2abc3abc4abc5abc6abc7abc8abc9abc9abcAabcBabc + 16 | 1abc2abc3abc4abc5abc6abc7abc8abc9abcAabcAabcBabc + 17 | oltp100 + 18 | oltp 100 +(14 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^[^[:digit:]]+$'); + f_int1 | f_varchar1 +--------+------------ + 4 | abc + 5 | abcde + 6 | ADREasx + 8 | adc de + 9 | adc,.de + 11 | abcbvbnb + 14 | abc + + | DEF + + | hij +(7 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^1[2B]'); + f_int1 | f_varchar1 +--------+------------ + 1 | 1234560 + 2 | 1234560 + 7 | 123 45 + 10 | 1B +(4 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'[[:space:]]'); + f_int1 | f_varchar1 +--------+------------ + 7 | 123 45 + 8 | adc de + 14 | abc + + | DEF + + | hij + 18 | oltp 100 +(4 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^([a-z]+|[0-9]+)$'); + f_int1 | f_varchar1 +--------+------------ + 1 | 1234560 + 2 | 1234560 + 4 | abc + 5 | abcde + 11 | abcbvbnb + 12 | 11114560 + 13 | 11124560 +(7 rows) + +select f_int1,f_varchar1 from t1 where 
regexp_like(f_varchar1,'[[:punct:]]'); + f_int1 | f_varchar1 +--------+------------ + 9 | adc,.de +(1 row) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^DEF$'); + f_int1 | f_varchar1 +--------+------------ +(0 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^1[2b]','ic'); + f_int1 | f_varchar1 +--------+------------ + 1 | 1234560 + 2 | 1234560 + 3 | 1b3b560 + 7 | 123 45 +(4 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^1[2b]','ci'); + f_int1 | f_varchar1 +--------+------------ + 1 | 1234560 + 2 | 1234560 + 3 | 1b3b560 + 7 | 123 45 + 10 | 1B +(5 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'a'); + f_int1 | f_varchar1 +--------+-------------------------------------------------- + 4 | abc + 5 | abcde + 6 | ADREasx + 8 | adc de + 9 | adc,.de + 11 | abcbvbnb + 14 | abc + + | DEF + + | hij + 15 | 1abc2abc3abc4abc5abc6abc7abc8abc9abc9abcAabcBabc + 16 | 1abc2abc3abc4abc5abc6abc7abc8abc9abcAabcAabcBabc +(9 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'(1abc)(2abc)(3abc)(4abc)(5abc)(6abc)(7abc)(8abc)(9abc)\9(Aabc)(Babc)'); + f_int1 | f_varchar1 +--------+-------------------------------------------------- + 15 | 1abc2abc3abc4abc5abc6abc7abc8abc9abc9abcAabcBabc +(1 row) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'(1abc)(2abc)(3abc)(4abc)(5abc)(6abc)(7abc)(8abc)(9abc)(Aabc)\a(Babc)'); + f_int1 | f_varchar1 +--------+------------ +(0 rows) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'oltp 100'); + f_int1 | f_varchar1 +--------+------------ + 18 | oltp 100 +(1 row) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'oltp100'); + f_int1 | f_varchar1 +--------+------------ + 17 | oltp100 +(1 row) + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'ffy*') order by 1; + f_int1 | f_varchar1 +--------+------------ +(0 rows) + +-- clean +drop index idx_1; +drop table t1; +drop schema if exists 
test_regexp cascade; diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index be351e8af4..b22d85016f 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -161,7 +161,7 @@ test: single_node_inet single_node_macaddr single_node_tstypes single_node_comme # ---------- #test: single_node_geometry single_node_horology #test: single_node_regex -test: single_node_regex_temp +test: single_node_regex_temp test_regexp test: single_node_oidjoins single_node_type_sanity # ---------- diff --git a/src/test/regress/sql/test_regexp.sql b/src/test/regress/sql/test_regexp.sql new file mode 100644 index 0000000000..80c50afec4 --- /dev/null +++ b/src/test/regress/sql/test_regexp.sql @@ -0,0 +1,105 @@ +-- create new schema -- +drop schema if exists test_regexp; +create schema test_regexp; +set search_path=test_regexp; + + +create table t1 +( + f_int1 integer default 0 not null, + f_int2 integer, + f_int3 integer, + f_bigint1 bigint, + f_bigint2 bigint, + f_bigint3 bigint, + f_bool1 bool, + f_bool2 bool, + f_num1 number(38, 0), + f_num2 number(38, 0), + f_dec1 DECIMAL(38, 0), + f_dec2 DECIMAL(38, 0), + f_num10 number(38, 10), + f_dec10 decimal(38, 10), + f_float float, + f_double double precision, + f_real real, + f_char1 char(128), + f_char2 char(128), + f_varchar1 varchar(512), + f_varchar2 varchar2(512), + f_date1 date, + f_date2 date, + f_time date, + f_timestamp timestamp, + f_tp_tz timestamp, + f_tp_ltz timestamp, + f_binary bytea, + f_varbinary bytea, + f_blob blob, + f_clob clob +); + +create index idx_1 on t1(f_int1); + +delete from t1; +insert into t1(f_int1, f_varchar1) values (1,'1234560'); +insert into t1(f_int1, f_varchar1) values (2,'1234560'); +insert into t1(f_int1, f_varchar1) values (3,'1b3b560'); +insert into t1(f_int1, f_varchar1) values (4,'abc'); +insert into t1(f_int1, f_varchar1) values (5,'abcde'); +insert into t1(f_int1, f_varchar1) values (6,'ADREasx'); +insert into t1(f_int1, 
f_varchar1) values (7,'123 45'); +insert into t1(f_int1, f_varchar1) values (8,'adc de'); +insert into t1(f_int1, f_varchar1) values (9,'adc,.de'); +insert into t1(f_int1, f_varchar1) values (10,'1B'); +insert into t1(f_int1, f_varchar1) values (11,'abcbvbnb'); +insert into t1(f_int1, f_varchar1) values (12,'11114560'); +insert into t1(f_int1, f_varchar1) values (13,'11124560'); +insert into t1(f_int1, f_varchar1) values(14, 'abc'||chr(10)||'DEF'||chr(10)||'hij'); +insert into t1(f_int1, f_varchar1) values(15, '1abc2abc3abc4abc5abc6abc7abc8abc9abc9abcAabcBabc'); +insert into t1(f_int1, f_varchar1) values(16, '1abc2abc3abc4abc5abc6abc7abc8abc9abcAabcAabcBabc'); +insert into t1(f_int1, f_varchar1) values(17, 'oltp100'); +insert into t1(f_int1, f_varchar1) values(18, 'oltp 100'); +insert into t1(f_int1, f_char1, f_varchar2) values(19,'Fluffy','Fluffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(20,'Buffy','Buffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(21,'fluffy','fluffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(22,'buffy','buffy'); +insert into t1(f_int1, f_char1, f_varchar2) values(23,'桂林山水abc高山流水','桂林山水abc高山流水'); +insert into t1(f_int1, f_char1, f_varchar2) values(24,'aa abc zzzz','aa abc zzzz'); +insert into t1(f_int1, f_char1, f_varchar2) values(25,'我的的的的 abcabcabcabcabcabcabcabcabcabcabcabc','我的的的的 abcabcabcabcabcabcabcabcabcabcabcabc'); +insert into t1(f_int1, f_char1, f_varchar2) values(26,'abcbvbnb +efgh +ijjkkkkkkk','abcbvbnb +efgh +ijjkkkkkkk123'); +insert into t1(f_int1, f_char1, f_varchar2) values(27,'abc efg','hgj khln'); +insert into t1(f_int1, f_char1, f_varchar2) values(28,'abc\efg','hgj(khln'); +insert into t1(f_int1, f_char1, f_varchar2) values(29,'*+?|^${}.','*+?|^${}.'); + + + +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'1....60'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'1[0-9]{4}60'); +select f_int1,f_varchar1 from t1 where 
regexp_like(f_varchar1,'1[[:digit:]]{4}60'); +select f_int1,f_varchar1 from t1 where not regexp_like(f_varchar1,'^[[:digit:]]+$'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^[^[:digit:]]+$'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^1[2B]'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'[[:space:]]'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^([a-z]+|[0-9]+)$'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'[[:punct:]]'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^DEF$'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^1[2b]','ic'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'^1[2b]','ci'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'a'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'(1abc)(2abc)(3abc)(4abc)(5abc)(6abc)(7abc)(8abc)(9abc)\9(Aabc)(Babc)'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'(1abc)(2abc)(3abc)(4abc)(5abc)(6abc)(7abc)(8abc)(9abc)(Aabc)\a(Babc)'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'oltp 100'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'oltp100'); +select f_int1,f_varchar1 from t1 where regexp_like(f_varchar1,'ffy*') order by 1; + +-- clean +drop index idx_1; +drop table t1; + +drop schema if exists test_regexp cascade; + -- Gitee From 46956673c44e79f989d345d2cad24d269dcdd74b Mon Sep 17 00:00:00 2001 From: huangjiajun <1148612505@qq.com> Date: Tue, 7 May 2024 10:51:56 +0800 Subject: [PATCH 012/347] support float & numeric with negative scale --- src/bin/pg_dump/pg_dump.cpp | 2 +- src/common/backend/parser/gram.y | 79 ++++- src/common/backend/utils/adt/format_type.cpp | 3 + src/common/backend/utils/adt/numeric.cpp | 184 +++++++---- src/common/backend/utils/adt/xml.cpp | 2 +- src/common/backend/utils/init/globals.cpp | 3 +- src/common/backend/utils/misc/guc/guc_sql.cpp | 3 +- 
.../libpq/client_logic_fmt/numeric.cpp | 2 +- src/common/pl/plpgsql/src/pl_scanner.cpp | 2 +- src/include/miscadmin.h | 7 +- src/include/utils/numeric.h | 2 + src/test/regress/expected/test_float.out | 160 +++++++++ .../expected/test_numeric_with_neg_scale.out | 311 ++++++++++++++++++ src/test/regress/parallel_schedule0A | 2 + src/test/regress/sql/test_float.sql | 86 +++++ .../sql/test_numeric_with_neg_scale.sql | 123 +++++++ 16 files changed, 885 insertions(+), 86 deletions(-) create mode 100644 src/test/regress/expected/test_float.out create mode 100644 src/test/regress/expected/test_numeric_with_neg_scale.out create mode 100644 src/test/regress/sql/test_float.sql create mode 100644 src/test/regress/sql/test_numeric_with_neg_scale.sql diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 5c1a73e49e..a6d256110c 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -23236,7 +23236,7 @@ static char* myFormatType(const char* typname, int32 typmod) tmp_typmod = typmod - VARHDRSZ; precision = (tmp_typmod >> 16) & 0xffff; - scale = tmp_typmod & 0xffff; + scale = (int16)(tmp_typmod & 0xffff); appendPQExpBuffer(buf, "(%d,%d)", precision, scale); } } diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 9620c5ae40..bf6dde2f94 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -265,6 +265,8 @@ static int64 SequenceStrGetInt64(const char *str); static int GetLoadType(int load_type_f, int load_type_s); static Node *MakeSqlLoadNode(char *colname); static void checkDeleteRelationError(); +static TypeName *ParseFloatByExtentedPrecision(int ival, int location, core_yyscan_t yyscanner); +static void CheckTypmodScale(List* arglist, int location, core_yyscan_t yyscanner); /* start with .. 
connect by related utilities */ static bool IsConnectByRootIdent(Node* node); @@ -25855,6 +25857,7 @@ Numeric: INT_P { if ($2 != NULL) { + CheckTypmodScale($2, @2, yyscanner); $$ = SystemTypeName("numeric"); $$->typmods = $2; $$->location = @1; @@ -25907,24 +25910,28 @@ Numeric: INT_P } | DECIMAL_P opt_type_modifiers { + CheckTypmodScale($2, @2, yyscanner); $$ = SystemTypeName("numeric"); $$->typmods = $2; $$->location = @1; } | NUMBER_P opt_type_modifiers { + CheckTypmodScale($2, @2, yyscanner); $$ = SystemTypeName("numeric"); $$->typmods = $2; $$->location = @1; } | DEC opt_type_modifiers { + CheckTypmodScale($2, @2, yyscanner); $$ = SystemTypeName("numeric"); $$->typmods = $2; $$->location = @1; } | NUMERIC opt_type_modifiers { + CheckTypmodScale($2, @2, yyscanner); $$ = SystemTypeName("numeric"); $$->typmods = $2; $$->location = @1; @@ -25950,22 +25957,34 @@ opt_float: '(' Iconst ')' errmsg("precision for type float must be at least 1 bit"), parser_errposition(@2))); } - else if ($2 <= 24) - $$ = SystemTypeName("float4"); - else if ($2 <= 53) - $$ = SystemTypeName("float8"); - else { - const char* message = "precision for type float must be less than 54 bits"; - InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("precision for type float must be less than 54 bits"), - parser_errposition(@2))); + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && FLOAT_AS_NUMERIC + && t_thrd.proc->workingVersionNum >= FLOAT_VERSION_NUMBER) { + $$ = ParseFloatByExtentedPrecision($2, @2, yyscanner); + } else { + if ($2 <= 24) + $$ = SystemTypeName("float4"); + else if ($2 <= 53) + $$ = SystemTypeName("float8"); + else { + const char* message = "precision for type float must be less than 54 bits"; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("precision for type float must be less than 54 bits"), + 
parser_errposition(@2))); + } } } | /*EMPTY*/ { - $$ = SystemTypeName("float8"); + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && FLOAT_AS_NUMERIC + && t_thrd.proc->workingVersionNum >= FLOAT_VERSION_NUMBER) + { + $$ = SystemTypeName("numeric"); + $$->typmods = list_make2(makeIntConst(126, -1), makeIntConst(-32768, -1)); + } else { + $$ = SystemTypeName("float8"); + } } ; @@ -32405,6 +32424,42 @@ static void parameter_check_execute_direct(const char* query) } } +static TypeName *ParseFloatByExtentedPrecision(int ival, int location, core_yyscan_t yyscanner) +{ + TypeName *typnam = NULL; + + /* Float binary precision must be between 1 and 126 */ + if (ival <= 126) { + typnam = SystemTypeName("numeric"); + typnam->typmods = list_make2(makeIntConst(ival, location), makeIntConst(-32768, -1)); + } else { + const char* message = "precision for type float must be less than 127 bits"; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("precision for type float must be less than 127 bits"), + parser_errposition(location))); + } + return typnam; +} + +static void CheckTypmodScale(List* arglist, int location, core_yyscan_t yyscanner) +{ + if (t_thrd.proc->workingVersionNum >= FLOAT_VERSION_NUMBER && + u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && list_length(arglist) == 2) { + Node *arg = (Node*)lsecond(arglist); + A_Const *n = IsA(arg, A_Const) ? 
(A_Const *)arg : NULL; + if (n != NULL && (n->val.val.ival > NUMERIC_MAX_SCALE || n->val.val.ival < NUMERIC_MIN_SCALE)) { + const char* message = "NUMERIC scale must be between -84 and 1000"; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("NUMERIC scale must be between -84 and 1000"), + parser_errposition(location))); + } + } +} + static Node *make_node_from_scanbuf(int start_pos, int end_pos, core_yyscan_t yyscanner) { base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner); diff --git a/src/common/backend/utils/adt/format_type.cpp b/src/common/backend/utils/adt/format_type.cpp index b82c181e87..63108169de 100644 --- a/src/common/backend/utils/adt/format_type.cpp +++ b/src/common/backend/utils/adt/format_type.cpp @@ -373,6 +373,9 @@ char* printTypmod(const char* typname, int32 typmod, Oid typmodout) } else { /* Use the type-specific typmodout procedure */ char* tmstr = NULL; + if (strcmp(typname, "numeric") == 0 && ((int16)(((uint32)(typmod - VARHDRSZ)) & 0XFFFF)) == PG_INT16_MIN) { + typname = "float"; + } tmstr = DatumGetCString(OidFunctionCall1(typmodout, Int32GetDatum(typmod))); res = psnprintf(strlen(typname) + strlen(tmstr) + 1, "%s%s", typname, tmstr); diff --git a/src/common/backend/utils/adt/numeric.cpp b/src/common/backend/utils/adt/numeric.cpp index dfffb2070b..71748c0b81 100644 --- a/src/common/backend/utils/adt/numeric.cpp +++ b/src/common/backend/utils/adt/numeric.cpp @@ -215,6 +215,7 @@ static char* output_get_str_from_var(NumericVar* var); static char* get_str_from_var_sci(NumericVar* var, int rscale); static void apply_typmod(NumericVar* var, int32 typmod); +static void round_float_var(NumericVar* var, int precision); static int32 numericvar_to_int32(const NumericVar* var, bool can_ignore = false); static double numericvar_to_double_no_overflow(NumericVar* var); @@ -725,8 +726,8 @@ Datum numeric_support(PG_FUNCTION_ARGS) Node* source = 
(Node*)linitial(expr->args); int32 old_typmod = exprTypmod(source); int32 new_typmod = DatumGetInt32(((Const*)typmod)->constvalue); - int32 old_scale = (int32)(((uint32)(old_typmod - VARHDRSZ)) & 0xffff); - int32 new_scale = (int32)(((uint32)(new_typmod - VARHDRSZ)) & 0xffff); + int32 old_scale = (int16)(((uint32)(old_typmod - VARHDRSZ)) & 0xffff); + int32 new_scale = (int16)(((uint32)(new_typmod - VARHDRSZ)) & 0xffff); int32 old_precision = (int32)(((uint32)(old_typmod - VARHDRSZ)) >> 16 & 0xffff); int32 new_precision = (int32)(((uint32)(new_typmod - VARHDRSZ)) >> 16 & 0xffff); @@ -793,7 +794,7 @@ Datum numeric(PG_FUNCTION_ARGS) */ tmp_typmod = typmod - VARHDRSZ; precision = (tmp_typmod >> 16) & 0xffff; - scale = tmp_typmod & 0xffff; + scale = (int16)(tmp_typmod & 0xffff); maxdigits = precision - scale; /* @@ -842,15 +843,17 @@ Datum numerictypmodin(PG_FUNCTION_ARGS) tl = ArrayGetIntegerTypmods(ta, &n); if (n == 2) { - if (tl[0] < 1 || tl[0] > NUMERIC_MAX_PRECISION) + if (tl[0] < 1 || tl[0] > NUMERIC_MAX_PRECISION) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("NUMERIC precision %d must be between 1 and %d", tl[0], NUMERIC_MAX_PRECISION))); - if (tl[1] < 0 || tl[1] > tl[0]) + } + if (!(DB_IS_CMPT(A_FORMAT) && t_thrd.proc->workingVersionNum >= FLOAT_VERSION_NUMBER) && (tl[1] < 0 || tl[1] > tl[0])) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("NUMERIC scale %d must be between 0 and precision %d", tl[1], tl[0]))); - typmod = (int32)(((uint32)(tl[0]) << 16) | (uint32)(tl[1])) + VARHDRSZ; + } + typmod = (int32)(((uint32)(tl[0]) << 16) | (uint16)(tl[1])) + VARHDRSZ; } else if (n == 1) { if (tl[0] < 1 || tl[0] > NUMERIC_MAX_PRECISION) ereport(ERROR, @@ -871,14 +874,16 @@ Datum numerictypmodout(PG_FUNCTION_ARGS) const size_t len = 64; int32 typmod = PG_GETARG_INT32(0); char* res = (char*)palloc(len + 1); + int32 precision = (int32)((((uint32)(typmod - VARHDRSZ)) >> 16) & 0xffff); + int32 scale = (int16)(((uint32)(typmod - 
VARHDRSZ)) & 0xffff); + errno_t ret; if (typmod >= 0) { - errno_t ret = snprintf_s(res, - len + 1, - len, - "(%d,%d)", - (int32)((((uint32)(typmod - VARHDRSZ)) >> 16) & 0xffff), - (int32)(((uint32)(typmod - VARHDRSZ)) & 0xffff)); + if (scale != PG_INT16_MIN) { + ret = snprintf_s(res, len + 1, len, "(%d,%d)", precision, scale); + } else { + ret = snprintf_s(res, len + 1, len, "(%d)", precision); + } securec_check_ss(ret, "", ""); } else *res = '\0'; @@ -5224,60 +5229,110 @@ static void apply_typmod(NumericVar* var, int32 typmod) typmod -= VARHDRSZ; precision = (int32)(((uint32)(typmod) >> 16) & 0xffff); - scale = (int32)(((uint32)typmod) & 0xffff); - maxdigits = precision - scale; + scale = (int16)(((uint32)typmod) & 0xffff); + if (scale == PG_INT16_MIN && DB_IS_CMPT(A_FORMAT)) { + precision = ceil(log10(2) * precision); + + /* Round the float value to target precision (and set var->dscale) */ + round_float_var(var, precision); + } else { + maxdigits = precision - scale; - /* Round to target scale (and set var->dscale) */ - round_var(var, scale); + /* Round to target scale (and set var->dscale) */ + round_var(var, scale); - /* - * Check for overflow - note we can't do this before rounding, because - * rounding could raise the weight. Also note that the var's weight could - * be inflated by leading zeroes, which will be stripped before storage - * but perhaps might not have been yet. In any case, we must recognize a - * true zero, whose weight doesn't mean anything. 
- */ - ddigits = (var->weight + 1) * DEC_DIGITS; - if (ddigits > maxdigits) { - /* Determine true weight; and check for all-zero result */ - for (i = 0; i < var->ndigits; i++) { - NumericDigit dig = var->digits[i]; - - if (dig) { - /* Adjust for any high-order decimal zero digits */ -#if DEC_DIGITS == 4 - if (dig < 10) - ddigits -= 3; - else if (dig < 100) - ddigits -= 2; - else if (dig < 1000) - ddigits -= 1; -#elif DEC_DIGITS == 2 - if (dig < 10) - ddigits -= 1; -#elif DEC_DIGITS == 1 - /* no adjustment */ -#else -#error unsupported NBASE -#endif - if (ddigits > maxdigits) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("numeric field overflow"), - errdetail( - "A field with precision %d, scale %d must round to an absolute value less than %s%d.", - precision, - scale, - /* Display 10^0 as 1 */ - maxdigits ? "10^" : "", - maxdigits ? maxdigits : 1))); - break; + /* + * Check for overflow - note we can't do this before rounding, because + * rounding could raise the weight. Also note that the var's weight could + * be inflated by leading zeroes, which will be stripped before storage + * but perhaps might not have been yet. In any case, we must recognize a + * true zero, whose weight doesn't mean anything. 
+ */ + ddigits = (var->weight + 1) * DEC_DIGITS; + if (ddigits > maxdigits) { + /* Determine true weight; and check for all-zero result */ + for (i = 0; i < var->ndigits; i++) { + NumericDigit dig = var->digits[i]; + + if (dig) { + /* Adjust for any high-order decimal zero digits */ + #if DEC_DIGITS == 4 + if (dig < 10) + ddigits -= 3; + else if (dig < 100) + ddigits -= 2; + else if (dig < 1000) + ddigits -= 1; + #elif DEC_DIGITS == 2 + if (dig < 10) + ddigits -= 1; + #elif DEC_DIGITS == 1 + /* no adjustment */ + #else + #error unsupported NBASE + #endif + if (ddigits > maxdigits) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("numeric field overflow"), + errdetail( + "A field with precision %d, scale %d must round to an absolute value less than %s%d.", + precision, + scale, + /* Display 10^0 as 1 */ + maxdigits ? "10^" : "", + maxdigits ? maxdigits : 1))); + break; + } + ddigits -= DEC_DIGITS; } - ddigits -= DEC_DIGITS; } } } +static void round_float_var(NumericVar* var, int precision) +{ + int32 exponent; + NumericVar denominator; + NumericVar significand; + int denom_scale; + int all_di; + + if (var->ndigits > 0) { + exponent = (var->weight + 1) * DEC_DIGITS; + + exponent -= DEC_DIGITS - (int)log10(var->digits[0]); + } else { + exponent = 0; + } + + if (exponent < 0) + denom_scale = -exponent; + else + denom_scale = 0; + + init_var(&denominator); + init_var(&significand); + + /* count all significant digits */ + all_di = exponent + var->dscale + 1; + + power_var_int(&const_ten, exponent, &denominator, denom_scale); + if (all_di >= precision) { + div_var(var, &denominator, &significand, precision - 1, true); + } else { + div_var(var, &denominator, &significand, all_di - 1, true); + } + if ((significand.dscale - exponent) >= 0) { + mul_var(&significand, &denominator, var, significand.dscale - exponent); + } else { + mul_var(&significand, &denominator, var, 0); + } + + free_var(&denominator); + free_var(&significand); +} + /* * Convert 
numeric to int8, rounding if needed. * @@ -7613,8 +7668,7 @@ static void round_var(NumericVar* var, int rscale) int ndigits; int carry; - var->dscale = rscale; - + var->dscale = rscale >= 0 ? rscale : 0; /* decimal digits wanted */ di = (var->weight + 1) * DEC_DIGITS + rscale; @@ -18823,7 +18877,7 @@ static inline int make_short_numeric_of_int64_minval(_out_ Numeric result, _in_ static inline int make_short_numeric_of_zero(Numeric result, int typmod) { /// set display scale if typmod is given, otherwise is 0 at default. - int dscale = (typmod >= (int32)(VARHDRSZ)) ? ((typmod - VARHDRSZ) & 0xffff) : 0; + int dscale = (typmod >= (int32)(VARHDRSZ)) ? (int16)((typmod - VARHDRSZ) & 0xffff) : 0; SET_VARSIZE(result, NUMERIC_HDRSZ_SHORT); // length info result->choice.n_short.n_header = NUMERIC_SHORT // sign is NUMERIC_POS @@ -18912,7 +18966,7 @@ static inline int get_weight_from_ascale(int ndigits, int ascale) static int get_dscale_from_typmod(int typmod, int ascale, int last_item) { if (typmod >= (int32) (VARHDRSZ)) { - return (int32) ((uint32) (typmod - VARHDRSZ) & 0xffff); + return (int16)((uint32) (typmod - VARHDRSZ) & 0xffff); } /* @@ -19267,7 +19321,7 @@ int convert_int64_to_short_numeric_byscale(_out_ char* outBuf, _in_ int128 v, _i int sign = NUMERIC_POS; int16 digits_buf[NUMERIC_NDIGITS_UPLIMITED]; - int scale = (int32)((uint32)(typmod - VARHDRSZ) & 0xffff); + int scale = (int16)((uint32)(typmod - VARHDRSZ) & 0xffff); int scaleDiff = NUMERIC_SCALE_ADJUST(vscale) * DEC_DIGITS - vscale; Assert(scaleDiff >= 0 && scaleDiff <= MAXINT64DIGIT); v = v * ScaleMultipler[scaleDiff]; @@ -19504,7 +19558,7 @@ int convert_int128_to_short_numeric_byscale(_out_ char* outBuf, _in_ int128 v, _ v = multiple; }; - scale = (int32)((uint32)(typmod - VARHDRSZ) & 0xffff); + scale = (int16)((uint32)(typmod - VARHDRSZ) & 0xffff); ndigits = digits_buf + NUMERIC_NDIGITS_INT128_UPLIMITED - digits_ptr; weight = get_weight_from_ascale(ndigits, NUMERIC_SCALE_ADJUST(vscale)); diff --git 
a/src/common/backend/utils/adt/xml.cpp b/src/common/backend/utils/adt/xml.cpp index ab8d87967e..9a62a82ab5 100644 --- a/src/common/backend/utils/adt/xml.cpp +++ b/src/common/backend/utils/adt/xml.cpp @@ -3392,7 +3392,7 @@ static const char* map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) " \n" " \n", ((unsigned int)(typmod - VARHDRSZ) >> 16) & 0xffff, - (unsigned int)(typmod - VARHDRSZ) & 0xffff); + (short)((typmod - VARHDRSZ) & 0xffff)); break; case INT2OID: diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 8e42331b02..3904cc8820 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -75,7 +75,7 @@ bool will_shutdown = false; * NEXT | 92899 | ? | ? * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92937; +const uint32 GRAND_VERSION_NUM = 92938; /******************************************** * 2.VERSION NUM FOR EACH FEATURE @@ -206,6 +206,7 @@ const uint32 SUBLINKPULLUP_VERSION_NUM = 92094; const uint32 PARTIALPUSH_VERSION_NUM = 92087; const uint32 CURSOR_EXPRESSION_VERSION_NUMBER = 92935; +const uint32 FLOAT_VERSION_NUMBER = 92938; /* This variable indicates wheather the instance is in progress of upgrade as a whole */ diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index 313b9fdb59..d63489db8d 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -395,7 +395,8 @@ static const struct behavior_compat_entry behavior_compat_options[OPT_MAX] = { {"proc_uncheck_default_param", OPT_PROC_UNCHECK_DEFAULT_PARAM}, {"update_unusable_unique_index_on_iud", OPT_UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD}, {"prefer_parse_cursor_parentheses_as_expr", OPT_PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR}, - {"update_global_index_on_partition_change", OPT_UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE} + 
{"update_global_index_on_partition_change", OPT_UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE}, + {"float_as_numeric", OPT_FLOAT_AS_NUMERIC} }; // increase SQL_IGNORE_STRATEGY_NUM if we need more strategy diff --git a/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp b/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp index 4fc72cb5b1..bbfa080c39 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp +++ b/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp @@ -454,7 +454,7 @@ bool apply_typmod(NumericVar *var, int32 typmod, char *err_msg) typmod -= VARHDRSZ; precision = (typmod >> 16) & 0xffff; - scale = typmod & 0xffff; + scale = (int16)(typmod & 0xffff); maxdigits = precision - scale; /* Round to target scale (and set var->dscale) */ diff --git a/src/common/pl/plpgsql/src/pl_scanner.cpp b/src/common/pl/plpgsql/src/pl_scanner.cpp index 19d54fbe18..189ccdaf3a 100644 --- a/src/common/pl/plpgsql/src/pl_scanner.cpp +++ b/src/common/pl/plpgsql/src/pl_scanner.cpp @@ -488,7 +488,7 @@ void plpgsql_append_object_typename(StringInfo buf, PLpgSQL_type *var_type) char* precision = (char*)palloc(INT32_STRING_SIZE); char* scale = (char*)palloc(INT32_STRING_SIZE); pg_ltoa((int32)(((uint32)(typmod) >> 16) & 0xffff), precision); - pg_ltoa((int32)(((uint32)typmod) & 0xffff), scale); + pg_ltoa((int32)((int16)(typmod & 0xffff)), scale); appendBinaryStringInfo(buf, precision, strlen(precision)); appendBinaryStringInfo(buf, dot, 1); appendBinaryStringInfo(buf, scale, strlen(scale)); diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 228fda2d16..f6d569bad4 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -146,7 +146,7 @@ extern const uint32 PUBLICATION_DDL_VERSION_NUM; extern const uint32 PRIOR_EXPR_VERSION_NUM; extern const uint32 CURSOR_EXPRESSION_VERSION_NUMBER; extern const uint32 ROTATE_UNROTATE_VERSION_NUM; - +extern const uint32 FLOAT_VERSION_NUMBER; extern void register_backend_version(uint32 backend_version); 
extern bool contain_backend_version(uint32 version_number); @@ -214,7 +214,8 @@ extern bool contain_backend_version(uint32 version_number); #define OPT_UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD (1LL << 30) #define OPT_PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR (1LL << 31) #define OPT_UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE (1LL << 32) -#define OPT_MAX 33 +#define OPT_FLOAT_AS_NUMERIC (1LL << 33) +#define OPT_MAX 34 #define PLPSQL_OPT_FOR_LOOP 1 #define PLPSQL_OPT_OUTPARAM 2 @@ -261,7 +262,7 @@ extern bool contain_backend_version(uint32 version_number); #define UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD (u_sess->utils_cxt.behavior_compat_flags & OPT_UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD) #define PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR (u_sess->utils_cxt.behavior_compat_flags & OPT_PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR) #define UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE (u_sess->utils_cxt.behavior_compat_flags & OPT_UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE) - +#define FLOAT_AS_NUMERIC (u_sess->utils_cxt.behavior_compat_flags & OPT_FLOAT_AS_NUMERIC) /* define database compatibility Attribute */ typedef struct { diff --git a/src/include/utils/numeric.h b/src/include/utils/numeric.h index 3d495bc04e..c7c33cb0ba 100644 --- a/src/include/utils/numeric.h +++ b/src/include/utils/numeric.h @@ -154,6 +154,8 @@ typedef struct NumericData* Numeric; * dscale values will fit in 14 bits. 
*/ #define NUMERIC_MAX_PRECISION 1000 +#define NUMERIC_MAX_SCALE 1000 +#define NUMERIC_MIN_SCALE -84 /* * Internal limits on the scales chosen for calculation results diff --git a/src/test/regress/expected/test_float.out b/src/test/regress/expected/test_float.out new file mode 100644 index 0000000000..99aa7c424d --- /dev/null +++ b/src/test/regress/expected/test_float.out @@ -0,0 +1,160 @@ +create schema test_float; +set current_schema to test_float; +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +-- test normal functions +CREATE TABLE t1 (a float(1), b float(80), c float(126)); +CREATE TABLE t2 (a float); +\d t1; + Table "test_float.t1" + Column | Type | Modifiers +--------+------------+----------- + a | float(1) | + b | float(80) | + c | float(126) | + +\d t2; + Table "test_float.t2" + Column | Type | Modifiers +--------+------------+----------- + a | float(126) | + +DROP TABLE t2; +INSERT INTO t1 VALUES (0,0,0); +INSERT INTO t1 VALUES (123.4567890123456789012345678901234567890123,123.4567890123456789012345678901234567890123,123.4567890123456789012345678901234567890123); +INSERT INTO t1 VALUES (1234567890123456789012345678901234567890123,1234567890123456789012345678901234567890123,1234567890123456789012345678901234567890123); +SELECT * FROM t1; + a | b | c +---------------------------------------------+---------------------------------------------+--------------------------------------------- + 0 | 0 | 0 + 100 | 123.4567890123456789012346 | 123.45678901234567890123456789012345679 + 1000000000000000000000000000000000000000000 | 1234567890123456789012346000000000000000000 | 1234567890123456789012345678901234567900000 +(3 rows) + +DELETE FROM t1 where a = 123.4567890123456789012345678901234567890123; +DELETE FROM t1; +INSERT INTO t1 VALUES (NULL, NULL, NULL); +UPDATE t1 SET a = 999999999, b = 9999.99999999999999, c = 0.000000000999999999999999; +SELECT * FROM t1; + a | b | c 
+------------+---------------------+--------------------------- + 1000000000 | 9999.99999999999999 | .000000000999999999999999 +(1 row) + +DROP TABLE t1; +CREATE TABLE float_test ( + float2 FLOAT(2), + float10 FLOAT(10), + float20 FLOAT(20) +); +\d float_test; + Table "test_float.float_test" + Column | Type | Modifiers +---------+-----------+----------- + float2 | float(2) | + float10 | float(10) | + float20 | float(20) | + +INSERT INTO float_test (float2, float10, float20) VALUES (93.5, 93.5, 93.5); +INSERT INTO float_test (float2, float10, float20) VALUES (13884.2, 13884.2, 13884.2); +INSERT INTO float_test (float2, float10, float20) VALUES (123.456, 123.456, 123.456); +INSERT INTO float_test (float2, float10, float20) VALUES (0.00123, 0.00123, 0.00123); +INSERT INTO float_test (float2, float10, float20) VALUES (-93.5, -93.5, -93.5); +INSERT INTO float_test (float2, float10, float20) VALUES (-13884.2, -13884.2, -13884.2); +INSERT INTO float_test (float2, float10, float20) VALUES (-123.456, -123.456, -123.456); +INSERT INTO float_test (float2, float10, float20) VALUES (-0.00123, -0.00123, -0.00123); +SELECT * FROM float_test; + float2 | float10 | float20 +--------+---------+---------- + 90 | 93.5 | 93.5 + 10000 | 13880 | 13884.2 + 100 | 123.5 | 123.456 + .001 | .00123 | .00123 + -90 | -93.5 | -93.5 + -10000 | -13880 | -13884.2 + -100 | -123.5 | -123.456 + -.001 | -.00123 | -.00123 +(8 rows) + +DROP TABLE float_test; +-- boundary test +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +CREATE TABLE t1(a float(0)); +ERROR: precision for type float must be at least 1 bit +LINE 1: CREATE TABLE t1(a float(0)); + ^ +CREATE TABLE t1(a float(-432)); +ERROR: syntax error at or near "-" +LINE 1: CREATE TABLE t1(a float(-432)); + ^ +CREATE TABLE t1(a float(127)); +ERROR: precision for type float must be less than 127 bits +LINE 1: CREATE TABLE t1(a float(127)); + ^ +CREATE TABLE t1(a float(1277)); +ERROR: precision for type float must be less than 
127 bits +LINE 1: CREATE TABLE t1(a float(1277)); + ^ +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +CREATE TABLE t1(a float(1), b float(126), c numeric); +INSERT INTO t1(a) VALUES (222^222222222222222); +ERROR: value out of range: overflow +CONTEXT: referenced column: a +INSERT INTO t1(b) VALUES (222^222222222222222); +ERROR: value out of range: overflow +CONTEXT: referenced column: b +INSERT INTO t1(c) VALUES (222^222222222222222); +ERROR: value out of range: overflow +CONTEXT: referenced column: c +INSERT INTO t1(a) VALUES (-222^222222222222222); +ERROR: value out of range: overflow +CONTEXT: referenced column: a +INSERT INTO t1(b) VALUES (-222^222222222222222); +ERROR: value out of range: overflow +CONTEXT: referenced column: b +INSERT INTO t1(c) VALUES (-222^222222222222222); +ERROR: value out of range: overflow +CONTEXT: referenced column: c +INSERT INTO t1 SELECT null, null, null; +INSERT INTO t1 SELECT 'NaN', 'NaN', 'NaN'; +SELECT * FROM t1; + a | b | c +-----+-----+----- + | | + NaN | NaN | NaN +(2 rows) + +DROP TABLE t1; +-- PL/SQL test +CREATE OR REPLACE PACKAGE pak1 as +var1 float(16); +var2 float(120); +type tp_tb1 is table of var1%type; +tb1 tp_tb1; +type tp_tb2 is table of var2%type; +tb2 tp_tb2; +procedure p1; +end pak1; +/ +CREATE OR REPLACE package body pak1 as +procedure p1 as +begin +tb1 = tp_tb1(1234244, 12.32432456, 0.00000002342, -32994, -23.000345, -0.32424234); +raise info '%', tb1; +tb2 = tp_tb2(1234244, 12.32432456, 0.00000002342, -32994, -23.000345, -0.32424234); +raise info '%', tb2; +end; +end pak1; +/ +call pak1.p1(); +INFO: {1234200,12.324,.00000002342,-32994,-23,-.32424} +INFO: {1234244,12.32432456,.00000002342,-32994,-23.000345,-.32424234} + p1 +---- + +(1 row) + +DROP PACKAGE pak1; +NOTICE: drop cascades to function test_float.p1() +reset behavior_compat_options; +reset current_schema; +drop schema test_float cascade; diff --git a/src/test/regress/expected/test_numeric_with_neg_scale.out 
b/src/test/regress/expected/test_numeric_with_neg_scale.out new file mode 100644 index 0000000000..3b795b769a --- /dev/null +++ b/src/test/regress/expected/test_numeric_with_neg_scale.out @@ -0,0 +1,311 @@ +create schema numeric_negative_scale_test; +set current_schema to numeric_negative_scale_test; +set behavior_compat_options = 'truncate_numeric_tail_zero'; +-- test normal functions(valid values) +CREATE TABLE t1(a numeric(4,-3), b numeric(5,-2), c numeric(6, -1)); +CREATE TABLE t2(a numeric(3,-4), b numeric(2,-5), c numeric(1, -6)); +CREATE TABLE t3(a numeric(5, 7), b numeric(2, 4), c numeric(10, 12)); +\d t1; +Table "numeric_negative_scale_test.t1" + Column | Type | Modifiers +--------+---------------+----------- + a | numeric(4,-3) | + b | numeric(5,-2) | + c | numeric(6,-1) | + +\d t2; +Table "numeric_negative_scale_test.t2" + Column | Type | Modifiers +--------+---------------+----------- + a | numeric(3,-4) | + b | numeric(2,-5) | + c | numeric(1,-6) | + +\d t3; +Table "numeric_negative_scale_test.t3" + Column | Type | Modifiers +--------+----------------+----------- + a | numeric(5,7) | + b | numeric(2,4) | + c | numeric(10,12) | + +INSERT INTO t1 VALUES (1234567, 1234567, 1234567); +INSERT INTO t1 VALUES (123.9435435, 123.9435435, 123.9435435); +INSERT INTO t1 VALUES (0.8293453, 0.8293453, 0.8293453); +INSERT INTO t1 VALUES (-1234567, -1234567, -1234567); +INSERT INTO t1 VALUES (-123.9435435, -123.9435435, -123.9435435); +INSERT INTO t1 VALUES (-0.8293453, -0.8293453, -0.8293453); +INSERT INTO t1 VALUES ('NAN', 'NAN', 'NAN'); +INSERT INTO t2 VALUES (8452345, 8452345, 8452345); +INSERT INTO t2 VALUES (164523.021354, 164523.021354, 164523.021354); +INSERT INTO t2 VALUES (0.02218383, 0.02218383, 0.02218383); +INSERT INTO t2 VALUES (-8452345, -8452345, -8452345); +INSERT INTO t2 VALUES (-164523.021354, -164523.021354, -164523.021354); +INSERT INTO t2 VALUES (-0.02218383, -0.02218383, -0.02218383); +INSERT INTO t2 VALUES ('NAN', 'NAN', 'NAN'); +INSERT INTO t3 
VALUES (0.002343544, 0.002343544, 0.002343544); +INSERT INTO t3 VALUES (0.00943244, 0.00943244, 0.00943244); +INSERT INTO t3 VALUES (-0.002343544, -0.002343544, -0.002343544); +INSERT INTO t3 VALUES (-0.00943244, -0.00943244, -0.00943244); +INSERT INTO t3 VALUES ('NAN', 'NAN', 'NAN'); +SELECT * FROM t1; + a | b | c +----------+----------+---------- + 1235000 | 1234600 | 1234570 + 0 | 100 | 120 + 0 | 0 | 0 + -1235000 | -1234600 | -1234570 + 0 | -100 | -120 + 0 | 0 | 0 + NaN | NaN | NaN +(7 rows) + +SELECT * FROM t2; + a | b | c +----------+----------+---------- + 8450000 | 8500000 | 8000000 + 160000 | 200000 | 0 + 0 | 0 | 0 + -8450000 | -8500000 | -8000000 + -160000 | -200000 | 0 + 0 | 0 | 0 + NaN | NaN | NaN +(7 rows) + +SELECT * FROM t3; + a | b | c +-----------+--------+------------- + .0023435 | .0023 | .002343544 + .0094324 | .0094 | .00943244 + -.0023435 | -.0023 | -.002343544 + -.0094324 | -.0094 | -.00943244 + NaN | NaN | NaN +(5 rows) + +UPDATE t1 SET a = 1999999, b = 1999999 where c = 'NAN'; +UPDATE t2 SET a = 1999999, b = 1999999 where c = 'NAN'; +UPDATE t3 SET a = 0.00123458345, b = 0.00395345 where c = 'NAN'; +SELECT * FROM t1, t2, t3 where t1.c='NAN' and t2.c='NAN' and t3.c='NAN'; + a | b | c | a | b | c | a | b | c +---------+---------+-----+---------+---------+-----+----------+------+----- + 2000000 | 2000000 | NaN | 2000000 | 2000000 | NaN | .0012346 | .004 | NaN +(1 row) + +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; +-- test normal functions(invalid values) +INSERT INTO t1(a) VALUES (222222222.22222); +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale -3 must round to an absolute value less than 10^7. +CONTEXT: referenced column: a +INSERT INTO t1(b) VALUES (222222222.22222); +ERROR: numeric field overflow +DETAIL: A field with precision 5, scale -2 must round to an absolute value less than 10^7. 
+CONTEXT: referenced column: b +INSERT INTO t1(c) VALUES (222222222.22222); +ERROR: numeric field overflow +DETAIL: A field with precision 6, scale -1 must round to an absolute value less than 10^7. +CONTEXT: referenced column: c +INSERT INTO t1(a) VALUES (9999599); +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale -3 must round to an absolute value less than 10^7. +CONTEXT: referenced column: a +INSERT INTO t1(b) VALUES (9999959); +ERROR: numeric field overflow +DETAIL: A field with precision 5, scale -2 must round to an absolute value less than 10^7. +CONTEXT: referenced column: b +INSERT INTO t1(c) VALUES (9999995); +ERROR: numeric field overflow +DETAIL: A field with precision 6, scale -1 must round to an absolute value less than 10^7. +CONTEXT: referenced column: c +INSERT INTO t2(a) VALUES (222222222.22222); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale -4 must round to an absolute value less than 10^7. +CONTEXT: referenced column: a +INSERT INTO t2(b) VALUES (222222222.22222); +ERROR: numeric field overflow +DETAIL: A field with precision 2, scale -5 must round to an absolute value less than 10^7. +CONTEXT: referenced column: b +INSERT INTO t2(c) VALUES (222222222.22222); +ERROR: numeric field overflow +DETAIL: A field with precision 1, scale -6 must round to an absolute value less than 10^7. +CONTEXT: referenced column: c +INSERT INTO t2(a) VALUES (9995999); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale -4 must round to an absolute value less than 10^7. +CONTEXT: referenced column: a +INSERT INTO t2(b) VALUES (9959999); +ERROR: numeric field overflow +DETAIL: A field with precision 2, scale -5 must round to an absolute value less than 10^7. +CONTEXT: referenced column: b +INSERT INTO t2(c) VALUES (9599999); +ERROR: numeric field overflow +DETAIL: A field with precision 1, scale -6 must round to an absolute value less than 10^7. 
+CONTEXT: referenced column: c +INSERT INTO t3(a) VALUES (0.123234214); +ERROR: numeric field overflow +DETAIL: A field with precision 5, scale 7 must round to an absolute value less than 10^-2. +CONTEXT: referenced column: a +INSERT INTO t3(b) VALUES (0.123234214); +ERROR: numeric field overflow +DETAIL: A field with precision 2, scale 4 must round to an absolute value less than 10^-2. +CONTEXT: referenced column: b +INSERT INTO t3(c) VALUES (0.123234214); +ERROR: numeric field overflow +DETAIL: A field with precision 10, scale 12 must round to an absolute value less than 10^-2. +CONTEXT: referenced column: c +INSERT INTO t3(a) VALUES (0.00999995); +ERROR: numeric field overflow +DETAIL: A field with precision 5, scale 7 must round to an absolute value less than 10^-2. +CONTEXT: referenced column: a +INSERT INTO t3(b) VALUES (0.00995); +ERROR: numeric field overflow +DETAIL: A field with precision 2, scale 4 must round to an absolute value less than 10^-2. +CONTEXT: referenced column: b +INSERT INTO t3(c) VALUES (0.0099999999995); +ERROR: numeric field overflow +DETAIL: A field with precision 10, scale 12 must round to an absolute value less than 10^-2. 
+CONTEXT: referenced column: c +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +-- boundary test +CREATE TABLE t1(a numeric(1,1000)); +CREATE TABLE t2(a numeric(1,-84)); +\d t1; +Table "numeric_negative_scale_test.t1" + Column | Type | Modifiers +--------+-----------------+----------- + a | numeric(1,1000) | + +\d t2; +Table "numeric_negative_scale_test.t2" + Column | Type | Modifiers +--------+----------------+----------- + a | numeric(1,-84) | + +DROP TABLE t1; +DROP TABLE t2; +CREATE TABLE t3(a numeric(1,1001)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a numeric(1,1001)); + ^ +CREATE TABLE t3(a numeric(1,-85)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a numeric(1,-85)); + ^ +CREATE TABLE t3(a numeric(1,1001)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a numeric(1,1001)); + ^ +CREATE TABLE t3(a numeric(1,-85)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a numeric(1,-85)); + ^ +CREATE TABLE t3(a number(1,1001)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a number(1,1001)); + ^ +CREATE TABLE t3(a number(1,-85)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a number(1,-85)); + ^ +CREATE TABLE t3(a dec(1,1001)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a dec(1,1001)); + ^ +CREATE TABLE t3(a dec(1,-85)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a dec(1,-85)); + ^ +CREATE TABLE t3(a decimal(1,1001)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a decimal(1,1001)); + ^ +CREATE TABLE t3(a decimal(1,-85)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a decimal(1,-85)); + ^ +CREATE TABLE t3(a integer(1,1001)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a integer(1,1001)); + ^ +CREATE TABLE t3(a integer(1,-85)); 
+ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a integer(1,-85)); + ^ +CREATE TABLE t3(a numeric(1,-32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a numeric(1,-32768)); + ^ +CREATE TABLE t3(a numeric(1,32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a numeric(1,32768)); + ^ +CREATE TABLE t3(a number(1,-32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a number(1,-32768)); + ^ +CREATE TABLE t3(a number(1,32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a number(1,32768)); + ^ +CREATE TABLE t3(a dec(1,-32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a dec(1,-32768)); + ^ +CREATE TABLE t3(a dec(1,32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a dec(1,32768)); + ^ +CREATE TABLE t3(a decimal(1,-32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a decimal(1,-32768)); + ^ +CREATE TABLE t3(a decimal(1,32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a decimal(1,32768)); + ^ +CREATE TABLE t3(a integer(1,-32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a integer(1,-32768)); + ^ +CREATE TABLE t3(a integer(1,32768)); +ERROR: NUMERIC scale must be between -84 and 1000 +LINE 1: CREATE TABLE t3(a integer(1,32768)); + ^ +-- PL/SQL test +CREATE OR REPLACE PACKAGE pak1 as +var1 numeric(3,-4); +var2 numeric(5, 6); +type tp_tb1 is table of var1%type; +tb1 tp_tb1; +type tp_tb2 is table of var2%type; +tb2 tp_tb2; +procedure p1; +end pak1; +/ +CREATE OR REPLACE package body pak1 as +procedure p1 as +begin +tb1 = tp_tb1(1235234, 3241235.32432456, 0.00000002342, -1235234, -3241235.32432456, -0.00000002342); +raise info '%', tb1; +tb2 = tp_tb2(0.0123456878, 0.000565293244, -0.0123456878, -0.000565293244); +raise info '%', tb2; +end; +end 
pak1; +/ +call pak1.p1(); +INFO: {1240000,3240000,0,-1240000,-3240000,0} +INFO: {.012346,.000565,-.012346,-.000565} + p1 +---- + +(1 row) + +DROP PACKAGE pak1; +NOTICE: drop cascades to function numeric_negative_scale_test.p1() +reset behavior_compat_options; +reset current_schema; +drop schema numeric_negative_scale_test cascade; diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index be351e8af4..ea68f5f4e5 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -469,6 +469,8 @@ test: select_into subselect_part2 gs_aggregate test: holdable_cursor cursor_expression cursor_expression_dump #test: portals_p2 window tsearch temp__6 col_subplan_base_2 +test: test_float test_numeric_with_neg_scale + test: alter_table_000 alter_table_002 alter_table_003 alter_table_modify #test: alter_table_001 alter_table_modify_ustore test: alter_table_modify_ltt alter_table_modify_gtt table_constraint diff --git a/src/test/regress/sql/test_float.sql b/src/test/regress/sql/test_float.sql new file mode 100644 index 0000000000..7d43409ced --- /dev/null +++ b/src/test/regress/sql/test_float.sql @@ -0,0 +1,86 @@ +create schema test_float; +set current_schema to test_float; + +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +-- test normal functions +CREATE TABLE t1 (a float(1), b float(80), c float(126)); +CREATE TABLE t2 (a float); +\d t1; +\d t2; +DROP TABLE t2; +INSERT INTO t1 VALUES (0,0,0); +INSERT INTO t1 VALUES (123.4567890123456789012345678901234567890123,123.4567890123456789012345678901234567890123,123.4567890123456789012345678901234567890123); +INSERT INTO t1 VALUES (1234567890123456789012345678901234567890123,1234567890123456789012345678901234567890123,1234567890123456789012345678901234567890123); +SELECT * FROM t1; +DELETE FROM t1 where a = 123.4567890123456789012345678901234567890123; +DELETE FROM t1; +INSERT INTO t1 VALUES (NULL, NULL, NULL); +UPDATE t1 SET a = 
999999999, b = 9999.99999999999999, c = 0.000000000999999999999999; +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE float_test ( + float2 FLOAT(2), + float10 FLOAT(10), + float20 FLOAT(20) +); +\d float_test; +INSERT INTO float_test (float2, float10, float20) VALUES (93.5, 93.5, 93.5); +INSERT INTO float_test (float2, float10, float20) VALUES (13884.2, 13884.2, 13884.2); +INSERT INTO float_test (float2, float10, float20) VALUES (123.456, 123.456, 123.456); +INSERT INTO float_test (float2, float10, float20) VALUES (0.00123, 0.00123, 0.00123); +INSERT INTO float_test (float2, float10, float20) VALUES (-93.5, -93.5, -93.5); +INSERT INTO float_test (float2, float10, float20) VALUES (-13884.2, -13884.2, -13884.2); +INSERT INTO float_test (float2, float10, float20) VALUES (-123.456, -123.456, -123.456); +INSERT INTO float_test (float2, float10, float20) VALUES (-0.00123, -0.00123, -0.00123); +SELECT * FROM float_test; +DROP TABLE float_test; + +-- boundary test +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +CREATE TABLE t1(a float(0)); +CREATE TABLE t1(a float(-432)); +CREATE TABLE t1(a float(127)); +CREATE TABLE t1(a float(1277)); + +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +CREATE TABLE t1(a float(1), b float(126), c numeric); +INSERT INTO t1(a) VALUES (222^222222222222222); +INSERT INTO t1(b) VALUES (222^222222222222222); +INSERT INTO t1(c) VALUES (222^222222222222222); +INSERT INTO t1(a) VALUES (-222^222222222222222); +INSERT INTO t1(b) VALUES (-222^222222222222222); +INSERT INTO t1(c) VALUES (-222^222222222222222); +INSERT INTO t1 SELECT null, null, null; +INSERT INTO t1 SELECT 'NaN', 'NaN', 'NaN'; +SELECT * FROM t1; +DROP TABLE t1; + +-- PL/SQL test + +CREATE OR REPLACE PACKAGE pak1 as +var1 float(16); +var2 float(120); +type tp_tb1 is table of var1%type; +tb1 tp_tb1; +type tp_tb2 is table of var2%type; +tb2 tp_tb2; +procedure p1; +end pak1; +/ +CREATE OR REPLACE package body pak1 as 
+procedure p1 as +begin +tb1 = tp_tb1(1234244, 12.32432456, 0.00000002342, -32994, -23.000345, -0.32424234); +raise info '%', tb1; +tb2 = tp_tb2(1234244, 12.32432456, 0.00000002342, -32994, -23.000345, -0.32424234); +raise info '%', tb2; +end; +end pak1; +/ +call pak1.p1(); +DROP PACKAGE pak1; + +reset behavior_compat_options; +reset current_schema; +drop schema test_float cascade; \ No newline at end of file diff --git a/src/test/regress/sql/test_numeric_with_neg_scale.sql b/src/test/regress/sql/test_numeric_with_neg_scale.sql new file mode 100644 index 0000000000..7b57dae6f0 --- /dev/null +++ b/src/test/regress/sql/test_numeric_with_neg_scale.sql @@ -0,0 +1,123 @@ +create schema numeric_negative_scale_test; +set current_schema to numeric_negative_scale_test; + +set behavior_compat_options = 'truncate_numeric_tail_zero'; +-- test normal functions(valid values) +CREATE TABLE t1(a numeric(4,-3), b numeric(5,-2), c numeric(6, -1)); +CREATE TABLE t2(a numeric(3,-4), b numeric(2,-5), c numeric(1, -6)); +CREATE TABLE t3(a numeric(5, 7), b numeric(2, 4), c numeric(10, 12)); +\d t1; +\d t2; +\d t3; + +INSERT INTO t1 VALUES (1234567, 1234567, 1234567); +INSERT INTO t1 VALUES (123.9435435, 123.9435435, 123.9435435); +INSERT INTO t1 VALUES (0.8293453, 0.8293453, 0.8293453); +INSERT INTO t1 VALUES (-1234567, -1234567, -1234567); +INSERT INTO t1 VALUES (-123.9435435, -123.9435435, -123.9435435); +INSERT INTO t1 VALUES (-0.8293453, -0.8293453, -0.8293453); +INSERT INTO t1 VALUES ('NAN', 'NAN', 'NAN'); +INSERT INTO t2 VALUES (8452345, 8452345, 8452345); +INSERT INTO t2 VALUES (164523.021354, 164523.021354, 164523.021354); +INSERT INTO t2 VALUES (0.02218383, 0.02218383, 0.02218383); +INSERT INTO t2 VALUES (-8452345, -8452345, -8452345); +INSERT INTO t2 VALUES (-164523.021354, -164523.021354, -164523.021354); +INSERT INTO t2 VALUES (-0.02218383, -0.02218383, -0.02218383); +INSERT INTO t2 VALUES ('NAN', 'NAN', 'NAN'); +INSERT INTO t3 VALUES (0.002343544, 0.002343544, 0.002343544); 
+INSERT INTO t3 VALUES (0.00943244, 0.00943244, 0.00943244); +INSERT INTO t3 VALUES (-0.002343544, -0.002343544, -0.002343544); +INSERT INTO t3 VALUES (-0.00943244, -0.00943244, -0.00943244); +INSERT INTO t3 VALUES ('NAN', 'NAN', 'NAN'); +SELECT * FROM t1; +SELECT * FROM t2; +SELECT * FROM t3; +UPDATE t1 SET a = 1999999, b = 1999999 where c = 'NAN'; +UPDATE t2 SET a = 1999999, b = 1999999 where c = 'NAN'; +UPDATE t3 SET a = 0.00123458345, b = 0.00395345 where c = 'NAN'; +SELECT * FROM t1, t2, t3 where t1.c='NAN' and t2.c='NAN' and t3.c='NAN'; +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; + +-- test normal functions(invalid values) +INSERT INTO t1(a) VALUES (222222222.22222); +INSERT INTO t1(b) VALUES (222222222.22222); +INSERT INTO t1(c) VALUES (222222222.22222); +INSERT INTO t1(a) VALUES (9999599); +INSERT INTO t1(b) VALUES (9999959); +INSERT INTO t1(c) VALUES (9999995); +INSERT INTO t2(a) VALUES (222222222.22222); +INSERT INTO t2(b) VALUES (222222222.22222); +INSERT INTO t2(c) VALUES (222222222.22222); +INSERT INTO t2(a) VALUES (9995999); +INSERT INTO t2(b) VALUES (9959999); +INSERT INTO t2(c) VALUES (9599999); +INSERT INTO t3(a) VALUES (0.123234214); +INSERT INTO t3(b) VALUES (0.123234214); +INSERT INTO t3(c) VALUES (0.123234214); +INSERT INTO t3(a) VALUES (0.00999995); +INSERT INTO t3(b) VALUES (0.00995); +INSERT INTO t3(c) VALUES (0.0099999999995); +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; + +-- boundary test +CREATE TABLE t1(a numeric(1,1000)); +CREATE TABLE t2(a numeric(1,-84)); +\d t1; +\d t2; +DROP TABLE t1; +DROP TABLE t2; + +CREATE TABLE t3(a numeric(1,1001)); +CREATE TABLE t3(a numeric(1,-85)); +CREATE TABLE t3(a numeric(1,1001)); +CREATE TABLE t3(a numeric(1,-85)); +CREATE TABLE t3(a number(1,1001)); +CREATE TABLE t3(a number(1,-85)); +CREATE TABLE t3(a dec(1,1001)); +CREATE TABLE t3(a dec(1,-85)); +CREATE TABLE t3(a decimal(1,1001)); +CREATE TABLE t3(a decimal(1,-85)); +CREATE TABLE t3(a integer(1,1001)); +CREATE TABLE t3(a integer(1,-85)); 
+CREATE TABLE t3(a numeric(1,-32768)); +CREATE TABLE t3(a numeric(1,32768)); +CREATE TABLE t3(a number(1,-32768)); +CREATE TABLE t3(a number(1,32768)); +CREATE TABLE t3(a dec(1,-32768)); +CREATE TABLE t3(a dec(1,32768)); +CREATE TABLE t3(a decimal(1,-32768)); +CREATE TABLE t3(a decimal(1,32768)); +CREATE TABLE t3(a integer(1,-32768)); +CREATE TABLE t3(a integer(1,32768)); +-- PL/SQL test + +CREATE OR REPLACE PACKAGE pak1 as +var1 numeric(3,-4); +var2 numeric(5, 6); +type tp_tb1 is table of var1%type; +tb1 tp_tb1; +type tp_tb2 is table of var2%type; +tb2 tp_tb2; +procedure p1; +end pak1; +/ +CREATE OR REPLACE package body pak1 as +procedure p1 as +begin +tb1 = tp_tb1(1235234, 3241235.32432456, 0.00000002342, -1235234, -3241235.32432456, -0.00000002342); +raise info '%', tb1; +tb2 = tp_tb2(0.0123456878, 0.000565293244, -0.0123456878, -0.000565293244); +raise info '%', tb2; +end; +end pak1; +/ +call pak1.p1(); +DROP PACKAGE pak1; + +reset behavior_compat_options; +reset current_schema; +drop schema numeric_negative_scale_test cascade; \ No newline at end of file -- Gitee From 52668cbc9f2755705ed477aa039fa307f1f68b84 Mon Sep 17 00:00:00 2001 From: yaoxin Date: Wed, 3 Jul 2024 17:10:09 +0800 Subject: [PATCH 013/347] =?UTF-8?q?=E5=A4=8Dselect=20for=20update/share?= =?UTF-8?q?=E8=BF=87=E7=A8=8B=E4=B8=AD=E8=A1=8C=E7=BA=A7=E9=94=81=E8=A2=AB?= =?UTF-8?q?=E6=B8=85=E7=90=86=E5=AF=BC=E8=87=B4=E5=B9=B6=E5=8F=91=E6=89=A7?= =?UTF-8?q?=E8=A1=8C=E5=BC=82=E5=B8=B8=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/ustore/knl_uheap.cpp | 4 ++-- src/gausskernel/storage/access/ustore/knl_uvisibility.cpp | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 029e20f427..1aa865ff31 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ 
b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -531,7 +531,6 @@ void UHeapPagePruneFSM(Relation relation, Buffer buffer, TransactionId fxid, Pag static ShortTransactionId UHeapTupleSetModifiedXid(Relation relation, Buffer buffer, UHeapTuple utuple, TransactionId xid) { - Assert(!UHEAP_XID_IS_LOCK(utuple->disk_tuple->flag)); TransactionId xidbase = InvalidTransactionId; ShortTransactionId tupleXid = 0; UHeapTupleCopyBaseFromPage(utuple, BufferGetPage(buffer)); @@ -3110,8 +3109,10 @@ check_tup_satisfies_update: /* oldtup should be pointing to right place in page */ Assert(oldtup.disk_tuple == (UHeapDiskTuple)UPageGetRowData(page, lp)); + int16 tmpLockInfo = oldtup.disk_tuple->flag & SINGLE_LOCKER_INFOMASK; UHeapTupleHeaderSetTDSlot(oldtup.disk_tuple, oldtupNewTransSlot); oldtup.disk_tuple->flag &= ~UHEAP_VIS_STATUS_MASK; + oldtup.disk_tuple->flag |= tmpLockInfo; oldtup.disk_tuple->flag |= infomaskOldTuple; tupleXid = UHeapTupleSetModifiedXid(relation, buffer, &oldtup, fxid); @@ -3121,7 +3122,6 @@ check_tup_satisfies_update: uheaptup->disk_tuple->flag |= infomaskNewTuple; uheaptup->xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; if (buffer == newbuf) { - Assert(!UHEAP_XID_IS_LOCK(uheaptup->disk_tuple->flag)); uheaptup->disk_tuple->flag |= SINGLE_LOCKER_XID_IS_TRANS; UHeapTupleSetRawXid(uheaptup, tupleXid); } else { diff --git a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp index b5340c9984..0fe94fd285 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp @@ -73,7 +73,6 @@ static UHeapTupleStatus UHeapTupleGetStatus(const UHeapTuple utup) return UHEAPTUPLESTATUS_MULTI_LOCKED; } else if ((SINGLE_LOCKER_XID_IS_EXCL_LOCKED(infomask) || SINGLE_LOCKER_XID_IS_SHR_LOCKED(infomask)) && TransactionIdIsNormal(locker) && !TransactionIdOlderThanFrozenXid(locker)) { - Assert(!UHEAP_XID_IS_TRANS(utuple->flag)); return 
UHEAPTUPLESTATUS_LOCKED; // locked by select-for-update or select-for-share } else if (infomask & UHEAP_INPLACE_UPDATED) { return UHEAPTUPLESTATUS_INPLACE_UPDATED; // modified or locked by lock-for-update -- Gitee From d0020da14d7962d6dfbe22824c8e3b18f81419a3 Mon Sep 17 00:00:00 2001 From: yaoxin Date: Thu, 4 Jul 2024 20:35:59 +0800 Subject: [PATCH 014/347] support vacuum full --- src/gausskernel/optimizer/commands/cluster.cpp | 3 +++ src/gausskernel/optimizer/commands/vacuum.cpp | 10 ++-------- .../storage/access/heap/rewriteheap.cpp | 2 +- .../timecapsule_partition_ustore_test_1.source | 1 - ...ustore_subpartition_vacuum_partition.source | 18 +++++++++++++++--- 5 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/gausskernel/optimizer/commands/cluster.cpp b/src/gausskernel/optimizer/commands/cluster.cpp index d4d62aa085..ecac42c7c5 100755 --- a/src/gausskernel/optimizer/commands/cluster.cpp +++ b/src/gausskernel/optimizer/commands/cluster.cpp @@ -2403,6 +2403,8 @@ static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, int * Hence freeze xid should be CurrentTransactionId */ FreezeXid = GetCurrentTransactionId(); + OldestXmin = pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid); + MultiXactFrzLimit = GetOldestMultiXactId(); } /* return selected value to caller */ @@ -2567,6 +2569,7 @@ static void copyPartitionHeapData(Relation newHeap, Relation oldHeap, Oid indexO */ freezeXid = GetCurrentTransactionId(); freezeMulti = GetOldestMultiXactId(); + oldestXmin = pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid); } /* return selected value to caller */ diff --git a/src/gausskernel/optimizer/commands/vacuum.cpp b/src/gausskernel/optimizer/commands/vacuum.cpp index bc82cd65a0..df6f632ec7 100644 --- a/src/gausskernel/optimizer/commands/vacuum.cpp +++ b/src/gausskernel/optimizer/commands/vacuum.cpp @@ -2669,16 +2669,10 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast) ereport(messageLevel, 
(errmsg("skipping \"%s\" --- foreign table does not support vacuum", RelationGetRelationName(onerel)))); } - } else if ((vacstmt->options & VACOPT_FULL) && RelationIsUstoreFormat(onerel)) { + } else if ((vacstmt->options & VACOPT_FULL) && (vacstmt->flags & VACFLG_SIMPLE_HEAP)) { #else - if ((vacstmt->options & VACOPT_FULL) && RelationIsUstoreFormat(onerel)) { + if ((vacstmt->options & VACOPT_FULL) && (vacstmt->flags & VACFLG_SIMPLE_HEAP)) { #endif - if(!OidIsValid(relationid)){ - ereport(INFO, (errmsg("skipping \"%s\" --- Don't vacuum full ustore table," - "this feature to be released in the future.", - RelationGetRelationName(onerel)))); - } - } else if ((vacstmt->options & VACOPT_FULL) && (vacstmt->flags & VACFLG_SIMPLE_HEAP)) { bool is_hdfs_rel = RelationIsPAXFormat(onerel); if (is_hdfs_rel) { ereport(LOG, (errmsg("vacuum full for DFS table: %s", onerel->rd_rel->relname.data))); diff --git a/src/gausskernel/storage/access/heap/rewriteheap.cpp b/src/gausskernel/storage/access/heap/rewriteheap.cpp index 63c8503908..5c7231abb9 100644 --- a/src/gausskernel/storage/access/heap/rewriteheap.cpp +++ b/src/gausskernel/storage/access/heap/rewriteheap.cpp @@ -1333,7 +1333,7 @@ static void RawUHeapInsert(RewriteState state, UHeapTuple tup) } else { uheaptup = tup; } - len = MAXALIGN(uheaptup->disk_tuple_size); /* be conservative */ + len = SHORTALIGN(uheaptup->disk_tuple_size); /* be conservative */ /* * If we're gonna fail for oversize tuple, do it right away */ diff --git a/src/test/regress/output/timecapsule_partition_ustore_test_1.source b/src/test/regress/output/timecapsule_partition_ustore_test_1.source index 454c833790..0e1639b708 100644 --- a/src/test/regress/output/timecapsule_partition_ustore_test_1.source +++ b/src/test/regress/output/timecapsule_partition_ustore_test_1.source @@ -598,7 +598,6 @@ select pg_sleep(4); insert into t_timecapsule_test_tmp select 8, now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn(); vacuum full PARTITION_TABLE_001; -INFO: 
skipping "partition_table_001" --- Don't vacuum full ustore table,this feature to be released in the future. drop table if exists PARTITION_TABLE_001 cascade; drop tablespace if exists example1; drop tablespace if exists example2; diff --git a/src/test/regress/output/ustore_subpartition_vacuum_partition.source b/src/test/regress/output/ustore_subpartition_vacuum_partition.source index 5cd2a67145..6607e71608 100644 --- a/src/test/regress/output/ustore_subpartition_vacuum_partition.source +++ b/src/test/regress/output/ustore_subpartition_vacuum_partition.source @@ -65,7 +65,19 @@ There were 0 unused item pointers. CPU 0.00s/0.00u sec elapsed 0.00 sec. --vacuum option all VACUUM (FULL, VERBOSE, FREEZE) range_list1 PARTITION (p_201901); +--?INFO: vacuuming "ustore_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +--?INFO: vacuuming "ustore_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. VACUUM (FULL, VERBOSE, FREEZE) range_list1 SUBPARTITION (p_201902_a); +--?INFO: vacuuming "ustore_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. -- --2. 
test the actual work -- @@ -138,7 +150,7 @@ VACUUM FULL range_list_sales1 PARTITION (customer3); SELECT pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 122880 + 81920 (1 row) --delete & insert @@ -153,7 +165,7 @@ INSERT INTO range_list_sales1 SELECT generate_series(1,1000), SELECT pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 163840 + 122880 (1 row) --vacuum full subpartition @@ -168,7 +180,7 @@ VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); SELECT pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 163840 + 81920 (1 row) --check index is ok -- Gitee From edeaefd5594880ab1d2c11730d1797d5cbf87d90 Mon Sep 17 00:00:00 2001 From: zhangwh Date: Fri, 5 Jul 2024 14:55:16 +0800 Subject: [PATCH 015/347] modify code --- .../storage/access/nbtree/nbtsearch.cpp | 66 +++++++------------ 1 file changed, 23 insertions(+), 43 deletions(-) diff --git a/src/gausskernel/storage/access/nbtree/nbtsearch.cpp b/src/gausskernel/storage/access/nbtree/nbtsearch.cpp index 3576785aa7..1d59776cf3 100644 --- a/src/gausskernel/storage/access/nbtree/nbtsearch.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtsearch.cpp @@ -442,6 +442,7 @@ int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum * Check tuple has correct number of attributes. */ _bt_check_natts_correct(rel, key->heapkeyspace, page, offnum); + /* * Force result ">" if target item is first data item on an internal page * --- see NOTE above. @@ -474,51 +475,30 @@ int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum datum = index_getattr(itup, scankey->sk_attno, itupdesc, &isNull); if (likely((!(scankey->sk_flags & SK_ISNULL)) && !isNull)) { - if (scankey->sk_flags & SK_BT_DESC) { - /* btint4cmp */ - if (scankey->sk_func.fn_oid == F_BTINT4CMP) { - result = (int32)datum == (int32)scankey->sk_argument - ? 0 - : ((int32)datum > (int32)scankey->sk_argument ? 
1 : -1); - } else if (scankey->sk_func.fn_oid == F_BTINT8CMP) { - result = (int64)datum == (int64)scankey->sk_argument - ? 0 - : ((int64)datum > (int64)scankey->sk_argument ? 1 : -1); - } else if (scankey->sk_func.fn_oid == F_BTINT84CMP) { - result = (int64)datum == (int64)(int32)scankey->sk_argument - ? 0 - : ((int64)datum > (int64)(int32)scankey->sk_argument ? 1 : -1); - } else if (scankey->sk_func.fn_oid == F_BTINT48CMP) { - result = (int64)(int32)datum == (int64)scankey->sk_argument - ? 0 - : ((int64)(int32)datum > (int64)scankey->sk_argument ? 1 : -1); - } else { - result = DatumGetInt32( - FunctionCall2Coll(&scankey->sk_func, scankey->sk_collation, datum, scankey->sk_argument)); - } + int8 multiplier = (scankey->sk_flags & SK_BT_DESC) ? 1 : -1; + /* btint4cmp */ + if (scankey->sk_func.fn_oid == F_BTINT4CMP) { + result = (int32)datum == (int32)scankey->sk_argument + ? 0 + : ((int32)datum > (int32)scankey->sk_argument ? 1 : -1); + } else if (scankey->sk_func.fn_oid == F_BTINT8CMP) { + result = (int64)datum == (int64)scankey->sk_argument + ? 0 + : ((int64)datum > (int64)scankey->sk_argument ? 1 : -1); + } else if (scankey->sk_func.fn_oid == F_BTINT84CMP) { + result = (int64)datum == (int64)(int32)scankey->sk_argument + ? 0 + : ((int64)datum > (int64)(int32)scankey->sk_argument ? 1 : -1); + } else if (scankey->sk_func.fn_oid == F_BTINT48CMP) { + result = (int64)(int32)datum == (int64)scankey->sk_argument + ? 0 + : ((int64)(int32)datum > (int64)scankey->sk_argument ? 1 : -1); } else { - /* btint4cmp */ - if (scankey->sk_func.fn_oid == F_BTINT4CMP) { - result = (int32)datum == (int32)scankey->sk_argument - ? 0 - : ((int32)datum > (int32)scankey->sk_argument ? -1 : 1); - } else if (scankey->sk_func.fn_oid == F_BTINT8CMP) { - result = (int64)datum == (int64)scankey->sk_argument - ? 0 - : ((int64)datum > (int64)scankey->sk_argument ? -1 : 1); - } else if (scankey->sk_func.fn_oid == F_BTINT84CMP) { - result = (int64)datum == (int64)(int32)scankey->sk_argument - ? 
0 - : ((int64)datum > (int64)(int32)scankey->sk_argument ? -1 : 1); - } else if (scankey->sk_func.fn_oid == F_BTINT48CMP) { - result = (int64)(int32)datum == (int64)scankey->sk_argument - ? 0 - : ((int64)(int32)datum > (int64)scankey->sk_argument ? -1 : 1); - } else { - result = -(DatumGetInt32( - FunctionCall2Coll(&scankey->sk_func, scankey->sk_collation, datum, scankey->sk_argument))); - } + result = DatumGetInt32( + FunctionCall2Coll(&scankey->sk_func, scankey->sk_collation, datum, scankey->sk_argument)); } + + result *= multiplier; } else { if (scankey->sk_flags & SK_ISNULL) { /* key is NULL */ if (isNull) -- Gitee From 43664fcb68006aac921f5ffa384a87552cfe6a12 Mon Sep 17 00:00:00 2001 From: sundechao Date: Fri, 5 Jul 2024 15:18:55 +0800 Subject: [PATCH 016/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9purge=20recyclebin?= =?UTF-8?q?=E6=9D=83=E9=99=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/tablecmds.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 8d7f7d942d..ea45df1670 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -32373,6 +32373,15 @@ void ExecutePurge(PurgeStmt *stmt) break; } case PURGE_RECYCLEBIN: { + Oid userId = GetUserId(); + /* + * Superusers bypass all permission checking. + * Database Security: Support seperation of privilege. 
+ */ + if (!(superuser_arg(userId) || systemDBA_arg(userId))) { + ereport(ERROR, + (errmsg("Only superuser can do purge recyclebin operation."))); + } RbCltPurgeRecyclebin(); break; } -- Gitee From ce46eac2620147b15854c38a5da0649d09b4db2f Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Mon, 8 Jul 2024 09:57:11 +0800 Subject: [PATCH 017/347] =?UTF-8?q?pipelined=E9=9D=9E=E8=87=AA=E6=B2=BB?= =?UTF-8?q?=E4=BA=8B=E5=8A=A1=E8=B0=83=E7=94=A8DML=E8=AF=AD=E5=8F=A5?= =?UTF-8?q?=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/gram.y | 2 +- src/common/pl/plpgsql/src/pl_comp.cpp | 5 +++ src/common/pl/plpgsql/src/pl_exec.cpp | 6 +++ src/common/pl/plpgsql/src/pl_handler.cpp | 7 ++++ .../process/threadpool/knl_session.cpp | 1 + src/gausskernel/runtime/executor/spi.cpp | 15 +++++++ src/include/knl/knl_session.h | 1 + .../expected/plpgsql/plpgsql_normal.out | 26 ++++++++++++- .../expected/plpgsql/plpgsql_unsupported.out | 39 ++++++++++++++++--- .../regress/sql/plpgsql/plpgsql_normal.sql | 21 ++++++++++ .../sql/plpgsql/plpgsql_unsupported.sql | 24 ++++++++++++ 11 files changed, 140 insertions(+), 7 deletions(-) diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index ca906be71c..b2ab0c606f 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -10825,7 +10825,7 @@ make_return_stmt(int location) const char* message = "pipe error"; InsertErrorMessage(message, plpgsql_yylloc); ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_SYNTAX_ERROR), - errmsg("RETURN statement in a pipelinedd function cannot contains an expression"), errdetail("%s", message), + errmsg("RETURN statement in a pipelined function cannot contains an expression"), errdetail("%s", message), errcause("A RETURN statement in a pipelined function contains an expression, \ which is not allowed. 
\ Pipelined functions must return values to the caller by using the PIPE statement."), diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp index c4338498fe..fd71cf3c7d 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -1458,6 +1458,11 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, add_dummy_return(func); } + /* pipelined function: readonly */ + if (func->is_pipelined && !func->is_autonomous) { + func->fn_readonly = true; + } + /* * Complete the function's info */ diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index eebce3b71b..8eadae6da5 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1275,6 +1275,9 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, /* * Setup the execution state */ + + u_sess->plsql_cxt.is_exec_autonomous = true; + u_sess->plsql_cxt.is_pipelined = func->is_pipelined; plpgsql_estate_setup(&estate, func, (ReturnSetInfo*)fcinfo->resultinfo); func->debug = NULL; @@ -1584,6 +1587,9 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, /* * Setup the execution state */ + + u_sess->plsql_cxt.is_exec_autonomous = false; + u_sess->plsql_cxt.is_pipelined = func->is_pipelined; plpgsql_estate_setup(&estate, func, (ReturnSetInfo*)fcinfo->resultinfo); func->debug = NULL; diff --git a/src/common/pl/plpgsql/src/pl_handler.cpp b/src/common/pl/plpgsql/src/pl_handler.cpp index c4eed74ecb..314df2a13a 100755 --- a/src/common/pl/plpgsql/src/pl_handler.cpp +++ b/src/common/pl/plpgsql/src/pl_handler.cpp @@ -870,6 +870,7 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS) Oid firstLevelPkgOid = InvalidOid; bool save_curr_status = GetCurrCompilePgObjStatus(); bool save_is_exec_autonomous = u_sess->plsql_cxt.is_exec_autonomous; + bool save_is_pipelined = u_sess->plsql_cxt.is_pipelined; PG_TRY(); { PGSTAT_START_PLSQL_TIME_RECORD(); @@ -1105,6 +1106,7 
@@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS) u_sess->plsql_cxt.need_create_depend = save_need_create_depend; SetCurrCompilePgObjStatus(save_curr_status); u_sess->plsql_cxt.is_exec_autonomous = save_is_exec_autonomous; + u_sess->plsql_cxt.is_pipelined = save_is_pipelined; /* clean stp save pointer if the outermost function is end. */ if (u_sess->SPI_cxt._connected == 0) { t_thrd.utils_cxt.STPSavedResourceOwner = NULL; @@ -1132,6 +1134,8 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS) } PG_END_TRY(); u_sess->plsql_cxt.need_create_depend = save_need_create_depend; + u_sess->plsql_cxt.is_exec_autonomous = save_is_exec_autonomous; + u_sess->plsql_cxt.is_pipelined = save_is_pipelined; /* clean stp save pointer if the outermost function is end. */ if (u_sess->SPI_cxt._connected == 0) { t_thrd.utils_cxt.STPSavedResourceOwner = NULL; @@ -1256,6 +1260,7 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS) /* Compile the anonymous code block */ PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; + bool save_is_exec_autonomous = u_sess->plsql_cxt.is_exec_autonomous; int save_compile_status = getCompileStatus(); PG_TRY(); { @@ -1352,6 +1357,7 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS) /* reset nest plpgsql compile */ u_sess->plsql_cxt.curr_compile_context = save_compile_context; u_sess->plsql_cxt.compile_status = save_compile_status; + u_sess->plsql_cxt.is_exec_autonomous = save_is_exec_autonomous; clearCompileContextList(save_compile_list_length); /* AutonomousSession Disconnecting and releasing resources */ DestoryAutonomousSession(true); @@ -1365,6 +1371,7 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS) PG_RE_THROW(); } PG_END_TRY(); + u_sess->plsql_cxt.is_exec_autonomous = save_is_exec_autonomous; #ifndef ENABLE_MULTIPLE_NODES /* debug finished, close debug resource */ if (func->debug) { diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp index 
82abc762af..2545a587c2 100755 --- a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -876,6 +876,7 @@ static void knl_u_plpgsql_init(knl_u_plpgsql_context* plsql_cxt) plsql_cxt->createPlsqlType = CREATE_PLSQL_TYPE_END; plsql_cxt->functionStyleType = FUNCTION_STYLE_TYPE_NONE; plsql_cxt->is_pkg_compile = false; + plsql_cxt->is_pipelined = false; plsql_cxt->isCreatePkg = false; plsql_cxt->isCreatePkgFunction = false; plsql_cxt->currCompilingObjStatus = true; diff --git a/src/gausskernel/runtime/executor/spi.cpp b/src/gausskernel/runtime/executor/spi.cpp index 79cd7af0f1..43168348cf 100644 --- a/src/gausskernel/runtime/executor/spi.cpp +++ b/src/gausskernel/runtime/executor/spi.cpp @@ -94,6 +94,18 @@ static bool _SPI_checktuples(void); extern void ClearVacuumStmt(VacuumStmt *stmt); static void CopySPI_Plan(SPIPlanPtr newplan, SPIPlanPtr plan, MemoryContext plancxt); +static void pipelined_readonly_ereport() +{ + if (u_sess->plsql_cxt.is_pipelined && !u_sess->plsql_cxt.is_exec_autonomous) { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot perform a DML operation inside a query"), + errcause("DML operation like insert, update, delete or select-for-update cannot " + "be performed inside a query."), + erraction("Ensure that the offending DML operation is not performed or use an " + "autonomous transaction to perform the DML operation within the query."))); + } +} + /* =================== interface functions =================== */ int SPI_connect(CommandDest dest, void (*spiCallbackfn)(void *), void *clientData) { @@ -310,6 +322,7 @@ void SPI_stp_transaction_check(bool read_only, bool savepoint) #endif if (read_only) { + pipelined_readonly_ereport(); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("commit/rollback/savepoint is not allowed in a non-volatile function"))); /* translator: %s is a SQL statement name */ @@ -1794,6 +1807,7 @@ static Portal 
SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, ParamL Node *pstmt = (Node *)lfirst(lc); if (!CommandIsReadOnly(pstmt)) { + pipelined_readonly_ereport(); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), /* translator: %s is a SQL statement name */ errmsg("%s is not allowed in a non-volatile function", CreateCommandTag(pstmt)), @@ -2799,6 +2813,7 @@ static int _SPI_execute_plan0(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot s } if (read_only && !CommandIsReadOnly(stmt)) { + pipelined_readonly_ereport(); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), /* translator: %s is a SQL statement name */ errmsg("%s is not allowed in a non-volatile function", CreateCommandTag(stmt)))); diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index ac8ef4f289..a19e9c06e2 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -1713,6 +1713,7 @@ typedef struct knl_u_plpgsql_context { bool has_invalid_pkg; bool has_invalid_func; bool has_error; + bool is_pipelined; /* for readonly ereport */ bool is_exec_autonomous; bool in_package_function_compile; bool is_alter_compile_stmt; diff --git a/src/test/regress/expected/plpgsql/plpgsql_normal.out b/src/test/regress/expected/plpgsql/plpgsql_normal.out index 3d81a1953f..b07a56c433 100644 --- a/src/test/regress/expected/plpgsql/plpgsql_normal.out +++ b/src/test/regress/expected/plpgsql/plpgsql_normal.out @@ -300,9 +300,30 @@ select count(*) from stockpivot_pl(); 100 (1 row) +create table test(id int); +CREATE OR REPLACE FUNCTION insert_test() returns VOID LANGUAGE plpgsql AS +$BODY$ + DECLARE PRAGMA AUTONOMOUS_TRANSACTION; +begin + insert into test select * from get_table_of_int(1); +end; +$BODY$; +CREATE OR REPLACE FUNCTION get_tab_ptf_failed() returns t_tf_tab pipelined LANGUAGE plpgsql AS +$BODY$ +declare result t_tf_row; +begin + perform insert_test(); + insert into test values(5); +end; +$BODY$; +select get_tab_ptf_failed(); +ERROR: cannot perform a DML 
operation inside a query +CONTEXT: SQL statement "insert into test values(5)" +PL/pgSQL function get_tab_ptf_failed() line 5 at SQL statement +referenced column: get_tab_ptf_failed reset search_path; drop schema plpgsql_pipelined cascade; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to 15 other objects DETAIL: drop cascades to type plpgsql_pipelined.t_tf_row drop cascades to type plpgsql_pipelined.t_tf_tab drop cascades to function plpgsql_pipelined.get_tab_ptf(numeric) @@ -315,3 +336,6 @@ drop cascades to function plpgsql_pipelined.do_nothing_func(numeric) drop cascades to function plpgsql_pipelined.fuc(integer) drop cascades to table plpgsql_pipelined.stocktable drop cascades to function plpgsql_pipelined.stockpivot_pl() +drop cascades to table plpgsql_pipelined.test +drop cascades to function plpgsql_pipelined.insert_test() +drop cascades to function plpgsql_pipelined.get_tab_ptf_failed() diff --git a/src/test/regress/expected/plpgsql/plpgsql_unsupported.out b/src/test/regress/expected/plpgsql/plpgsql_unsupported.out index c78ba16536..cce3c0a2a2 100644 --- a/src/test/regress/expected/plpgsql/plpgsql_unsupported.out +++ b/src/test/regress/expected/plpgsql/plpgsql_unsupported.out @@ -68,16 +68,16 @@ CREATE OR REPLACE FUNCTION func_test(n NUMBER) RETURN _int4 PIPELINED IS BEGIN return 1; END; - +/ +ERROR: RETURN statement in a pipelined function cannot contains an expression +DETAIL: pipe error +CONTEXT: compilation of PL/pgSQL function "func_test" near line 2 -- do not call return: success CREATE OR REPLACE FUNCTION func_test(n NUMBER) RETURN _int4 PIPELINED IS BEGIN /* do not return */ END; / -ERROR: RETURN statement in a pipelinedd function cannot contains an expression -DETAIL: pipe error -CONTEXT: compilation of PL/pgSQL function "func_test" near line 2 -- no pipelined flag in package body CREATE OR REPLACE PACKAGE unsupported_pkg AS TYPE numset_tbl IS TABLE OF NUMBER; @@ -190,11 +190,40 @@ alter function alter_func_pipelined(number) 
pipelined; ERROR: syntax error at or near "pipelined" LINE 1: alter function alter_func_pipelined(number) pipelined; ^ +create or replace type tb_type_0013 as table of varchar2(2000); +drop table if exists t_pipelined_0013; +NOTICE: table "t_pipelined_0013" does not exist, skipping +create table t_pipelined_0013(c1 int); +create or replace function func_pipelined_0013(count in number) +returns tb_type_0013 pipelined language plpgsql as +$BODY$ +declare +begin +for i in 1 .. count loop +insert into t_pipelined_0013 values(i); +-- pipe row( 'insert into test values( ' || i || ') success'); +perform pg_sleep(1); +update t_pipelined_0013 set c1 = 10 where c1 = i; +end loop; +-- pipe row( 'All done!' ); +return; +end; +$BODY$; +-- cannot perform a DML operation inside a query +select func_pipelined_0013(3); +ERROR: cannot perform a DML operation inside a query +CONTEXT: SQL statement "insert into t_pipelined_0013 values(i)" +PL/pgSQL function func_pipelined_0013(numeric) line 5 at SQL statement +referenced column: func_pipelined_0013 reset search_path; drop schema plpgsql_pipelined_unsupported cascade; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 9 other objects DETAIL: drop cascades to function plpgsql_pipelined_unsupported.func_with_out_param() drop cascades to function plpgsql_pipelined_unsupported.func_with_out_param_autonm() +drop cascades to function plpgsql_pipelined_unsupported.func_test(numeric) --?.* drop cascades to function plpgsql_pipelined_unsupported.func_test(numeric) drop cascades to function plpgsql_pipelined_unsupported.alter_func_pipelined(numeric) +drop cascades to type plpgsql_pipelined_unsupported.tb_type_0013 +drop cascades to table plpgsql_pipelined_unsupported.t_pipelined_0013 +drop cascades to function plpgsql_pipelined_unsupported.func_pipelined_0013(numeric) diff --git a/src/test/regress/sql/plpgsql/plpgsql_normal.sql b/src/test/regress/sql/plpgsql/plpgsql_normal.sql index 2e279cbacc..0779602d81 100644 --- 
a/src/test/regress/sql/plpgsql/plpgsql_normal.sql +++ b/src/test/regress/sql/plpgsql/plpgsql_normal.sql @@ -149,5 +149,26 @@ END; / select count(*) from stockpivot_pl(); + +create table test(id int); +CREATE OR REPLACE FUNCTION insert_test() returns VOID LANGUAGE plpgsql AS +$BODY$ + DECLARE PRAGMA AUTONOMOUS_TRANSACTION; +begin + insert into test select * from get_table_of_int(1); +end; +$BODY$; + +CREATE OR REPLACE FUNCTION get_tab_ptf_failed() returns t_tf_tab pipelined LANGUAGE plpgsql AS +$BODY$ +declare result t_tf_row; +begin + perform insert_test(); + insert into test values(5); +end; +$BODY$; + +select get_tab_ptf_failed(); + reset search_path; drop schema plpgsql_pipelined cascade; \ No newline at end of file diff --git a/src/test/regress/sql/plpgsql/plpgsql_unsupported.sql b/src/test/regress/sql/plpgsql/plpgsql_unsupported.sql index b374e22b3e..478312dade 100644 --- a/src/test/regress/sql/plpgsql/plpgsql_unsupported.sql +++ b/src/test/regress/sql/plpgsql/plpgsql_unsupported.sql @@ -57,6 +57,7 @@ CREATE OR REPLACE FUNCTION func_test(n NUMBER) RETURN _int4 PIPELINED IS BEGIN return 1; END; +/ -- do not call return: success CREATE OR REPLACE FUNCTION func_test(n NUMBER) RETURN _int4 PIPELINED IS @@ -163,5 +164,28 @@ END; / alter function alter_func_pipelined(number) pipelined; + +create or replace type tb_type_0013 as table of varchar2(2000); +drop table if exists t_pipelined_0013; +create table t_pipelined_0013(c1 int); +create or replace function func_pipelined_0013(count in number) +returns tb_type_0013 pipelined language plpgsql as +$BODY$ +declare +begin +for i in 1 .. count loop +insert into t_pipelined_0013 values(i); +-- pipe row( 'insert into test values( ' || i || ') success'); +perform pg_sleep(1); +update t_pipelined_0013 set c1 = 10 where c1 = i; +end loop; +-- pipe row( 'All done!' 
); +return; +end; +$BODY$; + +-- cannot perform a DML operation inside a query +select func_pipelined_0013(3); + reset search_path; drop schema plpgsql_pipelined_unsupported cascade; -- Gitee From 378db1c7548a51e374c29eb809682803f7d9f098 Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Mon, 8 Jul 2024 09:58:04 +0800 Subject: [PATCH 018/347] bugfix: owner nullptr --- src/common/pl/plpgsql/src/pl_exec.cpp | 9 ++---- .../expected/plpgsql/plpgsql_normal.out | 30 ++++++++++++++++++- .../regress/sql/plpgsql/plpgsql_normal.sql | 20 +++++++++++++ 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index 8eadae6da5..3c1427f763 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -7379,13 +7379,8 @@ void plpgsql_estate_setup(PLpgSQL_execstate* estate, PLpgSQL_function* func, Ret estate->tuple_store = NULL; estate->cursor_return_data = NULL; estate->cursor_return_numbers = 0; - if (rsi != NULL) { - estate->tuple_store_cxt = rsi->econtext->ecxt_per_query_memory; - estate->tuple_store_owner = t_thrd.utils_cxt.CurrentResourceOwner; - } else { - estate->tuple_store_cxt = NULL; - estate->tuple_store_owner = NULL; - } + estate->tuple_store_cxt = rsi != NULL ? rsi->econtext->ecxt_per_query_memory : NULL; + estate->tuple_store_owner = (rsi != NULL || estate->is_pipelined) ? 
t_thrd.utils_cxt.CurrentResourceOwner : NULL; estate->rsi = rsi; estate->found_varno = func->found_varno; diff --git a/src/test/regress/expected/plpgsql/plpgsql_normal.out b/src/test/regress/expected/plpgsql/plpgsql_normal.out index b07a56c433..d68ba926a6 100644 --- a/src/test/regress/expected/plpgsql/plpgsql_normal.out +++ b/src/test/regress/expected/plpgsql/plpgsql_normal.out @@ -300,6 +300,31 @@ select count(*) from stockpivot_pl(); 100 (1 row) +-- lower workmem +set work_mem = '64kB'; +create or replace type type_0022 as (c1 integer, c2 tinyint); +create or replace type tb_type_0022 as table of type_0022; +create or replace function func_pipelined_022(count in number) + returns tb_type_0022 pipelined language plpgsql as +$BODY$ +declare result type_0022; +begin +for i in 1 .. count loop +result.c1 = 123; +result.c2 = 32; +pipe row(result); +end loop; +return; +end; +$BODY$; +select count(*) from func_pipelined_022(10000); + count +------- + 10000 +(1 row) + +reset work_mem; +-- nest function call for ereport create table test(id int); CREATE OR REPLACE FUNCTION insert_test() returns VOID LANGUAGE plpgsql AS $BODY$ @@ -323,7 +348,7 @@ PL/pgSQL function get_tab_ptf_failed() line 5 at SQL statement referenced column: get_tab_ptf_failed reset search_path; drop schema plpgsql_pipelined cascade; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 18 other objects DETAIL: drop cascades to type plpgsql_pipelined.t_tf_row drop cascades to type plpgsql_pipelined.t_tf_tab drop cascades to function plpgsql_pipelined.get_tab_ptf(numeric) @@ -336,6 +361,9 @@ drop cascades to function plpgsql_pipelined.do_nothing_func(numeric) drop cascades to function plpgsql_pipelined.fuc(integer) drop cascades to table plpgsql_pipelined.stocktable drop cascades to function plpgsql_pipelined.stockpivot_pl() +drop cascades to type plpgsql_pipelined.type_0022 +drop cascades to type plpgsql_pipelined.tb_type_0022 +drop cascades to function 
plpgsql_pipelined.func_pipelined_022(numeric) drop cascades to table plpgsql_pipelined.test drop cascades to function plpgsql_pipelined.insert_test() drop cascades to function plpgsql_pipelined.get_tab_ptf_failed() diff --git a/src/test/regress/sql/plpgsql/plpgsql_normal.sql b/src/test/regress/sql/plpgsql/plpgsql_normal.sql index 0779602d81..09c0a4ae3d 100644 --- a/src/test/regress/sql/plpgsql/plpgsql_normal.sql +++ b/src/test/regress/sql/plpgsql/plpgsql_normal.sql @@ -149,7 +149,27 @@ END; / select count(*) from stockpivot_pl(); +-- lower workmem +set work_mem = '64kB'; +create or replace type type_0022 as (c1 integer, c2 tinyint); +create or replace type tb_type_0022 as table of type_0022; +create or replace function func_pipelined_022(count in number) + returns tb_type_0022 pipelined language plpgsql as +$BODY$ +declare result type_0022; +begin +for i in 1 .. count loop +result.c1 = 123; +result.c2 = 32; +pipe row(result); +end loop; +return; +end; +$BODY$; +select count(*) from func_pipelined_022(10000); +reset work_mem; +-- nest function call for ereport create table test(id int); CREATE OR REPLACE FUNCTION insert_test() returns VOID LANGUAGE plpgsql AS $BODY$ -- Gitee From 142c3e3ad98b639d94808008108baf9bd82efcd2 Mon Sep 17 00:00:00 2001 From: JulianZhang Date: Mon, 8 Jul 2024 11:08:01 +0800 Subject: [PATCH 019/347] advance xlog files --- .../storage/access/transam/xlog.cpp | 22 ++++++++++++++----- src/include/access/xlog.h | 5 +---- .../regress/expected/advance_xlog_files.out | 21 ++++++++++++++++++ src/test/regress/parallel_schedule0 | 1 + src/test/regress/sql/advance_xlog_files.sql | 19 ++++++++++++++++ 5 files changed, 59 insertions(+), 9 deletions(-) create mode 100644 src/test/regress/expected/advance_xlog_files.out create mode 100644 src/test/regress/sql/advance_xlog_files.sql diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 2c2be96c3d..08ed674aae 100755 --- 
a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -2647,6 +2647,7 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) bool last_iteration = false; bool finishing_seg = false; bool use_existent = false; + bool segs_enough = true; int curridx = 0; int npages = 0; int startidx = 0; @@ -2716,12 +2717,19 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) t_thrd.xlog_cxt.openLogFile = XLogFileInit(t_thrd.xlog_cxt.openLogSegNo, &use_existent, true); t_thrd.xlog_cxt.openLogOff = 0; + segs_enough = true; + if (g_instance.attr.attr_storage.wal_file_init_num > 0 && g_instance.wal_cxt.globalEndPosSegNo != InvalidXLogSegPtr && + g_instance.wal_cxt.globalEndPosSegNo >= t_thrd.xlog_cxt.openLogSegNo) { + segs_enough = (g_instance.wal_cxt.globalEndPosSegNo - t_thrd.xlog_cxt.openLogSegNo) + > (g_instance.attr.attr_storage.wal_file_init_num * 0.2); + } + /* * Unlock WalAuxiliary thread to init new xlog segment if we are running out - * of xlog segments. + * of xlog segments, or available segments is less than wal_file_init_num * 0.2. 
*/ - if (!use_existent) { - g_instance.wal_cxt.globalEndPosSegNo = t_thrd.xlog_cxt.openLogSegNo; + if (!use_existent || !segs_enough) { + g_instance.wal_cxt.globalEndPosSegNo = Max(g_instance.wal_cxt.globalEndPosSegNo, t_thrd.xlog_cxt.openLogSegNo); WakeupWalSemaphore(&g_instance.wal_cxt.walInitSegLock->l.sem); } } @@ -4242,7 +4250,7 @@ bool PreInitXlogFileForStandby(XLogRecPtr requestLsn) return true; } -void PreInitXlogFileForPrimary(int advance_xlog_file_num) +void PreInitXlogFileForPrimary(int wal_file_init_num) { XLogSegNo startSegNo, nextSegNo, target; int lf; @@ -4254,7 +4262,7 @@ void PreInitXlogFileForPrimary(int advance_xlog_file_num) return; } startSegNo = g_instance.wal_cxt.globalEndPosSegNo + 1; - target = startSegNo + advance_xlog_file_num - 1; + target = startSegNo + wal_file_init_num - 1; for (nextSegNo = startSegNo; nextSegNo <= target; nextSegNo++) { use_existent = true; lf = XLogFileInit(nextSegNo, &use_existent, true); @@ -4496,6 +4504,10 @@ static bool InstallXLogFileSegment(XLogSegNo *segno, const char *tmppath, bool f return false; } + if (*segno > g_instance.wal_cxt.globalEndPosSegNo) { + g_instance.wal_cxt.globalEndPosSegNo = *segno; + } + if (use_lock) { LWLockRelease(ControlFileLock); } diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h index fa5931dc19..5284da1b96 100755 --- a/src/include/access/xlog.h +++ b/src/include/access/xlog.h @@ -361,7 +361,6 @@ typedef struct XLogwrtResult { XLogRecPtr Flush; /* last byte + 1 flushed */ } XLogwrtResult; -extern void XLogMultiFileInit(int advance_xlog_file_num); typedef struct XLogwrtPaxos { XLogRecPtr Write; /* last byte + 1 written out */ XLogRecPtr Consensus; /* last byte + 1 consensus in DCF */ @@ -708,8 +707,6 @@ extern XLogSegNo GetNewestXLOGSegNo(const char* workingPath); #define XLOG_CONTAIN_CSN 0x80000000 #define XLOG_MASK_TERM 0x7FFFFFFF -extern void XLogMultiFileInit(int advance_xlog_file_num); - extern XLogRecPtr XLogInsertRecord(struct XLogRecData* rdata, XLogRecPtr 
fpw_lsn); extern void XLogWaitFlush(XLogRecPtr recptr); extern void XLogWaitBufferInit(XLogRecPtr recptr); @@ -719,7 +716,7 @@ extern bool XLogNeedsFlush(XLogRecPtr RecPtr); extern int XLogFileInit(XLogSegNo segno, bool* use_existent, bool use_lock); extern int XLogFileOpen(XLogSegNo segno); extern bool PreInitXlogFileForStandby(XLogRecPtr requestLsn); -extern void PreInitXlogFileForPrimary(int advance_xlog_file_num); +extern void PreInitXlogFileForPrimary(int wal_file_init_num); extern void CheckXLogRemoved(XLogSegNo segno, TimeLineID tli); extern void XLogSetAsyncXactLSN(XLogRecPtr record); diff --git a/src/test/regress/expected/advance_xlog_files.out b/src/test/regress/expected/advance_xlog_files.out new file mode 100644 index 0000000000..8f070cab71 --- /dev/null +++ b/src/test/regress/expected/advance_xlog_files.out @@ -0,0 +1,21 @@ +-- show wal_file_init_num, the default value should be 10 +show wal_file_init_num; + wal_file_init_num +------------------- + 10 +(1 row) + +-- create test table with large row size +create table test_advance_xlog_files (c1 int, c2 varchar(500)); +-- insert rows to make xlog files consumed, then trigger to init xlog files. 
+insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +drop table test_advance_xlog_files; diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 95c8b87ee7..425d470759 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -1127,3 +1127,4 @@ test: enable_expr_fusion_flatten test: on_update_session1 on_update_session2 test: ts_gb18030_utf8 +test: advance_xlog_files diff --git a/src/test/regress/sql/advance_xlog_files.sql b/src/test/regress/sql/advance_xlog_files.sql new file mode 100644 index 0000000000..62615a0fa1 --- /dev/null +++ b/src/test/regress/sql/advance_xlog_files.sql @@ -0,0 +1,19 @@ +-- show wal_file_init_num, the default value should be 10 +show wal_file_init_num; + +-- create test table with large row size +create table test_advance_xlog_files (c1 int, c2 varchar(500)); + +-- insert rows to make xlog files consumed, then trigger to init xlog files. 
+insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); +insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); + +drop table test_advance_xlog_files; \ No newline at end of file -- Gitee From 547d2cf0d5c88c306ec952fb76a1118b395fd13b Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Mon, 8 Jul 2024 14:58:44 +0800 Subject: [PATCH 020/347] do not reduce sort of subquery when query contains limit etc --- .../optimizer/prep/prepnonjointree.cpp | 4 ++ src/test/regress/expected/reduce_orderby.out | 59 +++++++++++++++++++ src/test/regress/sql/reduce_orderby.sql | 19 ++++++ 3 files changed, 82 insertions(+) diff --git a/src/gausskernel/optimizer/prep/prepnonjointree.cpp b/src/gausskernel/optimizer/prep/prepnonjointree.cpp index 5f4eaf6301..f81f2d116b 100755 --- a/src/gausskernel/optimizer/prep/prepnonjointree.cpp +++ b/src/gausskernel/optimizer/prep/prepnonjointree.cpp @@ -2137,6 +2137,10 @@ static void reduce_orderby_recurse(Query* query, Node* jtnode, bool reduce) if (IsA(jtnode, RangeTblRef)) { int varno = 
((RangeTblRef*)jtnode)->rtindex; RangeTblEntry* rte = rt_fetch(varno, query->rtable); + /* do not reduce sort of subquery when query contains limit etc*/ + if (reduce && (query->limitOffset || query->limitCount || contain_rownum_walker((Node*)query, NULL))) { + return; + } /* Reduce orderby clause in subquery for join or from clause of more than one rte */ reduce_orderby_final(rte, reduce); diff --git a/src/test/regress/expected/reduce_orderby.out b/src/test/regress/expected/reduce_orderby.out index d0de2a4eae..29b917cd03 100644 --- a/src/test/regress/expected/reduce_orderby.out +++ b/src/test/regress/expected/reduce_orderby.out @@ -272,7 +272,66 @@ WHERE init.product = 'books'; 2.2500000000000000 | 152 (1 row) +explain (costs off) SELECT avg(o.quantity) AS avg_quantity, +sum(o.revenue) AS total_revenue +FROM ( +SELECT DISTINCT ON (user_id) +user_id, product +FROM orders +ORDER BY user_id, order_date +) init +JOIN orders o USING (user_id, product) +WHERE init.product = 'books' offset 10; + QUERY PLAN +----------------------------------------------------------------------------- + Limit + -> Aggregate + -> Nested Loop + Join Filter: (init.user_id = o.user_id) + -> Subquery Scan on init + Filter: (init.product = 'books'::text) + -> Unique + -> Sort + Sort Key: orders.user_id, orders.order_date + -> Seq Scan on orders + -> Seq Scan on orders o + Filter: (product = 'books'::text) +(12 rows) + +create table outer_(id int); +create table inner_(id1 int, id2 int); +set enable_hashjoin TO off; +set enable_mergejoin to off; +explain (costs off) select * from (select * from outer_ order by id desc) o left join inner_ i on o.id = i.id1 limit 1; + QUERY PLAN +------------------------------------------ + Limit + -> Nested Loop Left Join + Join Filter: (outer_.id = i.id1) + -> Sort + Sort Key: outer_.id DESC + -> Seq Scan on outer_ + -> Materialize + -> Seq Scan on inner_ i +(8 rows) + +explain (costs off) select * from (select * from outer_ order by id desc) o left join inner_ 
i on o.id = i.id1 where rownum < 10; + QUERY PLAN +------------------------------------------ + Limit + -> Nested Loop Left Join + Join Filter: (outer_.id = i.id1) + -> Sort + Sort Key: outer_.id DESC + -> Seq Scan on outer_ + -> Materialize + -> Seq Scan on inner_ i +(8 rows) + drop table orders; --drop schema reset current_schema; drop schema reduce_orderby cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table reduce_orderby.outer_ +drop cascades to table reduce_orderby.inner_ diff --git a/src/test/regress/sql/reduce_orderby.sql b/src/test/regress/sql/reduce_orderby.sql index d55d3463e3..2bd6808398 100644 --- a/src/test/regress/sql/reduce_orderby.sql +++ b/src/test/regress/sql/reduce_orderby.sql @@ -78,6 +78,25 @@ ORDER BY user_id, order_date JOIN orders o USING (user_id, product) WHERE init.product = 'books'; + +explain (costs off) SELECT avg(o.quantity) AS avg_quantity, +sum(o.revenue) AS total_revenue +FROM ( +SELECT DISTINCT ON (user_id) +user_id, product +FROM orders +ORDER BY user_id, order_date +) init +JOIN orders o USING (user_id, product) +WHERE init.product = 'books' offset 10; + +create table outer_(id int); +create table inner_(id1 int, id2 int); +set enable_hashjoin TO off; +set enable_mergejoin to off; +explain (costs off) select * from (select * from outer_ order by id desc) o left join inner_ i on o.id = i.id1 limit 1; +explain (costs off) select * from (select * from outer_ order by id desc) o left join inner_ i on o.id = i.id1 where rownum < 10; + drop table orders; --drop schema reset current_schema; -- Gitee From 9ec3eba6e017c944751f3feb4645dc4bed4b080e Mon Sep 17 00:00:00 2001 From: lukeman Date: Sun, 28 Apr 2024 09:44:55 +0800 Subject: [PATCH 021/347] =?UTF-8?q?=E6=95=B0=E6=8D=AE=E5=A4=87=E4=BB=BD?= =?UTF-8?q?=E6=94=AF=E6=8C=81oss?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- build/script/aarch64_lite_list | 2 + build/script/aarch64_opengauss_list | 2 + 
.../opengauss_release_list_ubuntu_single | 2 + build/script/x86_64_lite_list | 2 + build/script/x86_64_opengauss_list | 2 + cmake/src/set_thirdparty_path.cmake | 7 + src/Makefile.global.in | 17 +- src/Makefile.global.in_for_llt | 7 + src/bin/pg_probackup/CMakeLists.txt | 8 +- src/bin/pg_probackup/Makefile | 13 +- src/bin/pg_probackup/backup.cpp | 49 +- src/bin/pg_probackup/catalog.cpp | 67 ++- src/bin/pg_probackup/configure.cpp | 80 +++- src/bin/pg_probackup/data.cpp | 193 +++++--- src/bin/pg_probackup/delete.cpp | 67 ++- src/bin/pg_probackup/dir.cpp | 12 + src/bin/pg_probackup/file.cpp | 69 ++- src/bin/pg_probackup/init.cpp | 21 + src/bin/pg_probackup/merge.cpp | 5 + src/bin/pg_probackup/oss/appender.cpp | 227 +++++++++ src/bin/pg_probackup/oss/backup.cpp | 317 ++++++++++++ src/bin/pg_probackup/oss/buffer.cpp | 176 +++++++ src/bin/pg_probackup/oss/include/appender.h | 80 ++++ src/bin/pg_probackup/oss/include/backup.h | 44 ++ src/bin/pg_probackup/oss/include/buffer.h | 136 ++++++ .../pg_probackup/oss/include/oss_operator.h | 91 ++++ src/bin/pg_probackup/oss/include/restore.h | 46 ++ src/bin/pg_probackup/oss/include/thread.h | 124 +++++ src/bin/pg_probackup/oss/oss_operator.cpp | 288 +++++++++++ src/bin/pg_probackup/oss/restore.cpp | 350 ++++++++++++++ src/bin/pg_probackup/oss/thread.cpp | 453 ++++++++++++++++++ src/bin/pg_probackup/pg_probackup.cpp | 28 +- src/bin/pg_probackup/pg_probackupb.h | 46 ++ src/bin/pg_probackup/pg_probackupc.h | 42 +- src/bin/pg_probackup/restore.cpp | 100 ++-- src/bin/pg_probackup/show.cpp | 17 +- src/bin/pg_probackup/util.cpp | 26 + src/bin/pg_probackup/validate.cpp | 29 +- src/include/storage/file/fio_device_com.h | 7 + src/include/tool_common.h | 9 + 40 files changed, 3107 insertions(+), 154 deletions(-) create mode 100644 src/bin/pg_probackup/oss/appender.cpp create mode 100644 src/bin/pg_probackup/oss/backup.cpp create mode 100644 src/bin/pg_probackup/oss/buffer.cpp create mode 100644 src/bin/pg_probackup/oss/include/appender.h 
create mode 100644 src/bin/pg_probackup/oss/include/backup.h create mode 100644 src/bin/pg_probackup/oss/include/buffer.h create mode 100644 src/bin/pg_probackup/oss/include/oss_operator.h create mode 100644 src/bin/pg_probackup/oss/include/restore.h create mode 100644 src/bin/pg_probackup/oss/include/thread.h create mode 100644 src/bin/pg_probackup/oss/oss_operator.cpp create mode 100644 src/bin/pg_probackup/oss/restore.cpp create mode 100644 src/bin/pg_probackup/oss/thread.cpp diff --git a/build/script/aarch64_lite_list b/build/script/aarch64_lite_list index 38b7eacdd5..fdbd3de107 100644 --- a/build/script/aarch64_lite_list +++ b/build/script/aarch64_lite_list @@ -701,6 +701,8 @@ ./lib/libdcf.so ./lib/libzstd.so* ./lib/libcurl.so* +./lib/libaws-cpp-sdk-core.so +./lib/libaws-cpp-sdk-s3.so ./lib/libxgboost.so ./lib/libpagecompression.so* ./lib/postgresql/latin2_and_win1250.so diff --git a/build/script/aarch64_opengauss_list b/build/script/aarch64_opengauss_list index dc5d52fa5c..102ad439e3 100644 --- a/build/script/aarch64_opengauss_list +++ b/build/script/aarch64_opengauss_list @@ -876,6 +876,8 @@ ./lib/krb5/plugins/kdb/db2.so ./lib/libverto.so* ./lib/libcurl.so* +./lib/libaws-cpp-sdk-core.so +./lib/libaws-cpp-sdk-s3.so ./lib/libcrypto.so* ./lib/libssl.so* ./lib/libgcc_s.so.1 diff --git a/build/script/opengauss_release_list_ubuntu_single b/build/script/opengauss_release_list_ubuntu_single index 6172210264..0632e8e03c 100644 --- a/build/script/opengauss_release_list_ubuntu_single +++ b/build/script/opengauss_release_list_ubuntu_single @@ -869,6 +869,8 @@ ./lib/libcurl.so ./lib/libcurl.so.4 ./lib/libcurl.so.4.6.0 +./lib/libaws-cpp-sdk-core.so +./lib/libaws-cpp-sdk-s3.so ./lib/libcrypto.so ./lib/libcrypto.so.1.1 ./lib/libssl.so diff --git a/build/script/x86_64_lite_list b/build/script/x86_64_lite_list index ca989045f7..5a1647c2d2 100644 --- a/build/script/x86_64_lite_list +++ b/build/script/x86_64_lite_list @@ -700,6 +700,8 @@ ./lib/libdcf.so ./lib/libzstd.so* 
./lib/libcurl.so* +./lib/libaws-cpp-sdk-core.so +./lib/libaws-cpp-sdk-s3.so ./lib/libxgboost.so ./lib/libpagecompression.so* ./lib/postgresql/latin2_and_win1250.so diff --git a/build/script/x86_64_opengauss_list b/build/script/x86_64_opengauss_list index a59ef549a1..98b009cc70 100644 --- a/build/script/x86_64_opengauss_list +++ b/build/script/x86_64_opengauss_list @@ -873,6 +873,8 @@ ./lib/krb5/plugins/kdb/db2.so ./lib/libverto.so* ./lib/libcurl.so* +./lib/libaws-cpp-sdk-core.so +./lib/libaws-cpp-sdk-s3.so ./lib/libcrypto.so* ./lib/libssl.so* ./lib/libgcc_s.so.1 diff --git a/cmake/src/set_thirdparty_path.cmake b/cmake/src/set_thirdparty_path.cmake index f6eba3f040..6b0bb651ff 100755 --- a/cmake/src/set_thirdparty_path.cmake +++ b/cmake/src/set_thirdparty_path.cmake @@ -61,6 +61,7 @@ set(LLVM_HOME ${DEPENDENCY_PATH}/llvm/${LIB_UNIFIED_SUPPORT}) set(LZ4_HOME ${DEPENDENCY_PATH}/lz4/${SUPPORT_LLT}) set(NANOMSG_HOME ${DEPENDENCY_PATH}/nng/${LIB_UNIFIED_SUPPORT}) set(NCURSES_HOME ${DEPENDENCY_PATH}/ncurses/${SUPPORT_LLT}) +set(AWSSDK_HOME ${DEPENDENCY_PATH}/aws-sdk-cpp/${SUPPORT_LLT}) if(($ENV{WITH_TASSL}) STREQUAL "YES") set(OPENSSL_HOME ${DEPENDENCY_PATH}/tassl/${LIB_UNIFIED_SUPPORT}) else() @@ -193,6 +194,12 @@ set(LIBCGROUP_LIB_PATH ${CGROUP_HOME}/lib) set(LIBCURL_INCLUDE_PATH ${CURL_HOME}/include) set(LIBCURL_LIB_PATH ${CURL_HOME}/lib) +############################################################################# +# awssdk component +############################################################################# +set(AWSSDK_INCLUDE_PATH ${AWSSDK_HOME}/include) +set(AWSSDK_LIB_PATH ${AWSSDK_HOME}/lib) + ############################################################################# # edit component ############################################################################# diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 7eb8bda492..b277365ce4 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -300,6 +300,7 @@ ifeq ($(with_3rd), NONE) 
MOCKCPP_HOME = $(top_builddir)/$(BUILD_TOOLS_PATH)/mockcpp/$(LIB_NOT_SUPPORT_LLT) NUMA_HOME = $(top_builddir)/$(BINARYPATH)/numactl/$(LIB_SUPPORT_LLT) LIBCURL_HOME = $(top_builddir)/$(BINARYPATH)/libcurl/$(LIB_SUPPORT_LLT) + AWSSDK_HOME = $(top_builddir)/$(BINARYPATH)/aws-sdk-cpp/$(LIB_SUPPORT_LLT) ZSTD_HOME = $(top_builddir)/$(BINARYPATH)/zstd LIBNANOMSG_HOME = $(top_builddir)/$(BINARYPATH)/nng/comm PLJAVA_HOME = $(top_builddir)/$(BINARYPATH)/pljava/$(LIB_SUPPORT_LLT) @@ -349,6 +350,7 @@ endif MOCKCPP_HOME = $(with_3rd)/$(BUILD_TOOLS_PATH)/mockcpp/$(LIB_NOT_SUPPORT_LLT) NUMA_HOME = $(with_3rd)/$(BINARYPATH)/numactl/$(LIB_SUPPORT_LLT) LIBCURL_HOME = $(with_3rd)/$(BINARYPATH)/libcurl/$(LIB_SUPPORT_LLT) + AWSSDK_HOME = $(with_3rd)/$(BINARYPATH)/aws-sdk-cpp/$(LIB_SUPPORT_LLT) ZSTD_HOME = $(with_3rd)/$(BINARYPATH)/zstd LIBNANOMSG_HOME = $(with_3rd)/$(BINARYPATH)/nng/comm PLJAVA_HOME = $(with_3rd)/$(BINARYPATH)/pljava/$(LIB_SUPPORT_LLT) @@ -647,6 +649,12 @@ NUMA_LIB_PATH = $(NUMA_HOME)/lib LIBCURL_INCLUDE_PATH = $(LIBCURL_HOME)/include LIBCURL_LIB_PATH = $(LIBCURL_HOME)/lib +############################################################################# +# awssdk component +############################################################################# +AWSSDK_INCLUDE_PATH = $(AWSSDK_HOME)/include +AWSSDK_LIB_PATH = $(AWSSDK_HOME)/lib + ############################################################################# # masstree component ############################################################################# @@ -728,7 +736,7 @@ else # not PGXS endif endif -override CPPFLAGS := $(CPPFLAGS) -I$(LIBODBC_INCLUDE_PATH) -I$(LIBOBS_INCLUDE_PATH) -I$(LIBCGROUP_INCLUDE_PATH) -I$(LIBOPENSSL_INCLUDE_PATH) -I${BOOST_INCLUDE_PATH} -I$(LIBLLVM_INCLUDE_PATH) -I$(KERBEROS_INCLUDE_PATH) -I$(CJSON_INCLUDE_PATH) -I$(NUMA_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH) -I$(LZ4_INCLUDE_PATH) -I$(ZSTD_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -I$(DCF_INCLUDE_PATH) +override CPPFLAGS := $(CPPFLAGS) 
-I$(LIBODBC_INCLUDE_PATH) -I$(LIBOBS_INCLUDE_PATH) -I$(LIBCGROUP_INCLUDE_PATH) -I$(LIBOPENSSL_INCLUDE_PATH) -I${BOOST_INCLUDE_PATH} -I$(LIBLLVM_INCLUDE_PATH) -I$(KERBEROS_INCLUDE_PATH) -I$(CJSON_INCLUDE_PATH) -I$(NUMA_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH) -I$(LZ4_INCLUDE_PATH) -I$(ZSTD_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -I$(AWSSDK_INCLUDE_PATH) -I$(DCF_INCLUDE_PATH) # GDS links to libevent ifeq ($(enable_multiple_nodes), yes) @@ -894,6 +902,7 @@ endif LDFLAGS += -L$(GSTRACE_LIB_PATH) LDFLAGS += -L$(NUMA_LIB_PATH) LDFLAGS += -L$(LIBCURL_LIB_PATH) +LDFLAGS += -L$(AWSSDK_LIB_PATH) ifeq ($(enable_mot), yes) LDFLAGS += -L$(MASSTREE_LIB_PATH) endif @@ -948,6 +957,12 @@ GEN_KEYWORDLIST_DEPS = $(TOOLSDIR)/gen_keywordlist.pl $(TOOLSDIR)/PerfectHash.pm LIBCURL_INCLUDE_PATH = $(LIBCURL_HOME)/include LIBCURL_LIB_PATH = $(LIBCURL_HOME)/lib +############################################################################# +# awssdk component +############################################################################# +AWSSDK_INCLUDE_PATH = $(AWSSDK_HOME)/include +AWSSDK_LIB_PATH = $(AWSSDK_HOME)/lib + ############################################################################# # Perl diff --git a/src/Makefile.global.in_for_llt b/src/Makefile.global.in_for_llt index 83eac42458..955e120dd7 100755 --- a/src/Makefile.global.in_for_llt +++ b/src/Makefile.global.in_for_llt @@ -660,6 +660,13 @@ LIBCURL_HOME = $(top_builddir)/../$(BINARYPATH)/libcurl/comm LIBCURL_INCLUDE_PATH = $(LIBCURL_HOME)/include LIBCURL_LIB_PATH = $(LIBCURL_HOME)/lib +############################################################################# +# awssdk component +############################################################################# +AWSSDK_HOME = $(top_builddir)/../$(BINARYPATH)/aws-sdk-cpp/comm +AWSSDK_INCLUDE_PATH = $(AWSSDK_HOME)/include +AWSSDK_LIB_PATH = $(AWSSDK_HOME)/lib + ############################################################################# 
############################################################################# diff --git a/src/bin/pg_probackup/CMakeLists.txt b/src/bin/pg_probackup/CMakeLists.txt index 193cd59093..9d5e9a5d51 100755 --- a/src/bin/pg_probackup/CMakeLists.txt +++ b/src/bin/pg_probackup/CMakeLists.txt @@ -15,12 +15,14 @@ execute_process( AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_probackup_SRC) -set(TGT_probackup_INC ${ZLIB_INCLUDE_PATH} ${LZ4_INCLUDE_PATH} ${ZSTD_INCLUDE_PATH} ${PROJECT_SRC_DIR}/lib/page_compression ${PROJECT_SRC_DIR}/include/storage/gs_uwal) +AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/oss TGT_probackup_SRC) + +set(TGT_probackup_INC ${ZLIB_INCLUDE_PATH} ${LZ4_INCLUDE_PATH} ${ZSTD_INCLUDE_PATH} ${PROJECT_SRC_DIR}/lib/page_compression ${PROJECT_SRC_DIR}/include/storage/gs_uwal ${AWSSDK_INCLUDE_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/oss) set(probackup_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND -DHAVE_LIBZ) set(probackup_COMPILE_OPTIONS ${PROTECT_OPTIONS} ${BIN_SECURE_OPTIONS} ${OS_OPTIONS} ${WARNING_OPTIONS} ${OPTIMIZE_OPTIONS} ${CHECK_OPTIONS}) set(probackup_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(probackup_LINK_LIBS libpgcommon.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -llz4 -lpq -lpagecompression -lzstd) +set(probackup_LINK_LIBS libpgcommon.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -llz4 -lpq -lpagecompression -lzstd -laws-cpp-sdk-core -laws-cpp-sdk-s3) if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") list(APPEND probackup_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) endif() @@ -32,6 +34,6 @@ add_bintarget(gs_probackup TGT_probackup_SRC TGT_probackup_INC "${probackup_DEF_ add_dependencies(gs_probackup pq pgport_static pagecompression) target_link_directories(gs_probackup PUBLIC ${CMAKE_BINARY_DIR}/lib ${LIBOPENSSL_LIB_PATH} ${LIBEDIT_LIB_PATH} ${ZSTD_LIB_PATH} - ${ZLIB_LIB_PATH} ${KERBEROS_LIB_PATH} 
${LZ4_LIB_PATH} ${SECURE_LIB_PATH} ${probackup_LINK_DIRS} + ${ZLIB_LIB_PATH} ${KERBEROS_LIB_PATH} ${LZ4_LIB_PATH} ${SECURE_LIB_PATH} ${probackup_LINK_DIRS} ${AWSSDK_LIB_PATH} ) install(TARGETS gs_probackup RUNTIME DESTINATION bin) diff --git a/src/bin/pg_probackup/Makefile b/src/bin/pg_probackup/Makefile index dee268dd15..5cb45cf0ea 100644 --- a/src/bin/pg_probackup/Makefile +++ b/src/bin/pg_probackup/Makefile @@ -25,6 +25,8 @@ OBJS += $(top_builddir)/src/lib/pgcommon/libpgcommon.a \ $(top_builddir)/src/gausskernel/storage/dss/dss_adaptor.o \ $(top_builddir)/src/gausskernel/storage/gs_uwal/gs_uwal_adaptor.o +OBJS += oss/appender.o oss/backup.o oss/restore.o oss/buffer.o oss/oss_operator.o oss/thread.o + EXTRA_CLEAN = datapagemap.cpp datapagemap.h \ receivelog.cpp receivelog.h streamutil.cpp streamutil.h \ xlogreader.cpp instr_time.h @@ -34,6 +36,9 @@ top_builddir = ../../.. include $(top_builddir)/src/Makefile.global EXTRA_CLEAN += logging.h +AWS_SDK_INCLUDE_PATH = $(AWSSDK_INCLUDE_PATH) +AWS_SDK_LIB_PATH = $(AWSSDK_LIB_PATH) + LDFLAGS += -L$(LZ4_LIB_PATH) -L$(ZSTD_LIB_PATH) LIBS += -llz4 ifeq ($(enable_lite_mode), no) @@ -42,11 +47,17 @@ endif PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_builddir)/$(subdir) -I$(LZ4_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH) -I$(ZSTD_INCLUDE_PATH) # add page_compression so .h LDFLAGS += -L../../lib/page_compression -PG_CPPFLAGS = -I../../lib/page_compression -I../../src/common/port +PG_CPPFLAGS = -I../../lib/page_compression -I../../src/common/port -I$(AWS_SDK_INCLUDE_PATH) LIBS += -lpagecompression -lzstd override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) -DHAVE_LIBZ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} +# add oss .h +LDFLAGS += -I$(top_builddir)/$(subdir)/oss/include + +# add aws s3 so .h +LDFLAGS += -L$(AWS_SDK_LIB_PATH) -laws-cpp-sdk-core -laws-cpp-sdk-s3 -I$(AWS_SDK_INCLUDE_PATH) + all: submake-pagecompression $(PROGRAM) gs_probackup: $(OBJS) | submake-libpq submake-libpgport diff --git 
a/src/bin/pg_probackup/backup.cpp b/src/bin/pg_probackup/backup.cpp index d6ccfee24f..19655066d4 100644 --- a/src/bin/pg_probackup/backup.cpp +++ b/src/bin/pg_probackup/backup.cpp @@ -29,6 +29,8 @@ #include "common/fe_memutils.h" #include "storage/file/fio_device.h" #include "logger.h" +#include "oss/include/backup.h" +#include "oss/include/restore.h" /* list of dirs which will not to be backuped @@ -65,6 +67,9 @@ static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER; /* list of files contained in backup */ static parray *backup_files_list = NULL; +static parray *filesinfo = NULL; +static SenderCxt *current_sender_cxt = NULL; +static ReaderCxt *current_reader_cxt = NULL; /* We need critical section for datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; @@ -254,7 +259,6 @@ static void run_backup_threads(char *external_prefix, char *database_path, char for (i = 0; i < num_threads; i++) { backup_files_arg *arg = &(threads_args[i]); - elog(VERBOSE, "Start thread num: %i", i); pthread_create(&threads[i], NULL, backup_files, arg); } @@ -521,6 +525,9 @@ static void add_xlog_files_into_backup_list(const char *database_path, const cha file->name = file->rel_path; else file->name++; + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(wal_full_path, wal_full_path); + } } /* Add xlog files into the list of backed up files */ @@ -564,7 +571,6 @@ static void sync_files(parray *database_map, const char *database_path, parray * set_min_recovery_point(pg_control, fullpath, current.stop_lsn); } - /* close and sync page header map */ if (current.hdr_map.fp) { @@ -572,6 +578,10 @@ static void sync_files(parray *database_map, const char *database_path, parray * if (fio_sync(current.hdr_map.path, FIO_BACKUP_HOST) != 0) elog(ERROR, "Cannot sync file \"%s\": %s", current.hdr_map.path, strerror(errno)); + + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(current.hdr_map.path, 
current.hdr_map.path); + } } /* close ssh session in main thread */ @@ -829,8 +839,36 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool /* * Make directories before backup and setup threads at the same time */ - run_backup_threads(external_prefix, database_path, dssdata_path, prev_backup_filelist, - external_dirs, nodeInfo, prev_backup_start_lsn); + if (current.media_type == MEDIA_TYPE_OSS) { + no_sync = true; // no need to sync file to disk + current.oss_status = OSS_STATUS_OSS; + filesinfo = parray_new(); + backup_files_arg arg; + arg.nodeInfo = nodeInfo; + arg.from_root = instance_config.pgdata; + arg.to_root = database_path; + arg.src_dss = instance_config.dss.vgdata; + arg.dst_dss = dssdata_path; + arg.external_prefix = external_prefix; + arg.external_dirs = external_dirs; + arg.files_list = backup_files_list; + arg.prev_filelist = prev_backup_filelist; + arg.prev_start_lsn = prev_backup_start_lsn; + arg.conn_arg.conn = NULL; + arg.conn_arg.cancel_conn = NULL; + arg.hdr_map = &(current.hdr_map); + arg.thread_num = -1; + current.filesinfo = filesinfo; + initBackupSenderContext(¤t_sender_cxt); + current.sender_cxt = current_sender_cxt; + initBackupReaderContexts(¤t_reader_cxt); + current.readerCxt = current_reader_cxt; + performBackup(&arg); + parray_free(filesinfo); + } else { + run_backup_threads(external_prefix, database_path, dssdata_path, prev_backup_filelist, + external_dirs, nodeInfo, prev_backup_start_lsn); + } /* clean previous backup file list */ if (prev_backup_filelist) @@ -841,7 +879,6 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool /* Notify end of backup */ pg_stop_backup(¤t, backup_conn, nodeInfo); - sync_files(database_map, database_path, external_dirs, dssdata_path, external_prefix, no_sync); /* be paranoid about instance been from the past */ @@ -1085,7 +1122,7 @@ do_backup(time_t start_time, pgSetBackupParams *set_backup_params, pin_backup(¤t, set_backup_params); } - if 
(!no_validate) + if (!no_validate && current.media_type != MEDIA_TYPE_OSS) pgBackupValidate(¤t, NULL); /* do something after backup */ diff --git a/src/bin/pg_probackup/catalog.cpp b/src/bin/pg_probackup/catalog.cpp index e275ecfda6..f5e107ee06 100644 --- a/src/bin/pg_probackup/catalog.cpp +++ b/src/bin/pg_probackup/catalog.cpp @@ -20,6 +20,8 @@ #include "file.h" #include "configuration.h" #include "common/fe_memutils.h" +#include "oss/include/restore.h" +#include "oss/include/oss_operator.h" static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); @@ -458,6 +460,10 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id) goto err_proc; } + if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigDir(); + } + /* scan the directory and list backups */ backups = parray_new(); for (; (data_ent = fio_readdir(data_dir)) != NULL; errno = 0) @@ -1724,6 +1730,12 @@ do_set_backup(const char *instance_name, time_t backup_id, if (set_backup_params->note) add_note(target_backup, set_backup_params->note); + if (set_backup_params->oss_status >= OSS_STATUS_LOCAL && set_backup_params->oss_status < OSS_STATUS_NUM) { + target_backup->oss_status = set_backup_params->oss_status; + /* Update backup.control */ + write_backup(target_backup, true); + } + parray_walk(backup_list, pgBackupFree); parray_free(backup_list); } @@ -1921,6 +1933,8 @@ pgBackupWriteControl(FILE *out, pgBackup *backup) fio_fprintf(out, "\n#Database Storage type\n"); fio_fprintf(out, "storage-type = %s\n", dev2str(backup->storage_type)); + fio_fprintf(out, "\n#S3 Storage status\n"); + fio_fprintf(out, "s3-status = %s\n", ossStatus2str(backup->oss_status)); } /* @@ -1976,7 +1990,11 @@ write_backup(pgBackup *backup, bool strict) if (rename(path_temp, path) < 0) elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - path_temp, path, strerror(errno)); + path_temp, path, strerror(errno)); + + if (current.media_type == MEDIA_TYPE_OSS) 
{ + uploadConfigFile(path, path); + } } void flush_and_close_file(pgBackup *backup, bool sync, FILE *out, char *control_path_temp) @@ -2154,6 +2172,10 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", control_path_temp, control_path, strerror(errno)); + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(control_path, control_path); + } + /* use extra variable to avoid reset of previous data_bytes value in case of error */ backup->data_bytes = backup_size_on_disk; backup->uncompressed_bytes = uncompressed_size_on_disk; @@ -2215,6 +2237,7 @@ readBackupControlFile(const char *path) char *recovery_name = NULL; int parsed_options; char *storage_type = NULL; + char *oss_status = NULL; errno_t rc = 0; ConfigOption options[] = @@ -2250,10 +2273,16 @@ readBackupControlFile(const char *path) {'s', 0, "recovery-name", &recovery_name, SOURCE_FILE_STRICT}, {'u', 0, "content-crc", &backup->content_crc, SOURCE_FILE_STRICT}, {'s', 0, "storage-type", &storage_type, SOURCE_FILE_STRICT}, + {'s', 0, "s3-status", &oss_status, SOURCE_FILE_STRICT}, {0} }; pgBackupInit(backup); + + if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigFile(path); + } + if (fio_access(path, F_OK, FIO_BACKUP_HOST) != 0) { elog(WARNING, "Control file \"%s\" doesn't exist", path); @@ -2351,6 +2380,14 @@ readBackupControlFile(const char *path) if (storage_type) backup->storage_type = str2dev(storage_type); + if (oss_status) { + backup->oss_status = str2ossStatus(oss_status); + } + + if (current.media_type == MEDIA_TYPE_OSS) { + remove(path); + } + return backup; } @@ -2375,6 +2412,27 @@ parse_backup_mode(const char *value) return BACKUP_MODE_INVALID; } +MediaType +parse_media_type(const char *value) +{ + const char *v = value; + size_t len; + + /* Skip all spaces detected */ + while (IsSpace(*v)) + v++; + len = strlen(v); + + if (len > 0 && pg_strncasecmp("s3", v, len) == 0) + return MEDIA_TYPE_OSS; + else if (len 
> 0 && pg_strncasecmp("disk", v, len) == 0) + return MEDIA_TYPE_DISK; + + /* media type is invalid, so leave with an error */ + elog(ERROR, "invalid media_type \"%s\"", value); + return MEDIA_TYPE_UNKNOWN; +} + const char * deparse_backup_mode(BackupMode mode) { @@ -2503,6 +2561,9 @@ pgBackupInit(pgBackup *backup) backup->files = NULL; backup->note = NULL; backup->content_crc = 0; + backup->dssdata_bytes = 0; + backup->oss_status = OSS_STATUS_INVALID; + backup->media_type = MEDIA_TYPE_UNKNOWN; } /* free pgBackup object */ @@ -2696,7 +2757,7 @@ scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup) while (target_backup->parent_backup_link) { if (target_backup->status != BACKUP_STATUS_OK && - target_backup->status != BACKUP_STATUS_DONE) + target_backup->status != BACKUP_STATUS_DONE) /* oldest invalid backup in parent chain */ invalid_backup = target_backup; @@ -2707,7 +2768,7 @@ scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup) /* Previous loop will skip FULL backup because his parent_backup_link is NULL */ if (target_backup->backup_mode == BACKUP_MODE_FULL && (target_backup->status != BACKUP_STATUS_OK && - target_backup->status != BACKUP_STATUS_DONE)) + target_backup->status != BACKUP_STATUS_DONE)) { invalid_backup = target_backup; } diff --git a/src/bin/pg_probackup/configure.cpp b/src/bin/pg_probackup/configure.cpp index 9210f36260..2d6d0df47d 100644 --- a/src/bin/pg_probackup/configure.cpp +++ b/src/bin/pg_probackup/configure.cpp @@ -15,7 +15,8 @@ #include "configuration.h" #include "json.h" #include "catalog/pg_control.h" - +#include "oss/include/oss_operator.h" +#include "oss/include/restore.h" static void assign_log_level_console(ConfigOption *opt, const char *arg); static void assign_log_level_file(ConfigOption *opt, const char *arg); @@ -42,6 +43,7 @@ static void show_configure_json(ConfigOption *opt); #define OPTION_COMPRESS_GROUP "Compression parameters" #define OPTION_REMOTE_GROUP "Remote access parameters" #define 
OPTION_DSS_GROUP "DSS connect parameters" +#define OPTION_OSS_GROUP "OSS connect parameters" /* * Short name should be non-printable ASCII character. @@ -243,6 +245,32 @@ ConfigOption instance_options[] = &instance_config.dss.instance_id, SOURCE_CMD, (OptionSource)0, OPTION_DSS_GROUP, 0, option_get_value }, + /* OSS options */ + { + 's', 236, "access-id", + &instance_config.oss.access_id, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 237, "access-key", + &instance_config.oss.access_key, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 239, "endpoint", + &instance_config.oss.endpoint, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 240, "region", + &instance_config.oss.region, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 243, "access-bucket", + &instance_config.oss.access_bucket, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, { 0 } }; @@ -286,13 +314,22 @@ do_set_config(bool missing_ok) FILE *fp; int i; int nRet = 0; + char* bucket_name = NULL; + bool no_exist = false; join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE); nRet = snprintf_s(path_temp, sizeof(path_temp), sizeof(path_temp) - 1, "%s.tmp", path); securec_check_ss_c(nRet, "\0", "\0"); - if (!missing_ok && !fileExists(path, FIO_LOCAL_HOST)) + if (current.media_type == MEDIA_TYPE_OSS) { + Oss::Oss* oss = getOssClient(); + bucket_name = getBucketName(); + no_exist = !oss->ObjectExists(bucket_name, path); + } + + if (!missing_ok && !fileExists(path, FIO_LOCAL_HOST) && no_exist) { elog(ERROR, "Configuration file \"%s\" doesn't exist", path); + } fp = fopen(path_temp, "wt"); if (fp == NULL) @@ -338,6 +375,13 @@ do_set_config(bool missing_ok) elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s", path_temp, path, strerror(errno_temp)); } + + if (current.media_type == MEDIA_TYPE_OSS) { + Oss::Oss* oss 
= getOssClient(); + oss->RemoveObject(bucket_name, path); + oss->PutObject(bucket_name, path, path); + fio_unlink(path, FIO_BACKUP_HOST); + } } void @@ -558,6 +602,32 @@ readInstanceConfigFile(const char *instance_name) 'i', 235, "instance-id", &instance->dss.instance_id, SOURCE_CMD, (OptionSource)0, OPTION_DSS_GROUP, 0, option_get_value }, + /* OSS options */ + { + 's', 236, "access-id", + &instance_config.oss.access_id, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 237, "access-key", + &instance_config.oss.access_key, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 239, "endpoint", + &instance_config.oss.endpoint, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 240, "region", + &instance_config.oss.region, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, + { + 's', 243, "access-bucket", + &instance_config.oss.access_bucket, SOURCE_CMD, (OptionSource)0, + OPTION_OSS_GROUP, 0, option_get_value + }, { 0 } }; @@ -577,6 +647,9 @@ readInstanceConfigFile(const char *instance_name) join_path_components(path, instance->backup_instance_path, BACKUP_CATALOG_CONF_FILE); + if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigFile(path); + } if (fio_access(path, F_OK, FIO_BACKUP_HOST) != 0) { elog(WARNING, "Control file \"%s\" doesn't exist", path); @@ -608,6 +681,9 @@ readInstanceConfigFile(const char *instance_name) instance->xlog_seg_size = DEFAULT_XLOG_SEG_SIZE; #endif + if (current.media_type == MEDIA_TYPE_OSS) { + remove(path); + } return instance; } diff --git a/src/bin/pg_probackup/data.cpp b/src/bin/pg_probackup/data.cpp index 36b3d488fe..d386d3464a 100644 --- a/src/bin/pg_probackup/data.cpp +++ b/src/bin/pg_probackup/data.cpp @@ -31,6 +31,8 @@ #include "zstd.h" #include "storage/file/fio_device.h" #include "storage/buf/bufmgr.h" +#include "oss/include/appender.h" +#include "oss/include/restore.h" typedef struct PreReadBuf { 
@@ -471,8 +473,8 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno) * or header corruption, * only used for checkdb */ -static int32 -prepare_page(ConnectionArgs *conn_arg, +static +int32 prepare_page(ConnectionArgs *conn_arg, pgFile *file, XLogRecPtr prev_backup_start_lsn, BlockNumber blknum, FILE *in, BackupMode backup_mode, @@ -666,7 +668,8 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, FILE *in, FILE *out, pg_crc32 *crc, int page_state, Page page, CompressAlg calg, int clevel, - const char *from_fullpath, const char *to_fullpath) + const char *from_fullpath, const char *to_fullpath, + FileAppender* appender, char** fileBuffer) { int compressed_size = 0; size_t write_buffer_size = 0; @@ -699,13 +702,29 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, bph->compressed_size = compressed_size; write_buffer_size = compressed_size + sizeof(BackupPageHeader); - /* Update CRC */ - COMP_FILE_CRC32(true, *crc, write_buffer, write_buffer_size); - /* write data page */ - if (fio_fwrite(out, write_buffer, write_buffer_size) != write_buffer_size) - elog(ERROR, "File: \"%s\", cannot write at block %u: %s", - to_fullpath, blknum, strerror(errno)); + if (current.media_type == MEDIA_TYPE_OSS) { + if (fileBuffer != NULL) { + rc = memcpy_s(*fileBuffer, write_buffer_size, write_buffer, write_buffer_size); + securec_check_c(rc, "\0", "\0"); + *fileBuffer += write_buffer_size; + } else { + /* Update CRC */ + COMP_FILE_CRC32(true, *crc, write_buffer, write_buffer_size); + file->crc = *crc; + /* write data page */ + FileAppenderSegHeader content_header; + constructHeader(&content_header, FILE_APPEND_TYPE_FILE_CONTENT, write_buffer_size, 0, file); + writeHeader(&content_header, appender); + writePayload((char*)write_buffer, write_buffer_size, appender); + } + } else { + /* Update CRC */ + COMP_FILE_CRC32(true, *crc, write_buffer, write_buffer_size); + if (fio_fwrite(out, write_buffer, write_buffer_size) != write_buffer_size) + 
elog(ERROR, "File: \"%s\", cannot write at block %u: %s", + to_fullpath, blknum, strerror(errno)); + } file->write_size += write_buffer_size; file->uncompressed_size += BLCKSZ; @@ -726,7 +745,8 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, - HeaderMap *hdr_map, bool is_merge) + HeaderMap *hdr_map, bool is_merge, + FileAppender* appender, char* fileBuffer) { int rc; bool use_pagemap; @@ -771,6 +791,14 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file, file->uncompressed_size = 0; INIT_FILE_CRC32(true, file->crc); + if (fileBuffer == NULL && current.media_type == MEDIA_TYPE_OSS) { + size_t pathLen = strlen(file->rel_path); + FileAppenderSegHeader start_header; + constructHeader(&start_header, FILE_APPEND_TYPE_FILE, pathLen, 0, file); + writeHeader(&start_header, appender); + writePayload((char*)file->rel_path, pathLen, appender); + } + /* * Read each page, verify checksum and write it to backup. * If page map is empty or file is not present in previous backup @@ -794,7 +822,8 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file, /* send pagemap if any */ use_pagemap, /* variables for error reporting */ - &err_blknum, &errmsg, &headers); + &err_blknum, &errmsg, &headers, + appender, fileBuffer ? 
&fileBuffer : NULL); } else { @@ -802,7 +831,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file, /* send prev backup START_LSN */ InvalidXLogRecPtr, calg, clevel, checksum_version, use_pagemap, - &headers, backup_mode); + &headers, backup_mode, appender, fileBuffer); } /* check for errors */ @@ -866,6 +895,12 @@ cleanup: /* dump page headers */ write_page_headers(headers, file, hdr_map, is_merge); + if (fileBuffer == NULL && current.media_type == MEDIA_TYPE_OSS) { + FileAppenderSegHeader end_header; + constructHeader(&end_header, FILE_APPEND_TYPE_FILE_END, 0, file->read_size, file); + writeHeader(&end_header, appender); + } + pg_free(errmsg); pg_free(file->pagemap.bitmap); pg_free(headers); @@ -881,14 +916,14 @@ void backup_non_data_file(pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, BackupMode backup_mode, time_t parent_backup_time, - bool missing_ok) + bool missing_ok, FileAppender* appender, char* fileBuffer) { fio_location from_location = is_dss_file(from_fullpath) ? 
FIO_DSS_HOST : FIO_DB_HOST; /* special treatment for global/pg_control */ if (file->external_dir_num == 0 && strcmp(file->name, PG_XLOG_CONTROL_FILE) == 0) { copy_pgcontrol_file(from_fullpath, from_location, - to_fullpath, FIO_BACKUP_HOST, file); + to_fullpath, FIO_BACKUP_HOST, file); return; } @@ -909,7 +944,23 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, } } - backup_non_data_file_internal(from_fullpath, from_location, to_fullpath, file, true); + if (fileBuffer == NULL && current.media_type == MEDIA_TYPE_OSS) { + // write file start header and from_fullpath + size_t pathLen = strlen(file->rel_path); + INIT_FILE_CRC32(true, file->crc); + FileAppenderSegHeader start_header; + constructHeader(&start_header, FILE_APPEND_TYPE_FILE, pathLen, 0, file); + writeHeader(&start_header, appender); + writePayload((char*)file->rel_path, pathLen, appender); + } + backup_non_data_file_internal(from_fullpath, from_location, to_fullpath, file, true, + appender, fileBuffer ? &fileBuffer : NULL); + if (fileBuffer == NULL && current.media_type == MEDIA_TYPE_OSS) { + // write file end header + FileAppenderSegHeader end_header; + constructHeader(&end_header, FILE_APPEND_TYPE_FILE_END, 0, file->read_size, file); + writeHeader(&end_header, appender); + } } /* @@ -925,7 +976,6 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, size_t total_write_len = 0; char *in_buf = (char *)pgut_malloc(STDIO_BUFSIZE); int backup_seq = 0; - /* * FULL -> INCR -> DEST * 2 1 0 @@ -1550,10 +1600,11 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, return tmp_file->write_size; } -bool backup_remote_file(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok, FILE *out) +bool backup_remote_file(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok, FILE *out, + FileAppender* appender, char** fileBuffer) { char *errmsg = NULL; - int rc = fio_send_file(from_fullpath, to_fullpath, out, file, &errmsg); + 
int rc = fio_send_file(from_fullpath, to_fullpath, out, file, &errmsg, appender, fileBuffer); /* handle errors */ if (rc == FILE_MISSING) @@ -1590,7 +1641,8 @@ bool backup_remote_file(const char *from_fullpath, const char *to_fullpath, pgFi */ void backup_non_data_file_internal(const char *from_fullpath, fio_location from_location, - const char *to_fullpath, pgFile *file, bool missing_ok) + const char *to_fullpath, pgFile *file, bool missing_ok, + FileAppender* appender, char** fileBuffer) { FILE *in = NULL; FILE *out = NULL; @@ -1605,43 +1657,45 @@ backup_non_data_file_internal(const char *from_fullpath, fio_location from_locat file->uncompressed_size = 0; /* open backup file for write */ - out = fopen(to_fullpath, PG_BINARY_W); - if (out == NULL) - { - if (file->external_dir_num) + if (current.media_type != MEDIA_TYPE_OSS) { + out = fopen(to_fullpath, PG_BINARY_W); + if (out == NULL) { - char parent[MAXPGPATH]; - errno_t rc = 0; + if (file->external_dir_num) + { + char parent[MAXPGPATH]; + errno_t rc = 0; - rc = strncpy_s(parent, MAXPGPATH, to_fullpath, MAXPGPATH - 1); - securec_check_c(rc, "", ""); - get_parent_directory(parent); + rc = strncpy_s(parent, MAXPGPATH, to_fullpath, MAXPGPATH - 1); + securec_check_c(rc, "", ""); + get_parent_directory(parent); - dir_create_dir(parent, DIR_PERMISSION); - out = fopen(to_fullpath, PG_BINARY_W); - if (out == NULL) + dir_create_dir(parent, DIR_PERMISSION); + out = fopen(to_fullpath, PG_BINARY_W); + if (out == NULL) + elog(ERROR, "Cannot open destination file \"%s\": %s", + to_fullpath, strerror(errno)); + } + else + { elog(ERROR, "Cannot open destination file \"%s\": %s", to_fullpath, strerror(errno)); + } } - else + + /* update file permission */ + if (!is_dss_type(file->type)) { - elog(ERROR, "Cannot open destination file \"%s\": %s", - to_fullpath, strerror(errno)); + if (chmod(to_fullpath, file->mode) == -1) + elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, + strerror(errno)); } } - /* update file 
permission */ - if (!is_dss_type(file->type)) - { - if (chmod(to_fullpath, file->mode) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, - strerror(errno)); - } - /* backup remote file */ if (fio_is_remote(FIO_DB_HOST)) { - if (!backup_remote_file(from_fullpath, to_fullpath, file, missing_ok, out)) + if (!backup_remote_file(from_fullpath, to_fullpath, file, missing_ok, out, appender, fileBuffer)) goto cleanup; } /* backup local file */ @@ -1670,7 +1724,9 @@ backup_non_data_file_internal(const char *from_fullpath, fio_location from_locat /* disable stdio buffering for local input/output files to avoid triple buffering */ setvbuf(in, NULL, _IONBF, BUFSIZ); - setvbuf(out, NULL, _IONBF, BUFSIZ); + if (current.media_type != MEDIA_TYPE_OSS) { + setvbuf(out, NULL, _IONBF, BUFSIZ); + } /* allocate 64kB buffer */ buf = (char *)pgut_malloc(CHUNK_SIZE); @@ -1686,12 +1742,28 @@ backup_non_data_file_internal(const char *from_fullpath, fio_location from_locat if (read_len > 0) { - if (fwrite(buf, 1, read_len, out) != (size_t)read_len) - elog(ERROR, "Cannot write to file \"%s\": %s", to_fullpath, - strerror(errno)); + if (current.media_type == MEDIA_TYPE_OSS) { + if (fileBuffer != NULL) { + int rc = memcpy_s(*fileBuffer, read_len, buf, read_len); + securec_check_c(rc, "\0", "\0"); + *fileBuffer += read_len; + } else { + /* Update CRC */ + COMP_FILE_CRC32(true, file->crc, buf, read_len); + /* write data page */ + FileAppenderSegHeader content_header; + constructHeader(&content_header, FILE_APPEND_TYPE_FILE_CONTENT, read_len, 0, file); + writeHeader(&content_header, appender); + writePayload((char*)buf, read_len, appender); + } + } else { + if (fwrite(buf, 1, read_len, out) != (size_t)read_len) + elog(ERROR, "Cannot write to file \"%s\": %s", to_fullpath, + strerror(errno)); - /* update CRC */ - COMP_FILE_CRC32(true, file->crc, buf, read_len); + /* update CRC */ + COMP_FILE_CRC32(true, file->crc, buf, read_len); + } file->read_size += read_len; } @@ -1712,7 
+1784,7 @@ backup_non_data_file_internal(const char *from_fullpath, fio_location from_locat if (in && fclose(in)) elog(ERROR, "Cannot close the file \"%s\": %s", from_fullpath, strerror(errno)); - if (out && fclose(out)) + if (current.media_type != MEDIA_TYPE_OSS && out && fclose(out)) elog(ERROR, "Cannot close the file \"%s\": %s", to_fullpath, strerror(errno)); pg_free(buf); @@ -2336,7 +2408,7 @@ int send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, - BackupMode backup_mode) + BackupMode backup_mode, FileAppender* appender, char* fileBuffer) { FILE *in = NULL; FILE *out = NULL; @@ -2427,7 +2499,7 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f else if (rc == PageIsOk) { /* lazily open backup file (useful for s3) */ - if (!out) + if (current.media_type != MEDIA_TYPE_OSS && !out) out = open_local_file_rw(to_fullpath, &out_buf, STDIO_BUFSIZE); BackupPageHeader2 *header = pgut_new(BackupPageHeader2); @@ -2445,7 +2517,8 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f compressed_size = compress_and_backup_page(file, blknum, in, out, &(file->crc), rc, curr_page, calg, clevel, - from_fullpath, to_fullpath); + from_fullpath, to_fullpath, + appender, fileBuffer ? 
&fileBuffer : NULL); cur_pos_out += compressed_size + sizeof(BackupPageHeader); } @@ -2486,7 +2559,7 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f elog(ERROR, "Cannot close the source file \"%s\": %s", to_fullpath, strerror(errno)); /* close local output file */ - if (out && fclose(out)) + if (current.media_type != MEDIA_TYPE_OSS && out && fclose(out)) elog(ERROR, "Cannot close the backup file \"%s\": %s", to_fullpath, strerror(errno)); pg_free(iter); @@ -2521,11 +2594,17 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b if (file->n_headers <= 0) return NULL; + if (current.media_type == MEDIA_TYPE_OSS) { + pthread_lock(&(hdr_map->mutex)); + restoreConfigFile(hdr_map->path); + pthread_mutex_unlock(&(hdr_map->mutex)); + } + in = fopen(hdr_map->path, PG_BINARY_R); if (!in) { - elog(strict ? ERROR : WARNING, "Cannot open header file \"%s\": %s", hdr_map->path, strerror(errno)); + elog(strict ? ERROR : WARNING, "Cannot open header file1 \"%s\": %s", hdr_map->path, strerror(errno)); return NULL; } /* disable buffering for header file */ @@ -2603,6 +2682,10 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b headers = NULL; } + if (current.media_type == MEDIA_TYPE_OSS) { + remove(hdr_map->path); + } + return headers; } diff --git a/src/bin/pg_probackup/delete.cpp b/src/bin/pg_probackup/delete.cpp index 0dc96d58a4..cd5541c095 100644 --- a/src/bin/pg_probackup/delete.cpp +++ b/src/bin/pg_probackup/delete.cpp @@ -15,6 +15,7 @@ #include #include #include "common/fe_memutils.h" +#include "oss/include/oss_operator.h" static void delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tli, uint32 xlog_seg_size, bool dry_run); @@ -39,6 +40,9 @@ do_delete(time_t backup_id) pgBackup *target_backup = NULL; size_t size_to_delete = 0; char size_to_delete_pretty[20]; + Oss::Oss* oss = getOssClient(); + char* bucket_name = NULL; + /* Get complete list of backups */ backup_list = 
catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); @@ -93,6 +97,12 @@ do_delete(time_t backup_id) if (!dry_run) { + if (current.media_type == MEDIA_TYPE_OSS) { + bucket_name = getBucketName(); + if (!oss->BucketExists(bucket_name)) { + elog(ERROR, "bucket %s not found.", bucket_name); + } + } /* Lock marked for delete backups */ catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0, false); @@ -105,6 +115,17 @@ do_delete(time_t backup_id) elog(ERROR, "interrupted during delete backup"); delete_backup_files(backup); + if (current.media_type == MEDIA_TYPE_OSS) { + char* prefix_name = getPrefixName(backup); + parray *delete_obj_list = parray_new(); + oss->ListObjectsWithPrefix(bucket_name, prefix_name, delete_obj_list); + for (size_t j = 0; j < parray_num(delete_obj_list); j++) { + char* object = (char*)parray_get(delete_obj_list, j); + oss->RemoveObject(bucket_name, object); + elog(INFO, "Object '%s' successfully deleted from S3", object); + } + parray_free(delete_obj_list); + } } } @@ -165,6 +186,10 @@ void do_retention(void) backup_deleted = false; backup_merged = false; + if (current.media_type == MEDIA_TYPE_OSS) { + elog(ERROR, "Not supported when specifying OSS options"); + } + /* Get a complete list of backups. */ backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); @@ -1018,7 +1043,6 @@ do_delete_instance(void) size_t i = 0; char instance_config_path[MAXPGPATH]; - /* Delete all backups. 
*/ backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); @@ -1039,7 +1063,7 @@ do_delete_instance(void) /* Delete backup instance config file */ join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE); - if (remove(instance_config_path)) + if (current.media_type != MEDIA_TYPE_OSS && remove(instance_config_path)) { elog(ERROR, "Can't remove \"%s\": %s", instance_config_path, strerror(errno)); @@ -1054,6 +1078,25 @@ do_delete_instance(void) elog(ERROR, "Can't remove \"%s\": %s", arclog_path, strerror(errno)); + if (current.media_type == MEDIA_TYPE_OSS) { + Oss::Oss* oss = getOssClient(); + char* bucket_name = getBucketName(); + if (!oss->BucketExists(bucket_name)) { + elog(ERROR, "bucket %s not found.", bucket_name); + } else { + // delete backups first + parray *delete_list = parray_new(); + char* prefix_name = backup_instance_path + 1; + oss->ListObjectsWithPrefix(bucket_name, prefix_name, delete_list); + for (i = 0; i < parray_num(delete_list); i++) { + char* object = (char*)parray_get(delete_list, i); + oss->RemoveObject(bucket_name, object); + elog(INFO, "Object '%s' successfully deleted from S3", object); + } + parray_free(delete_list); + } + } + elog(INFO, "Instance '%s' successfully deleted", instance_name); return 0; } @@ -1069,6 +1112,8 @@ do_delete_status(InstanceConfig *instance_config, const char *status) size_t size_to_delete = 0; char size_to_delete_pretty[20]; pgBackup *backup; + Oss::Oss* oss = getOssClient(); + char* bucket_name = NULL; BackupStatus status_for_delete = str2status(status); delete_list = parray_new(); @@ -1126,6 +1171,13 @@ do_delete_status(InstanceConfig *instance_config, const char *status) catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0, false); } + if (current.media_type == MEDIA_TYPE_OSS) { + bucket_name = getBucketName(); + if (!oss->BucketExists(bucket_name)) { + elog(ERROR, "bucket %s not found.", bucket_name); + } + } + /* delete and calculate free 
size from delete_list */ for (i = 0; i < parray_num(delete_list); i++) { @@ -1140,6 +1192,17 @@ do_delete_status(InstanceConfig *instance_config, const char *status) if (!dry_run) { delete_backup_files(backup); + if (current.media_type == MEDIA_TYPE_OSS) { + char* prefix_name = getPrefixName(backup); + parray *delete_list = parray_new(); + oss->ListObjectsWithPrefix(bucket_name, prefix_name, delete_list); + for (size_t j = 0; j < parray_num(delete_list); j++) { + char* object = (char*)parray_get(delete_list, j); + oss->RemoveObject(bucket_name, object); + elog(INFO, "Object '%s' successfully deleted from S3", object); + } + parray_free(delete_list); + } } n_deleted++; diff --git a/src/bin/pg_probackup/dir.cpp b/src/bin/pg_probackup/dir.cpp index b2f21264d0..e07894a00d 100644 --- a/src/bin/pg_probackup/dir.cpp +++ b/src/bin/pg_probackup/dir.cpp @@ -26,6 +26,7 @@ #include "common/fe_memutils.h" #include "PageCompression.h" #include "storage/file/fio_device.h" +#include "oss/include/restore.h" /* * The contents of these directories are removed or recreated during server @@ -1760,6 +1761,10 @@ dir_read_file_list(const char *root, const char *external_prefix, char stdio_buf[STDIO_BUFSIZE]; pg_crc32 content_crc = 0; + if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigFile(file_txt); + } + fp = fio_open_stream(file_txt, location); if (fp == NULL) elog(ERROR, "cannot open \"%s\": %s", file_txt, strerror(errno)); @@ -1879,6 +1884,10 @@ dir_read_file_list(const char *root, const char *external_prefix, return NULL; } + if (current.media_type == MEDIA_TYPE_OSS) { + remove(file_txt); + } + return files; } @@ -2084,6 +2093,9 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ parray_append(backup_files_list, file); } + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(database_map_path, database_map_path); + } } /* diff --git a/src/bin/pg_probackup/file.cpp b/src/bin/pg_probackup/file.cpp index 29c2184339..f5ccf7e391 100644 
--- a/src/bin/pg_probackup/file.cpp +++ b/src/bin/pg_probackup/file.cpp @@ -17,6 +17,7 @@ #include "storage/checksum.h" #include "storage/file/fio_device.h" #include "common/fe_memutils.h" +#include "oss/include/appender.h" #define PRINTF_BUF_SIZE 1024 @@ -1206,7 +1207,7 @@ static void fio_load_file(int out, char const* path) int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber* err_blknum, char **errormsg, - BackupPageHeader2 **headers) + BackupPageHeader2 **headers, FileAppender* appender, char** fileBuffer) { FILE *out = NULL; char *out_buf = NULL; @@ -1330,15 +1331,29 @@ int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *f COMP_FILE_CRC32(true, file->crc, buf, hdr.size); /* lazily open backup file */ - if (!out) + if (current.media_type != MEDIA_TYPE_OSS && !out) out = open_local_file_rw(to_fullpath, &out_buf, STDIO_BUFSIZE); - if (fio_fwrite(out, buf, hdr.size) != hdr.size) - { - fio_fclose(out); - *err_blknum = blknum; - return WRITE_FAILED; + if (current.media_type == MEDIA_TYPE_OSS) { + if (fileBuffer != NULL) { + int rc = memcpy_s(*fileBuffer, hdr.size, buf, hdr.size); + securec_check_c(rc, "\0", "\0"); + *fileBuffer += hdr.size; + } else { + /* write data page */ + FileAppenderSegHeader content_header; + constructHeader(&content_header, FILE_APPEND_TYPE_FILE_CONTENT, hdr.size, 0, file); + writeHeader(&content_header, appender); + writePayload((char*)buf, hdr.size, appender); + } + } else { + if (fio_fwrite(out, buf, hdr.size) != hdr.size) { + fio_fclose(out); + *err_blknum = blknum; + return WRITE_FAILED; + } } + file->write_size += hdr.size; file->uncompressed_size += BLCKSZ; } @@ -1346,7 +1361,7 @@ int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *f elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); } - if (out) + if (current.media_type != 
MEDIA_TYPE_OSS && out) fclose(out); pg_free(out_buf); @@ -1681,7 +1696,8 @@ static char *ProcessErrorIn(int out, fio_header &hdr, const char *fromFullpath) * If pgFile is not NULL then we must calculate crc and read_size for it. */ int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, - pgFile *file, char **errormsg) + pgFile *file, char **errormsg, + FileAppender* appender, char** fileBuffer) { fio_header hdr; int exit_code = SEND_OK; @@ -1727,17 +1743,32 @@ int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, } IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - /* We have received a chunk of data data, lets write it out */ - if (fwrite(buf, 1, hdr.size, out) != hdr.size) - { - exit_code = WRITE_FAILED; - break; - } - - if (file) - { + if (current.media_type == MEDIA_TYPE_OSS) { + if (fileBuffer != NULL) { + int rc = memcpy_s(*fileBuffer, hdr.size, buf, hdr.size); + securec_check_c(rc, "\0", "\0"); + *fileBuffer += hdr.size; + } else { + /* Update CRC */ + COMP_FILE_CRC32(true, file->crc, buf, hdr.size); + /* write data page */ + FileAppenderSegHeader content_header; + constructHeader(&content_header, FILE_APPEND_TYPE_FILE_CONTENT, hdr.size, 0, file); + writeHeader(&content_header, appender); + writePayload((char*)buf, hdr.size, appender); + } file->read_size += hdr.size; - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); + } else { + /* We have received a chunk of data data, lets write it out */ + if (fwrite(buf, 1, hdr.size, out) != hdr.size) { + exit_code = WRITE_FAILED; + break; + } + + if (file) { + file->read_size += hdr.size; + COMP_FILE_CRC32(true, file->crc, buf, hdr.size); + } } } else diff --git a/src/bin/pg_probackup/init.cpp b/src/bin/pg_probackup/init.cpp index 70f43a0853..ba76730b66 100644 --- a/src/bin/pg_probackup/init.cpp +++ b/src/bin/pg_probackup/init.cpp @@ -9,6 +9,8 @@ */ #include "pg_probackup.h" +#include "oss/include/oss_operator.h" +#include "oss/include/restore.h" #include 
#include @@ -77,6 +79,14 @@ do_add_instance(InstanceConfig *instance) if (access(arclog_path_dir, F_OK) != 0) elog(ERROR, "Directory does not exist: '%s'", arclog_path_dir); + if (current.media_type == MEDIA_TYPE_OSS) { + Oss::Oss* oss = getOssClient(); + char* bucket_name = getBucketName(); + if (!oss->BucketExists(bucket_name)) { + elog(ERROR, "Bucket '%s' does not exist on OSS, please create it first.", bucket_name); + } + } + if (stat(instance->backup_instance_path, &st) == 0 && S_ISDIR(st.st_mode)) elog(ERROR, "Instance '%s' backup directory already exists: '%s'", instance->name, instance->backup_instance_path); @@ -125,6 +135,17 @@ do_add_instance(InstanceConfig *instance) config_set_opt(instance_options, &instance_config.remote.ssh_config, SOURCE_DEFAULT); + config_set_opt(instance_options, &instance_config.oss.access_id, + SOURCE_DEFAULT); + config_set_opt(instance_options, &instance_config.oss.access_key, + SOURCE_DEFAULT); + config_set_opt(instance_options, &instance_config.oss.endpoint, + SOURCE_DEFAULT); + config_set_opt(instance_options, &instance_config.oss.region, + SOURCE_DEFAULT); + config_set_opt(instance_options, &instance_config.oss.access_bucket, + SOURCE_DEFAULT); + /* pgdata and vgname were set through command line */ do_set_config(true); diff --git a/src/bin/pg_probackup/merge.cpp b/src/bin/pg_probackup/merge.cpp index 17088e66d9..b29c130675 100644 --- a/src/bin/pg_probackup/merge.cpp +++ b/src/bin/pg_probackup/merge.cpp @@ -121,6 +121,11 @@ do_merge(time_t backup_id) pgBackup *dest_backup = NULL; pgBackup *full_backup = NULL; + + if (current.media_type == MEDIA_TYPE_OSS) { + elog(ERROR, "Not supported when specifying OSS options"); + } + if (backup_id == INVALID_BACKUP_ID) elog(ERROR, "required parameter is not specified: --backup-id"); diff --git a/src/bin/pg_probackup/oss/appender.cpp b/src/bin/pg_probackup/oss/appender.cpp new file mode 100644 index 0000000000..67d1c4beeb --- /dev/null +++ b/src/bin/pg_probackup/oss/appender.cpp @@ -0,0 
+1,227 @@ +/*------------------------------------------------------------------------- + * + * appender.cpp: Appender used by Backup/Recovery manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ + +#include "include/appender.h" +#include "include/buffer.h" +#include "workload/gscgroup.h" +#include "include/restore.h" +#include "common/fe_memutils.h" + +void initFileAppender(FileAppender* appender, FILE_APPEND_SEG_TYPE type, uint32 minFileNo, uint32 maxFileNo) +{ + appender->fileNo = maxFileNo; + appender->minFileNo = minFileNo; + appender->maxFileNo = maxFileNo; + appender->type = type; + appender->currFileSize = 0; + appender->filePtr = NULL; + appender->currFileName = getAppendFileName(appender->baseFileName, appender->fileNo); + appender->filePtr = openWriteBufferFile(appender->currFileName, "wb"); + FileAppenderSegHeader header; + header.type = type; + header.size = 0; + header.permission = 0; + header.filesize = 0; + header.crc = 0; + header.external_dir_num = 0; + header.file_type = DEV_TYPE_INVALID; + writeHeader(&header,appender); +} + +void initSegDescriptor(FileAppenderSegDescriptor** segDesc) +{ + FileAppenderSegDescriptor* desc = (FileAppenderSegDescriptor*)palloc(sizeof(FileAppenderSegDescriptor)); + if (desc == NULL) { + elog(ERROR, "Failed to allocate memory for seg descriptor."); + return; + } + desc->header.type = FILE_APPEND_TYPE_UNKNOWN; + desc->header_offset = -1; + desc->payload_offset = -1; + desc->crc = 0; + desc->payload = NULL; + desc->outputFile = NULL; + desc->inputFile = NULL; + *segDesc = desc; +} + +void getSegDescriptor(FileAppenderSegDescriptor* desc, char** buffOffset, size_t* remainBuffLen, BufferCxt* cxt) +{ + errno_t rc; + *remainBuffLen = *remainBuffLen - sizeof(FileAppenderSegHeader); 
+ /* The header may span across two buffs. + * So, we cannot directly copy the header from the buffer. + */ + if (likely(desc->header_offset == -1)) { + rc = memcpy_s(&desc->header, sizeof(FileAppenderSegHeader), *buffOffset, sizeof(FileAppenderSegHeader)); + securec_check(rc, "\0", "\0"); + desc->payload = *buffOffset + sizeof(FileAppenderSegHeader); + *buffOffset = desc->payload; + } else { + rc = memcpy_s(&desc->header, desc->header_offset, *buffOffset, desc->header_offset); + securec_check(rc, "\0", "\0"); + rc = memcpy_s(&desc->header + desc->header_offset, (sizeof(FileAppenderSegHeader) - desc->header_offset), + cxt->bufData, (sizeof(FileAppenderSegHeader) - desc->header_offset)); + securec_check(rc, "\0", "\0"); + desc->payload = cxt->bufData + (sizeof(FileAppenderSegHeader) - desc->header_offset); + *buffOffset = desc->payload; + } +} + +void parseSegDescriptor(FileAppenderSegDescriptor* desc, char** buffOffset, size_t* remainBuffLen, char* tempBuffer, + BufferCxt* cxt, pgBackup* dest_backup, bool isValidate, validate_files_arg* arg) { + error_t rc = 0; + parray* files = dest_backup->files; + *remainBuffLen = *remainBuffLen - desc->header.size; + + if (desc->payload_offset == -1) { + rc = memcpy_s(tempBuffer, desc->header.size, desc->payload, desc->header.size); + securec_check(rc, "\0", "\0"); + *buffOffset = desc->payload + desc->header.size; + } else { + rc = memcpy_s(tempBuffer, desc->payload_offset, desc->payload, desc->payload_offset); + securec_check(rc, "\0", "\0"); + rc = memcpy_s(tempBuffer + desc->payload_offset, (desc->header.size - desc->payload_offset), + cxt->bufData, (desc->header.size - desc->payload_offset)); + securec_check(rc, "\0", "\0"); + *buffOffset = cxt->bufData + (desc->header.size - desc->payload_offset); + desc->payload_offset = -1; + } + + if (desc->header.type == FILE_APPEND_TYPE_FILES_END || desc->header.type == FILE_APPEND_TYPE_FILES) { + return; + } else if (desc->header.type == FILE_APPEND_TYPE_DIR) { + 
restoreDir(tempBuffer, desc, dest_backup, files, isValidate); + } else if (desc->header.type == FILE_APPEND_TYPE_FILE) { + openRestoreFile(tempBuffer, desc, dest_backup, files, isValidate, arg); + } else if (desc->header.type == FILE_APPEND_TYPE_FILE_CONTENT) { + writeOrValidateRestoreFile(tempBuffer, desc, isValidate, arg); + } else if (desc->header.type == FILE_APPEND_TYPE_FILE_END) { + closeRestoreFile(desc); + } else { + if (isValidate) { + arg->corrupted = true; + } else { + elog(ERROR, "Unknown file type: %d, when restore file: %s", desc->header.type, desc->inputFile->rel_path); + } + } +} + +void destorySegDescriptor(FileAppenderSegDescriptor** descriptor) +{ + FileAppenderSegDescriptor* desc = *descriptor; + pfree_ext(desc); +} + +void closeFileAppender(FileAppender* appender) +{ + if (!appender) { + return; + } + FileAppenderSegHeader header; + header.type = FILE_APPEND_TYPE_FILES_END; + header.size = 0; + header.permission = 0; + header.filesize = 0; + header.crc = 0; + header.external_dir_num = 0; + header.file_type = DEV_TYPE_INVALID; + ((BufferCxt *)appender->filePtr)->fileEnd = true; + writeHeader(&header, appender); +} + +void destoryFileAppender(FileAppender** retAppender) +{ + FileAppender* appender = *retAppender; + if (appender != NULL) { + if (appender->baseFileName != NULL) { + pfree_ext(appender->baseFileName); + } + if (appender->currFileName != NULL) { + pfree_ext(appender->currFileName); + } + pfree_ext(appender); + appender = NULL; + } +} + +char* getAppendFileName(const char* baseFileName, uint32 fileNo) +{ + char* fileName = NULL; + if (baseFileName == NULL) { + return NULL; + } + size_t nameLen = strlen(baseFileName) + APPEND_FILENAME_END_SIZE + APPEND_FILENAME_END_DIGIT; + fileName = (char*)palloc(nameLen); + if (fileName == NULL) { + elog(ERROR, "Failed to allocate memory for file name"); + } + errno_t rc = snprintf_s(fileName, nameLen, (nameLen - 1), "%s/file-%u.pbk", baseFileName, fileNo); + securec_check_ss_c(rc, "\0", "\0"); + 
return fileName; +} + +void constructHeader(FileAppenderSegHeader* header, FILE_APPEND_SEG_TYPE type, + uint32 size, off_t filesize, pgFile* file) +{ + header->type = type; + header->size = size; + header->permission = file->mode; + header->filesize = filesize; + header->crc = file->crc; + header->external_dir_num = file->external_dir_num; + header->file_type = file->type; +} + +void writeHeader(FileAppenderSegHeader* header, FileAppender* appender) +{ + size_t writeLen = 0; + if (!appender || ((appender->currFileSize + APPEND_FILE_HEADER_SIZE) > APPEND_FILE_MAX_SIZE)) { + elog(ERROR, "Write header failed."); + } + if (header->type != FILE_APPEND_TYPE_FILES_END && (appender->currFileSize + APPEND_FILE_HEADER_SIZE + header->size) > + (APPEND_FILE_MAX_SIZE - APPEND_FILE_HEADER_SIZE)) { + uint32 minFileNo = appender->minFileNo; + uint32 maxFileNo = appender->maxFileNo; + closeFileAppender(appender); + initFileAppender(appender, FILE_APPEND_TYPE_FILES, minFileNo, maxFileNo + 1); + } + /* filePtr is a buffer context*/ + writeLen = writeToCompFile((char*)header, sizeof(FileAppenderSegHeader), appender->filePtr); + if (writeLen != sizeof(FileAppenderSegHeader)) { + elog(ERROR, "Write header failed, write length: %lu.", writeLen); + } + appender->currFileSize += writeLen; +} + + +size_t writeToCompFile(const char* data, size_t len, void* file) +{ + if (writeToBuffer(data, len, file) != len) { + return 0; + } + return len; +} + +void writePayload(const char* data, size_t len, FileAppender* appender) +{ + if (appender->currFileSize + len > (APPEND_FILE_MAX_SIZE - APPEND_FILE_HEADER_SIZE)) { + uint32 minFileNo = appender->minFileNo; + uint32 maxFileNo = appender->maxFileNo; + closeFileAppender(appender); + initFileAppender(appender, FILE_APPEND_TYPE_FILES, minFileNo, maxFileNo + 1); + } + size_t writeLen = writeToCompFile(data, len, appender->filePtr); + if (writeLen != len) { + elog(ERROR, "Write payload data failed, write length: %lu.", writeLen); + } + 
appender->currFileSize += writeLen; +} \ No newline at end of file diff --git a/src/bin/pg_probackup/oss/backup.cpp b/src/bin/pg_probackup/oss/backup.cpp new file mode 100644 index 0000000000..5ce1771986 --- /dev/null +++ b/src/bin/pg_probackup/oss/backup.cpp @@ -0,0 +1,317 @@ +/*------------------------------------------------------------------------- + * + * backup.cpp: Backup api used by Backup/Recovery manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#include "include/backup.h" +#include "storage/file/fio_device.h" +#include "common/fe_memutils.h" + +/* Progress Counter */ +static int g_doneFiles = 0; +static int g_totalFiles = 0; +static volatile bool g_progressFlag = false; +static pthread_cond_t g_cond = PTHREAD_COND_INITIALIZER; +static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER; + +static void handleZeroSizeFile(FileAppender *appender, pgFile* file); +static void *ProgressReportProbackup(void *arg); + +void performBackup(backup_files_arg* arg) +{ + backupReaderThreadArgs* thread_args = (backupReaderThreadArgs*)palloc(sizeof(backupReaderThreadArgs) * current.readerThreadCount); + initPerformBackup(arg, thread_args); + backupDataFiles(arg); + pfree(thread_args); +} + +void initPerformBackup(backup_files_arg* arg, backupReaderThreadArgs* thread_args) +{ + startBackupSender(); + startBackupReaders(arg, thread_args); +} + +void backupDataFiles(backup_files_arg* arg) +{ + FileAppender* appender = NULL; + /* bucket name: instance_name; object name: /instance_name/backupset_name/file-%d.pbk + * instance_name complies with the naming rules of object storage service + */ + appender = (FileAppender*)palloc(sizeof(FileAppender)); + if (appender == NULL) { + elog(ERROR, "Failed to allocate memory for appender"); 
+ return; + } + appender->baseFileName = pg_strdup(current.root_dir); + initFileAppender(appender, FILE_APPEND_TYPE_FILES, 0, 0); + /* backup starts */ + backupDirectories(appender, arg); + backupFiles(appender, arg); + /* clean up FileAppender and reader threads*/ + flushReaderContexts(arg); + closeFileAppender(appender); + destoryFileAppender(&appender); + stopBackupReaders(); + destoryBackupReaderContexts(); + /* no more new data to be produced, backup ends */ + setSenderState(current.sender_cxt, SENDER_THREAD_STATE_FINISH); + waitForSenderThread(); + stopBackupSender(); + destoryBackupSenderContext(); +} + +void backupDirectories(FileAppender* appender, backup_files_arg* arg) +{ + int totalFiles = (int)parray_num(arg->files_list); + g_totalFiles = totalFiles; + for (int i = 0; i < totalFiles; i++) { + pgFile* dir = (pgFile*) parray_get(arg->files_list, i); + /* if the entry was a directory, create it in the backup file */ + if (S_ISDIR(dir->mode)) { + char dirpath[MAXPGPATH]; + int nRet = snprintf_s(dirpath, MAXPGPATH, MAXPGPATH - 1, "%s", dir->rel_path); + securec_check_ss_c(nRet, "\0", "\0"); + // write into backup file + appendDir(appender, dirpath, DIR_PERMISSION, dir->external_dir_num, dir->type); + g_doneFiles++; + } + } +} + +void appendDir(FileAppender* appender, const char* dirPath, uint32 permission, + int external_dir_num, device_type_t type) +{ + FileAppenderSegHeader header; + size_t pathLen = strlen(dirPath); + header.type = FILE_APPEND_TYPE_DIR; + header.size = pathLen; + header.permission = permission; + header.filesize = 0; + header.crc = 0; + header.external_dir_num = external_dir_num; + header.file_type = type; + writeHeader(&header, appender); + writePayload((char*)dirPath, pathLen, appender); +} + +void backupFiles(FileAppender* appender, backup_files_arg* arg) +{ + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + static time_t prev_time; + time_t start_time, end_time; + char pretty_time[20]; + int n_backup_files_list = 
parray_num(arg->files_list); + prev_time = current.start_time; + + /* Sort by size for load balancing */ + parray_qsort(arg->files_list, pgFileCompareSize); + /* Sort the array for binary search */ + if (arg->prev_filelist) + parray_qsort(arg->prev_filelist, pgFileCompareRelPathWithExternal); + /* write initial backup_content.control file and update backup.control */ + write_backup_filelist(¤t, arg->files_list, + instance_config.pgdata, arg->external_dirs, true); + write_backup(¤t, true); + /* Init backup page header map */ + init_header_map(¤t); + /* Run threads */ + thread_interrupted = false; + elog(INFO, "Start backing up files"); + time(&start_time); + /* Create the thread for progress report */ + pthread_t progressThread; + pthread_create(&progressThread, nullptr, ProgressReportProbackup, nullptr); + + /* backup a file */ + for (int i = 0; i < n_backup_files_list; i++) { + pgFile *prev_file = NULL; + pgFile *file = (pgFile *) parray_get(arg->files_list, i); + /* We have already copied all directories */ + if (S_ISDIR(file->mode)) { + continue; + } + /* check for interrupt */ + if (interrupted || thread_interrupted) { + elog(ERROR, "interrupted during backup"); + } + if (progress) + elog_file(INFO, "Progress: (%d/%d). 
Process file \"%s\"", + i + 1, n_backup_files_list, file->rel_path); + /* update done_files */ + pg_atomic_add_fetch_u32((volatile uint32*) &g_doneFiles, 1); + + /* Handle zero sized files */ + if (file->size == 0) { + file->write_size = 0; + handleZeroSizeFile(appender, file); + continue; + } + + /* construct filepath */ + if (file->external_dir_num != 0) { + char external_dst[MAXPGPATH]; + char *external_path = (char *)parray_get(arg->external_dirs, + file->external_dir_num - 1); + + makeExternalDirPathByNum(external_dst, + arg->external_prefix, + file->external_dir_num); + + join_path_components(to_fullpath, external_dst, file->rel_path); + join_path_components(from_fullpath, external_path, file->rel_path); + } else if (is_dss_type(file->type)) { + join_path_components(from_fullpath, arg->src_dss, file->rel_path); + join_path_components(to_fullpath, arg->dst_dss, file->rel_path); + } else { + join_path_components(from_fullpath, arg->from_root, file->rel_path); + join_path_components(to_fullpath, arg->to_root, file->rel_path); + } + + /* Encountered some strange beast */ + if (!S_ISREG(file->mode)) { + elog(WARNING, "Unexpected type %d of file \"%s\", skipping", + file->mode, from_fullpath); + } + + /* Check that file exist in previous backup */ + if (current.backup_mode != BACKUP_MODE_FULL) { + pgFile **prev_file_tmp = NULL; + prev_file_tmp = (pgFile **) parray_bsearch(arg->prev_filelist, + file, pgFileCompareRelPathWithExternal); + if (prev_file_tmp) { + /* File exists in previous backup */ + file->exists_in_prev = true; + prev_file = *prev_file_tmp; + } + } + + /* special treatment for global/pg_control */ + if (file->external_dir_num == 0 && strcmp(file->name, PG_XLOG_CONTROL_FILE) == 0) { + char* filename = last_dir_separator(to_fullpath); + char* dirpath = strndup(to_fullpath, filename - to_fullpath + 1); + fio_mkdir(dirpath, DIR_PERMISSION, FIO_BACKUP_HOST); + pg_free(dirpath); + } + + /* If the file size is less than 8MB, + * a load-balancing reason 
prevents the direct writing of the appender file + */ + if (file->size <= FILE_BUFFER_SIZE && current.readerThreadCount > 0) { + int thread_slot = getFreeReaderThread(); + while (thread_slot == -1) { + flushReaderContexts(arg); + thread_slot = getFreeReaderThread(); + } + ReaderCxt* reader_cxt = ¤t.readerCxt[thread_slot]; + int current_fileidx = reader_cxt->fileCount; + reader_cxt->file[current_fileidx] = file; + reader_cxt->prefile[current_fileidx] = prev_file; + reader_cxt->fromPath[current_fileidx] = pgut_strdup(from_fullpath); + reader_cxt->toPath[current_fileidx] = pgut_strdup(to_fullpath); + reader_cxt->appender = appender; + reader_cxt->segType[current_fileidx] = FILE_APPEND_TYPE_FILE; + reader_cxt->fileRemoved[current_fileidx] = false; + reader_cxt->fileCount++; + if (reader_cxt->fileCount == READER_THREAD_FILE_COUNT) { + setReaderState(reader_cxt, READER_THREAD_STATE_START); + } + } else { + if (file->is_datafile && !file->is_cfs) { + backup_data_file(&(arg->conn_arg), file, from_fullpath, to_fullpath, + arg->prev_start_lsn, + current.backup_mode, + instance_config.compress_alg, + instance_config.compress_level, + arg->nodeInfo->checksum_version, + arg->hdr_map, false, appender, NULL); + } else { + backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, + current.backup_mode, current.parent_backup, true, appender, NULL); + } + } + + if (file->write_size == FILE_NOT_FOUND) { + continue; + } + + if (file->write_size == BYTES_INVALID) { + elog(VERBOSE, "Skipping the unchanged file: \"%s\"", from_fullpath); + continue; + } + } + g_progressFlag = true; + pthread_mutex_lock(&g_mutex); + pthread_cond_signal(&g_cond); + pthread_mutex_unlock(&g_mutex); + pthread_join(progressThread, nullptr); + + /* ssh connection to longer needed */ + fio_disconnect(); + /* Close connection */ + if (arg->conn_arg.conn) { + pgut_disconnect(arg->conn_arg.conn); + } + /* Data files transferring is successful */ + arg->ret = 0; + elog(INFO, "Finish backuping file"); + 
time(&end_time); + pretty_time_interval(difftime(end_time, start_time), + pretty_time, lengthof(pretty_time)); + elog(INFO, "Backup files are backuped to oss, time elapsed: %s", pretty_time); +} + + +/* static function*/ + +static void handleZeroSizeFile(FileAppender *appender, pgFile* file) +{ + size_t pathLen = strlen(file->rel_path); + FileAppenderSegHeader start_header; + constructHeader(&start_header, FILE_APPEND_TYPE_FILE, pathLen, 0, file); + writeHeader(&start_header, appender); + writePayload((char*)file->rel_path, pathLen, appender); + FileAppenderSegHeader end_header; + constructHeader(&end_header, FILE_APPEND_TYPE_FILE_END, 0, 0, file); + writeHeader(&end_header, appender); +} + +/* copy from backup.c for static variables*/ +static void *ProgressReportProbackup(void *arg) +{ + if (g_totalFiles == 0) { + return nullptr; + } + char progressBar[53]; + int percent; + do { + /* progress report */ + percent = (int)(g_doneFiles * 100 / g_totalFiles); + GenerateProgressBar(percent, progressBar); + fprintf(stdout, "Progress: %s %d%% (%d/%d, done_files/total_files). backup file \r", + progressBar, percent, g_doneFiles, g_totalFiles); + pthread_mutex_lock(&g_mutex); + timespec timeout; + timeval now; + gettimeofday(&now, nullptr); + timeout.tv_sec = now.tv_sec + 1; + timeout.tv_nsec = 0; + int ret = pthread_cond_timedwait(&g_cond, &g_mutex, &timeout); + pthread_mutex_unlock(&g_mutex); + if (ret == ETIMEDOUT) { + continue; + } else { + break; + } + } while ((g_doneFiles < g_totalFiles) && !g_progressFlag); + percent = 100; + GenerateProgressBar(percent, progressBar); + fprintf(stdout, "Progress: %s %d%% (%d/%d, done_files/total_files). 
backup file \n", + progressBar, percent, g_doneFiles, g_totalFiles); + return nullptr; +} \ No newline at end of file diff --git a/src/bin/pg_probackup/oss/buffer.cpp b/src/bin/pg_probackup/oss/buffer.cpp new file mode 100644 index 0000000000..f2f0a0f92d --- /dev/null +++ b/src/bin/pg_probackup/oss/buffer.cpp @@ -0,0 +1,176 @@ +/*------------------------------------------------------------------------- + * + * buffer.cpp: Buffer used by Backup/Recovery manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ + +#include "include/buffer.h" +#include "include/thread.h" + +void initBufferCxt(BufferCxt* cxt, size_t bufferSize) +{ + size_t bufnum = (bufferSize + BUFSIZE -1) / BUFSIZE; + cxt->bufNum = bufnum; + cxt->bufHeader = (BufferDesc*)palloc(sizeof(BufferDesc) * bufnum); + cxt->bufData = (char*)palloc(BUFSIZE * bufnum); + cxt->fileEnd = false; + cxt->fileNum = -1; + cxt->producerIdx = 0; + cxt->producerIdxCache = 0; + cxt->consumerIdx = 0; + cxt->consumerIdxCache = 0; + cxt->earlyExit = false; + if (cxt->bufHeader == NULL || cxt->bufData == NULL) { + pfree_ext(cxt->bufHeader); + pfree_ext(cxt->bufData); + elog(ERROR, "buffer context allocate failed: out of memory"); + } + for(size_t i = 0; i < bufnum; i++) { + cxt->bufHeader[i].bufId = i; + cxt->bufHeader[i].fileId = -1; + cxt->bufHeader[i].usedLen = 0; + cxt->bufHeader[i].flags = 0; + pthread_spin_init(&cxt->bufHeader[i].lock, PTHREAD_PROCESS_PRIVATE); + } +} + +void destroyBufferCxt(BufferCxt* cxt) +{ + for(size_t i = 0; i < cxt->bufNum; i++) { + pthread_spin_destroy(&cxt->bufHeader[i].lock); + } + pfree_ext(cxt->bufHeader); + pfree_ext(cxt->bufData); +} + +BufferDesc* getNextFreeWriteBuffer(BufferCxt* cxt) +{ + BufferDesc* buff = NULL; + const size_t producerIdx = 
cxt->producerIdx.load(std::memory_order_relaxed); + const size_t nextIdx = (producerIdx + 1) % buffNum(cxt); + /* check whether the buffer queue is full */ + if (nextIdx == cxt->consumerIdxCache) { + cxt->consumerIdxCache = cxt->consumerIdx.load(std::memory_order_acquire); + if (nextIdx == cxt->consumerIdxCache) { + return NULL; + } + } + buff = &(cxt->bufHeader[producerIdx]); + if (testBufferFlag(buff, BUFF_FLAG_FILE_FINISHED | BUFF_FLAG_FILE_CLOSED)) { + cxt->producerIdx.store(nextIdx, std::memory_order_release); + return NULL; + } + return buff; +} + +BufferDesc* tryGetNextFreeWriteBuffer(BufferCxt* cxt) +{ + BufferDesc* buff = NULL; + while (!(buff = getNextFreeWriteBuffer(cxt))) { + pg_usleep(WAIT_FOR_BUFF_SLEEP_TIME); + continue; + } + if (buffFreeLen(buff) != 0) { + return buff; + } + return tryGetNextFreeWriteBuffer(cxt); +} + +BufferDesc* getNextFreeReadBuffer(BufferCxt* cxt) +{ + BufferDesc* buff = NULL; + const size_t consumerIdx = cxt->consumerIdx.load(std::memory_order_relaxed); + /* check whether the buffer queue is empty */ + if (consumerIdx == cxt->producerIdxCache) { + cxt->producerIdxCache = cxt->producerIdx.load(std::memory_order_acquire); + buff = &(cxt->bufHeader[consumerIdx]); + if (!testBufferFlag(buff, BUFF_FLAG_FILE_FINISHED | BUFF_FLAG_FILE_CLOSED) && + consumerIdx == cxt->producerIdxCache) { + return NULL; + } + } + buff = &(cxt->bufHeader[consumerIdx]); + // buffer read finished + if (!testBufferFlag(buff, BUFF_FLAG_FILE_FINISHED | BUFF_FLAG_FILE_CLOSED)) { + return NULL; + } + const size_t next = (consumerIdx + 1) % buffNum(cxt); + cxt->consumerIdx.store(next, std::memory_order_release); + return buff; +} + +BufferDesc* tryGetNextFreeReadBuffer(BufferCxt* cxt) +{ + BufferDesc* buff = NULL; + while (!(buff = getNextFreeReadBuffer(cxt))) { + pg_usleep(WAIT_FOR_BUFF_SLEEP_TIME); + } + /* the buffer is ready */ + if (buff->usedLen != 0) { + return buff; + } + return tryGetNextFreeReadBuffer(cxt); +} + +size_t writeToBuffer(const char* data, 
size_t len, void* fp) +{ + BufferCxt* cxt = (BufferCxt*)fp; + BufferDesc* buff = NULL; + int64 writeLen = 0; + int64 freeLen = 0; + int64 remainingLen = (int64)len; + errno_t rc; + while (remainingLen > 0) { + buff = tryGetNextFreeWriteBuffer(cxt); + if (buff == NULL) { + return 0; + } + freeLen = buffFreeLen(buff); + writeLen = (remainingLen > freeLen) ? freeLen : remainingLen; + rc = memcpy_s(buffFreeLoc(buff, cxt), writeLen, data, writeLen); + securec_check_c(rc, "\0", "\0"); + addBuffLen(buff, writeLen); + data = data + writeLen; + remainingLen = remainingLen - writeLen; + if (buffFreeLen(buff) == 0) { + markBufferFlag(buff, BUFF_FLAG_FILE_FINISHED); + } + if ((remainingLen == 0 && cxt->fileEnd)) { + markBufferFlag(buff, BUFF_FLAG_FILE_CLOSED); + } + } + return len; +} + +bool hasBufferForRead(BufferCxt* cxt) +{ + const size_t consumerIdx = cxt->consumerIdx.load(std::memory_order_acquire); + BufferDesc* buff = &(cxt->bufHeader[consumerIdx]); + return testBufferFlag(buff, BUFF_FLAG_FILE_FINISHED | BUFF_FLAG_FILE_CLOSED); +} + +void* openWriteBufferFile(const char* filename, const char* mode) +{ + BufferCxt* buffCxt = current.sender_cxt->bufferCxt; + BufferDesc* buff = NULL; + SendFileInfo* fileInfo = NULL; + fileInfo = (SendFileInfo*)palloc(sizeof(SendFileInfo)); + if (fileInfo == NULL) { + elog(ERROR, "file info allocate failed: out of memory"); + } + fileInfo->filename = pgut_strdup(filename); + parray_append(current.filesinfo, fileInfo); + buffCxt->fileEnd = false; + buff = tryGetNextFreeWriteBuffer(buffCxt); + if (buff == NULL) { + elog(ERROR, "Failed to open buff file: %s", fileInfo->filename); + } + markBufferFlag(buff, BUFF_FLAG_FILE_OPENED); + buff->fileId = parray_num(current.filesinfo) - 1; + return buffCxt; +} diff --git a/src/bin/pg_probackup/oss/include/appender.h b/src/bin/pg_probackup/oss/include/appender.h new file mode 100644 index 0000000000..83b6c23589 --- /dev/null +++ b/src/bin/pg_probackup/oss/include/appender.h @@ -0,0 +1,80 @@ 
+/*------------------------------------------------------------------------- + * + * appender.h: File appender used by Backup/Restore manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#ifndef FILE_APPEND_H +#define FILE_APPEND_H + +#include "../../pg_probackup.h" +#include "buffer.h" + +/* Data Structure Definition*/ + +typedef struct FileAppenderSegHeader +{ + FILE_APPEND_SEG_TYPE type; /* seg type */ + uint32 size; /* payload size */ + uint32 permission; + off_t filesize; + pg_crc32 crc; + int external_dir_num; + device_type_t file_type; +} FileAppenderSegHeader; + +typedef struct FileAppenderSegDescriptor +{ + FileAppenderSegHeader header; + char* payload; + int header_offset; /* set value only when header spans across two buffs and a rewind exists. */ + int payload_offset; /* set value only when payload spans across two buffs and a rewind exists. 
*/ + FILE* outputFile; + pgFile* inputFile; + pg_crc32 crc; + pg_crc32 size; +} FileAppenderSegDescriptor; + +/* Constants Definition */ + +#define READ_BUFFER_BLOCK_COUNT 2 +#define APPEND_FILENAME_END_SIZE 10 +#define APPEND_FILENAME_END_DIGIT 6 +#define APPEND_FILE_MAX_SIZE 536870912 // 536870912, 512MB; 1073741824, 1GB +#define APPEND_FILE_HEADER_SIZE (sizeof(FileAppenderSegHeader)) + +/* API Function */ + +extern void initFileAppender(FileAppender* appender, FILE_APPEND_SEG_TYPE type, uint32 minFileNo, uint32 maxFileNo); + +extern void initSegDescriptor(FileAppenderSegDescriptor** segDesc); + +extern void getSegDescriptor(FileAppenderSegDescriptor* desc, char** buffOffset, size_t* remainBuffLen, BufferCxt* cxt); + +extern void parseSegDescriptor(FileAppenderSegDescriptor* segDesc, char** buffOffset, size_t* remainBuffLen, char* tempBuffer, + BufferCxt* cxt, pgBackup* dest_backup, + bool isValidate = false, validate_files_arg* arg = NULL); + +extern void destorySegDescriptor(FileAppenderSegDescriptor** descriptor); + +extern void closeFileAppender(FileAppender* retAppender); + +extern void destoryFileAppender(FileAppender** retAppender); + +extern char* getAppendFileName(const char* baseFileName, uint32 fileNo); + +extern void constructHeader(FileAppenderSegHeader* header, FILE_APPEND_SEG_TYPE type, + uint32 size, off_t filesize, pgFile* file); + +extern void writeHeader(FileAppenderSegHeader* header, FileAppender* appender); + +extern size_t writeToCompFile(const char* data, size_t len, void* file); + +extern void writePayload(const char* data, size_t len, FileAppender* appender); + +#endif /* FILE_APPEND_H */ + diff --git a/src/bin/pg_probackup/oss/include/backup.h b/src/bin/pg_probackup/oss/include/backup.h new file mode 100644 index 0000000000..6ce46afabc --- /dev/null +++ b/src/bin/pg_probackup/oss/include/backup.h @@ -0,0 +1,44 @@ +/*------------------------------------------------------------------------- + * + * backup.h: Backup utils used by 
Backup/Restore manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#ifndef BACKUP_H +#define BACKUP_H + +#include "../../pg_probackup.h" +#include "appender.h" +#include "thread.h" + +/* API Function */ + +extern void performBackup(backup_files_arg* arg); + +extern void initPerformBackup(backup_files_arg* arg, backupReaderThreadArgs* thread_args); + +extern void backupDataFiles(backup_files_arg* arg); + +extern void backupDirectories(FileAppender* appender, backup_files_arg* arg); + +extern void backupFiles(FileAppender* appender, backup_files_arg* arg); + +extern void appendDir(FileAppender* appender, const char* dirPath, uint32 permission, + int external_dir_num, device_type_t type); + +extern void appendDataFile(FileAppender* appender, char* fileBuffer, pgFile* file, + FILE_APPEND_SEG_TYPE type, char* from_fullpath, backup_files_arg* arg); + +extern void appendNonDataFile(FileAppender* appender, char* fileBuffer, pgFile* file, + FILE_APPEND_SEG_TYPE type, char* from_fullpath, backup_files_arg* arg); + +extern void appendPgControlFile(FileAppender* appender, const char *from_fullpath, fio_location from_location, pgFile *file); + +extern int writeDataFile(FileAppender* appender, char* fileBuffer, pgFile* file, char* from_fullpath, backup_files_arg* arg); + +#endif /* BACKUP_H */ + diff --git a/src/bin/pg_probackup/oss/include/buffer.h b/src/bin/pg_probackup/oss/include/buffer.h new file mode 100644 index 0000000000..81f88f3075 --- /dev/null +++ b/src/bin/pg_probackup/oss/include/buffer.h @@ -0,0 +1,136 @@ +/*------------------------------------------------------------------------- + * + * buffer.h: Buffer used by Backup/Recovery manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#ifndef BUFFER_H +#define BUFFER_H + +#include + +#include "../../pg_probackup.h" + + +/* Constants Definition */ + +#define BUFSIZE 2097152 /* 2 * 1024 * 1024, 2MB */ +#define WAIT_FOR_BUFF_SLEEP_TIME 100000 /* 100 ms*/ +#define BUFF_FLAG_FILE_OPENED 0x1 +#define BUFF_FLAG_FILE_CLOSED 0x2 +#define BUFF_FLAG_FILE_FINISHED 0x4 + +#ifdef __cpp_lib_hardware_interference_size + static constexpr size_t CacheLineSize = + std::hardware_constructive_interference_size; +#else + static constexpr size_t CacheLineSize = 64; +#endif + +/* Data Structure Definition*/ + +typedef struct SendFileInfo { + char* filename; +} SendFileInfo; + +/* each buffer's description */ +typedef struct BufferDesc +{ + uint32 bufId; + int32 fileId; /* the buffer belong to which file */ + uint32 usedLen; + uint32 flags; + pthread_spinlock_t lock; /* for lock schema */ +} BufferDesc; + +/* the context of buffers */ +typedef struct BufferCxt +{ + BufferDesc* bufHeader; + char* bufData; + uint32 bufNum; + bool fileEnd; + int fileNum; /* for restore */ + volatile bool earlyExit; + alignas(CacheLineSize) std::atomic producerIdx = {0}; + alignas(CacheLineSize) size_t producerIdxCache = 0; + alignas(CacheLineSize) std::atomic consumerIdx = {0}; + alignas(CacheLineSize) size_t consumerIdxCache = 0; +} BufferCxt; + +/* API Function */ + +extern void initBufferCxt(BufferCxt* cxt, size_t bufferSize); + +extern void destroyBufferCxt(BufferCxt* cxt); + +extern BufferDesc* tryGetNextFreeWriteBuffer(BufferCxt* cxt); + +extern BufferDesc* getNextFreeWriteBuffer(BufferCxt* cxt); + +extern BufferDesc* tryGetNextFreeReadBuffer(BufferCxt* cxt); + +extern BufferDesc* getNextFreeReadBuffer(BufferCxt* cxt); + +extern void* openWriteBufferFile(const char* filename, const char* mode); + +extern void 
closeWriteBufferFile(void* fp); + +extern size_t writeToBuffer(const char* data, size_t len, void* fp); + +extern bool hasBufferForRead(BufferCxt* cxt); + +extern bool hasNextBufferForRead(BufferCxt* cxt, const size_t buffIdx); + +/* inline function */ +inline uint32 buffFreeLen(BufferDesc* buff) +{ + return BUFSIZE - buff->usedLen; +} + +inline uint32 buffNum(BufferCxt* cxt) +{ + return cxt->bufNum; +} + +inline char* buffLoc(BufferDesc* buff, BufferCxt* cxt) +{ + return cxt->bufData + buff->bufId * BUFSIZE; +} + +inline char* buffFreeLoc(BufferDesc* buff, BufferCxt* cxt) +{ + return buffLoc(buff, cxt) + buff->usedLen; +} + +inline void addBuffLen(BufferDesc* buff, uint32 len) +{ + pthread_spin_lock(&buff->lock); + buff->usedLen += len; + pthread_spin_unlock(&buff->lock); +} + +inline void markBufferFlag(BufferDesc* buff, uint32 flag) +{ + (*((volatile uint32*)&(buff->flags))) |= flag; +} + +inline bool testBufferFlag(BufferDesc* buff, uint32 flag) +{ + return ((*((volatile uint32*)&(buff->flags))) & flag); +} + +inline void clearBuff(BufferDesc* buff) +{ + pthread_spin_lock(&buff->lock); + buff->usedLen = 0; + buff->flags = 0; + buff->fileId = -1; + pthread_spin_unlock(&buff->lock); +} + +#endif /* BUFFER_H */ \ No newline at end of file diff --git a/src/bin/pg_probackup/oss/include/oss_operator.h b/src/bin/pg_probackup/oss/include/oss_operator.h new file mode 100644 index 0000000000..5fcf0b2757 --- /dev/null +++ b/src/bin/pg_probackup/oss/include/oss_operator.h @@ -0,0 +1,91 @@ +/*------------------------------------------------------------------------- + * + * oss_operator.h: OSS Operator used by Backup/Restore manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#ifndef OSS_OPERATOR_H +#define OSS_OPERATOR_H + +#include "stddef.h" +#include +#include +#include +#include "../../parray.h" + +/* Constants Definition */ + +#define OSS_MAX_UPLOAD_ID_LEN 256 +#define OSS_MAX_FILE_PATH 1024 +#define OSS_MAX_ETAG_LEN 256 + +/* Data Structure Definition*/ + +typedef struct OssFile { + char filePath[OSS_MAX_FILE_PATH]; + /* for write */ + char uploadID[OSS_MAX_UPLOAD_ID_LEN]; + char** eTagList; + int partNum; + /* for read */ + size_t fileSize; + bool oss_eof; + bool oss_error; + void* bufDate; + int byteCout; + int actualLen; + size_t offset; + char etag[OSS_MAX_ETAG_LEN]; +} OssFile; + +/* API Function */ + +namespace Oss { +using namespace std; +using SDKOptions = void *; +using S3Client = void *; + +class Oss { +public: + Oss(const char* endpoint, const char* access_key, const char* secret_key, const char* region = NULL, bool secure = false); + ~Oss(); + void GetObject(const char* bucket_name, const char* object_name, const char* file_name); + void GetObject(const char* from_bucket, const char* object_key, void* file); + void PutObject(const char* bucket_name, const char* file_path, const char* file_name); + void RemoveObject(const char* bucket_name, const char* objcet_key); + void ListObjects(char* bucket_name, parray* objects); + void ListObjectsWithPrefix(char* bucket_name, char* prefix, parray* objects); + void MakeBucket(char* bucket_name); + void ListBuckets(parray* buckets); + bool ObjectExists(char* bucket_name, char* object_name); + bool BucketExists(char* bucket_name); + void RemoveBucket(char* bucket_name); + void StartMultipartUpload(char* bucket_name, char* object_name); + void MultipartUpload(char* bucket_name, char* object_name, char* data, size_t data_size); + void 
CompleteMultipartUploadRequest(char* bucket_name, char* object_name); + +private: + const string kEndpoint; + const string kAccessKey; + const string kSecretKey; + const bool kSecure; + string kRegion; + void* completePartVector; + string UploadId; + int partNumber; + SDKOptions options_; + S3Client s3_client_; +}; +} // namespace Oss + +extern void parseBackupControlFilePath(const char* path, char** bucket_name, char** object_name); +extern char* getBucketName(); +extern char* getPrefixName(void* backup); +extern Oss::Oss* getOssClient(); + +#endif /* OSS_OPERATOR_H */ + diff --git a/src/bin/pg_probackup/oss/include/restore.h b/src/bin/pg_probackup/oss/include/restore.h new file mode 100644 index 0000000000..a7498a38a9 --- /dev/null +++ b/src/bin/pg_probackup/oss/include/restore.h @@ -0,0 +1,46 @@ +/*------------------------------------------------------------------------- + * + * restore.h: Restore utils used by Backup/Restore manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#ifndef RESTORE_H +#define RESTORE_H + +#include "../../pg_probackup.h" +#include "appender.h" + +/* API Function */ + +extern void performValidate(pgBackup *backup, pgRestoreParams *params); + +extern void performRestoreOrValidate(pgBackup *dest_backup, bool isValidate = false); + +extern void restoreDir(const char* path, FileAppenderSegDescriptor* desc, pgBackup* dest_backup, + parray* files, bool isValidate); + +extern void restoreDataFile(const char* data, FileAppenderSegDescriptor* desc, parray *parent_chain, bool use_bitmap, + bool use_headers); + +extern void restoreNonDataFile(const char* data, FileAppenderSegDescriptor* desc, parray *parent_chain, pgBackup *dest_backup, + bool isValidate, validate_files_arg* arg); + +extern void openRestoreFile(const char* path, FileAppenderSegDescriptor* desc, pgBackup* dest_backup, + parray* files, bool isValidate = false, validate_files_arg* arg = NULL); + +extern void writeOrValidateRestoreFile(const char* data, FileAppenderSegDescriptor* desc, + bool isValidate, validate_files_arg* arg); + +extern void closeRestoreFile(FileAppenderSegDescriptor* desc); + +extern void restoreConfigDir(); + +extern void restoreConfigFile(const char* path); + +extern void uploadConfigFile(const char* local_path, const char* object_name); + +#endif /* RESTORE_H */ \ No newline at end of file diff --git a/src/bin/pg_probackup/oss/include/thread.h b/src/bin/pg_probackup/oss/include/thread.h new file mode 100644 index 0000000000..1dfbdc9360 --- /dev/null +++ b/src/bin/pg_probackup/oss/include/thread.h @@ -0,0 +1,124 @@ +/*------------------------------------------------------------------------- + * + * thread.h: Thread utils used by Backup/Restore manager. 
+ * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#ifndef THREAD_H +#define THREAD_H + +#include "../../pg_probackup.h" +#include "appender.h" +#include "buffer.h" +#include "../../parray.h" + +/* Constants Definition */ + +#define SENDER_BUFFER_SIZE 268435456 /* 256 * 1024 * 1024 Bytes, 256MB*/ +#define READER_THREAD_FILE_COUNT 8 +#define FILE_BUFFER_SIZE 8388608 /* 8 * 1024 * 1024, 8MB */ +#define WAIT_FOR_STATE_CHANGE_TIME 100000 /* 100 ms*/ + +/* Data Structure Definition*/ +typedef enum readerThreadState { + READER_THREAD_STATE_INIT = 0, + READER_THREAD_STATE_START, + READER_THREAD_STATE_FLUSHING, + READER_THREAD_STATE_FLUSHED, + READER_THREAD_STATE_ERROR, + READER_THREAD_STATE_STOP +} ReaderThreadState; + +typedef enum SenderThreadState { + SENDER_THREAD_STATE_INIT = 0, + SENDER_THREAD_STATE_START, + SENDER_THREAD_STATE_FINISH, + SENDER_THREAD_STATE_FINISHED, + SENDER_THREAD_STATE_ERROR, + SENDER_THREAD_STATE_STOP +} SenderThreadState; + +typedef struct ReaderCxt { + pgFile** file; + pgFile** prefile; + char** fromPath; + char** toPath; + char* fileBuffer; + uint32 fileCount; + FileAppender* appender; + FILE_APPEND_SEG_TYPE* segType; + bool* fileRemoved; + pthread_t readerThreadId; + ReaderThreadState state; + pthread_spinlock_t lock; +} ReaderCxt; + +typedef struct SenderCxt { + BufferCxt* bufferCxt; + pthread_t senderThreadId; + SenderThreadState state; + pthread_spinlock_t lock; +} SenderCxt; + +typedef struct restoreReaderThreadArgs { + BufferCxt* bufferCxt; + pgBackup* dest_backup; +} restoreReaderThreadArgs; + +typedef struct backupReaderThreadArgs +{ + backup_files_arg* arg; + ReaderCxt* readerCxt; +} backupReaderThreadArgs; + +/* API Function */ + +int getFreeReaderThread(); + +extern ReaderThreadState 
getReaderState(ReaderCxt* readerCxt); + +extern void setReaderState(ReaderCxt* readerCxt, ReaderThreadState state); + +extern SenderThreadState getSenderState(SenderCxt* senderCxt); + +extern void setSenderState(SenderCxt* senderCxt, SenderThreadState state); + +extern void startBackupReaders(backup_files_arg* arg, backupReaderThreadArgs* thread_args); + +extern void startBackupSender(); + +extern void* backupReaderThreadMain(void* arg); + +extern void* backupSenderThreadMain(void* arg); + +extern void initBackupSenderContext(SenderCxt** cxt); + +extern void initBackupReaderContexts(ReaderCxt** cxt); + +extern bool isSenderThreadStopped(SenderCxt* senderCxt); + +extern void destoryBackupSenderContext(); + +extern bool isReaderThreadStopped(ReaderCxt* readerCxt); + +extern void destoryBackupReaderContexts(); + +extern void copyFileToFileBuffer(ReaderCxt* readerCxt, int fileIndex, backup_files_arg* arg); + +extern void flushReaderContexts(void* arg); + +extern void waitForSenderThread(); + +extern void stopBackupReaders(); + +extern void stopBackupSender(); + +extern void waitForReadersCopyComplete(); + +extern void* restoreReaderThreadMain(void* arg); + +#endif /* THREAD_H */ \ No newline at end of file diff --git a/src/bin/pg_probackup/oss/oss_operator.cpp b/src/bin/pg_probackup/oss/oss_operator.cpp new file mode 100644 index 0000000000..90f3cfeea1 --- /dev/null +++ b/src/bin/pg_probackup/oss/oss_operator.cpp @@ -0,0 +1,288 @@ +/*------------------------------------------------------------------------- + * + * oss_operator.cpp: OSS operator used by Backup/Recovery manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "include/oss_operator.h" +#include "utils/elog.h" +#include "include/buffer.h" +#include "include/restore.h" + +namespace Oss { +Oss::Oss(const char* endpoint, const char* access_key, const char* secret_key, const char* region, bool secure) + : kEndpoint(endpoint), kAccessKey(access_key), kSecretKey(secret_key), kSecure(secure) { + options_ = new Aws::SDKOptions; + auto options = reinterpret_cast(options_); + Aws::InitAPI(*options); + Aws::Client::ClientConfiguration config; + if (region != NULL) { + kRegion.assign(region); + config.region = kRegion; + } + config.endpointOverride = kEndpoint; + if (kSecure) { + config.scheme = Aws::Http::Scheme::HTTPS; + config.verifySSL = true; + } else { + config.scheme = Aws::Http::Scheme::HTTP; + config.verifySSL = false; + } + completePartVector = new Aws::Vector(); + s3_client_ = new Aws::S3::S3Client(Aws::Auth::AWSCredentials(kAccessKey, kSecretKey), config, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, false); +} + +Oss::~Oss() { + auto options = reinterpret_cast(options_); + // Before the application the applications terminates, the SDK must be shut down. 
+ Aws::ShutdownAPI(*options); + // Clean up + delete reinterpret_cast(options_); + delete reinterpret_cast(s3_client_); + delete reinterpret_cast *>(completePartVector); +} + +void Oss::GetObject(const char* from_bucket, const char* object_key, void* filePtr) { + BufferCxt* file = (BufferCxt*)(filePtr); + auto s3_client = reinterpret_cast(s3_client_); + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(from_bucket); + request.SetKey(object_key); + + auto outcome = s3_client->GetObject(request); + if (!outcome.IsSuccess()) { + auto err = outcome.GetError(); + elog(ERROR, "GetObject: %s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + } + char buffer[BUFSIZE]; + std::stringstream ss; + ss << outcome.GetResultWithOwnership().GetBody().rdbuf(); + while (!ss.eof()) { + size_t readlen = ss.read(buffer, BUFSIZE).gcount(); + if (readlen < BUFSIZE) { + file->fileEnd = true; + } + if (writeToBuffer(buffer, readlen, file) != readlen) { + elog(ERROR, "GetObject: write to buffer failed."); + } + if (file->earlyExit) { + break; + } + } +} + +void Oss::GetObject(const char* bucket_name, const char* object_name, const char* file_name) { + auto s3_client = reinterpret_cast(s3_client_); + Aws::S3::Model::GetObjectRequest request; + request.SetBucket(bucket_name); + request.SetKey(object_name); + auto outcome = s3_client->GetObject(request); + if (!outcome.IsSuccess()) { + auto err = outcome.GetError(); + elog(ERROR, "GetObject: %s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + } + char* separator_pos = last_dir_separator(file_name); + char* dir_path = strndup(file_name, separator_pos - file_name); + fio_mkdir(dir_path, DIR_PERMISSION, FIO_BACKUP_HOST); + free(dir_path); + Aws::OFStream local_file; + local_file.open(file_name, ios::out | ios::binary); + if (!local_file) { + elog(ERROR, "Error GetObject : dst path's directory: %s is not existed.", file_name); + } + local_file << outcome.GetResultWithOwnership().GetBody().rdbuf(); +} + 
+void Oss::PutObject(const char* bucket_name, const char* file_path, const char* file_name) { + struct stat buffer; + if (stat(file_path, &buffer) == -1) { + elog(ERROR, "PutObject: File %s does not exist", file_name); + } + auto s3_client = reinterpret_cast(s3_client_); + Aws::S3::Model::PutObjectRequest request; + request.SetBucket(bucket_name); + request.SetKey(file_name); + shared_ptr input_data = + Aws::MakeShared("sSampleAllocationtTag", file_path, ios_base::in | ios_base::binary); + request.SetBody(input_data); + auto outcome = s3_client->PutObject(request); + if (!outcome.IsSuccess()) { + auto err = outcome.GetError(); + elog(ERROR, "PutObject: %s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + } +} + +void Oss::StartMultipartUpload(char* bucket_name, char* object_name) +{ + auto s3_client = reinterpret_cast(s3_client_); + auto completeParts = (Aws::Vector*)completePartVector; + // set the target bucket and file path for uploading + const Aws::String bucket = bucket_name; + const Aws::String key = object_name; + // initialize parts upload task + Aws::S3::Model::CreateMultipartUploadRequest create_request; + create_request.SetBucket(bucket_name); + create_request.SetKey(object_name); + Aws::S3::Model::CreateMultipartUploadOutcome outcome = s3_client->CreateMultipartUpload(create_request); + // obtain upload ID and part number + UploadId = std::string(outcome.GetResult().GetUploadId().c_str()); + partNumber = 1; + completeParts->clear(); + // create part upload output + if (!outcome.IsSuccess()) { + auto err = outcome.GetError(); + elog(ERROR, "StartMultipartUpload: %s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + } +} + +void Oss::MultipartUpload(char* bucket_name, char* object_name, char* data, size_t data_size) +{ + if (data_size == 0 || data == NULL) { + return; + } + auto s3_client = reinterpret_cast(s3_client_); + auto completeParts = (Aws::Vector*)completePartVector; + const Aws::String bucket = bucket_name; + 
const Aws::String key = object_name; + Aws::S3::Model::UploadPartRequest request1; + request1.SetBucket(bucket_name); + request1.SetKey(object_name); + // start uploading + Aws::S3::Model::UploadPartRequest uploadPartRequest; + uploadPartRequest.WithBucket(bucket).WithKey(key).WithUploadId(Aws::String(UploadId.c_str())).WithPartNumber(partNumber).WithContentLength(data_size); + Aws::String str(data, data_size); + auto input_data = Aws::MakeShared("UploadPartStream", str); + uploadPartRequest.SetBody(input_data); + auto uploadPartResult = s3_client->UploadPart(uploadPartRequest); + completeParts->push_back(Aws::S3::Model::CompletedPart().WithETag(uploadPartResult.GetResult().GetETag()).WithPartNumber(partNumber)); + ++partNumber; +} + +void Oss::CompleteMultipartUploadRequest(char* bucket_name, char* object_name) +{ + auto s3_client = reinterpret_cast(s3_client_); + auto completeParts = (Aws::Vector*)completePartVector; + const Aws::String bucket = bucket_name; + const Aws::String key = object_name; + // Complete parts upload + Aws::S3::Model::CompleteMultipartUploadRequest request; + request.SetBucket(bucket); + request.SetKey(key); + request.SetUploadId(Aws::String(UploadId.c_str())); + Aws::S3::Model::CompletedMultipartUpload completed_multipart_upload; + completed_multipart_upload.SetParts(*completeParts); + request.SetMultipartUpload(completed_multipart_upload); + Aws::S3::Model::CompleteMultipartUploadOutcome outcome = s3_client->CompleteMultipartUpload(request); + if (!outcome.IsSuccess()) { + elog(ERROR, "CompleteMultipartUploadRequest: %s", outcome.GetError().GetMessage().c_str()); + } +} + + +void Oss::RemoveObject(const char* bucket_name, const char* objcet_key) { + auto s3_client = reinterpret_cast(s3_client_); + Aws::S3::Model::DeleteObjectRequest request; + request.WithKey(objcet_key).WithBucket(bucket_name); + auto outcome = s3_client->DeleteObject(request); + if (!outcome.IsSuccess()) { + auto err = outcome.GetError(); + elog(WARNING, "RemoveObject: 
%s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + } +} + +void Oss::ListObjectsWithPrefix(char* bucket_name, char* prefix, parray* objects) +{ + auto s3_client = reinterpret_cast(s3_client_); + Aws::S3::Model::ListObjectsRequest request; + request.WithBucket(bucket_name); + request.SetPrefix(prefix); + auto outcome = s3_client->ListObjects(request); + if (!outcome.IsSuccess()) { + auto err = outcome.GetError(); + elog(ERROR, "ListObjectsWithPrefix: %s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + } + Aws::Vector resp = outcome.GetResult().GetContents(); + for (auto &bucket : resp) { + char* key = pg_strdup(bucket.GetKey().c_str()); + parray_append(objects, key); + } +} + +bool Oss::BucketExists(char* bucket_name) { + auto s3_client = reinterpret_cast(s3_client_); + Aws::S3::Model::HeadBucketRequest request; + request.SetBucket(bucket_name); + auto outcome = s3_client->HeadBucket(request); + if (!outcome.IsSuccess()) { + return false; + } + return true; +} + +bool Oss::ObjectExists(char* bucket_name, char* object_name) { + auto s3_client = reinterpret_cast(s3_client_); + Aws::S3::Model::HeadObjectRequest headObjectRequest; + headObjectRequest.WithBucket(bucket_name).WithKey(object_name); + auto result = s3_client->HeadObject(headObjectRequest); + return result.IsSuccess(); +} +} // namespace Oss + +char* getBucketName() +{ + char* bucket_name = instance_config.oss.access_bucket; + if (bucket_name == NULL) { + elog(ERROR, "Required parameter not specified: OSS(--bucket_name)"); + } + return bucket_name; +} + +char* getPrefixName(void* backup) +{ + return ((pgBackup*)backup)->root_dir + 1; +} + +Oss::Oss* getOssClient() +{ + if (oss_client == NULL) { + const char* endpoint = instance_config.oss.endpoint; + const char* access_key = instance_config.oss.access_id; + const char* secret_key = instance_config.oss.access_key; + const char* region = instance_config.oss.region; + const char* access_bucket = 
instance_config.oss.access_bucket; + if (!endpoint || !access_key || !secret_key || !access_bucket) { + elog(ERROR, + "Required parameter not specified: OSS(--endpoint, --access_bucket, --access_id or --access_key)"); + } + oss_client = new Oss::Oss(endpoint, access_key, secret_key, region); + } + return (Oss::Oss*)oss_client; +} diff --git a/src/bin/pg_probackup/oss/restore.cpp b/src/bin/pg_probackup/oss/restore.cpp new file mode 100644 index 0000000000..40a4858337 --- /dev/null +++ b/src/bin/pg_probackup/oss/restore.cpp @@ -0,0 +1,350 @@ +#include "include/restore.h" +#include "include/oss_operator.h" +#include "include/thread.h" + +#include "storage/file/fio_device.h" +#include "common/fe_memutils.h" + +#define IsIllegalCharacter(c) ((c) != '/' && !isdigit((c)) && !isalpha((c)) && (c) != '_' && (c) != '-' && (c) != '.') +static fio_location location = FIO_BACKUP_HOST; +static pgFile* findpgFile(parray* files, char* to_path, int external_dir_num, device_type_t type); + +void performRestoreOrValidate(pgBackup *dest_backup, bool isValidate) +{ + /* for validate */ + char base_path[MAXPGPATH]; + char dss_path[MAXPGPATH]; + char external_prefix[MAXPGPATH]; + parray *files = NULL; + bool corrupted = false; + validate_files_arg arg; + if (dest_backup->files == NULL) { + files = get_backup_filelist(dest_backup, true); + dest_backup->files = files; + parray_qsort(dest_backup->files, pgFileCompareRelPathWithExternal); + } else { + files = dest_backup->files; + parray_qsort(dest_backup->files, pgFileCompareRelPathWithExternal); + } + if (isValidate) { + if (!pre_check_backup(dest_backup)) { + return; + } + join_path_components(base_path, dest_backup->root_dir, DATABASE_DIR); + join_path_components(dss_path, dest_backup->root_dir, DSSDATA_DIR); + join_path_components(external_prefix, dest_backup->root_dir, EXTERNAL_DIR); + if (!files) { + elog(WARNING, "Backup %s file list is corrupted", base36enc(dest_backup->start_time)); + dest_backup->status = BACKUP_STATUS_CORRUPT; + 
write_backup_status(dest_backup, BACKUP_STATUS_CORRUPT, instance_name, true); + return; + } + arg.base_path = base_path; + arg.dss_path = dss_path; + arg.files = files; + arg.corrupted = false; + arg.backup_mode = dest_backup->backup_mode; + arg.stop_lsn = dest_backup->stop_lsn; + arg.checksum_version = dest_backup->checksum_version; + arg.backup_version = parse_program_version(dest_backup->program_version); + arg.external_prefix = external_prefix; + arg.hdr_map = &(dest_backup->hdr_map); + arg.ret = 1; + } + /* Initialize the buffer context */ + BufferCxt* bufferCxt = (BufferCxt*)palloc(sizeof(BufferCxt)); + if (bufferCxt == NULL) { + elog(ERROR, "buffer context allocate failed: out of memory"); + } + initBufferCxt(bufferCxt, SENDER_BUFFER_SIZE); + restoreReaderThreadArgs args; + args.bufferCxt = bufferCxt; + args.dest_backup = dest_backup; + pthread_t restoreReaderThread; + pthread_create(&restoreReaderThread, nullptr, restoreReaderThreadMain, &args); + + char tempBuffer[BUFSIZE]; + BufferDesc* buff = NULL; + BufferDesc* nextBuff = NULL; + BufferDesc* prevBuff = NULL; + FileAppenderSegDescriptor* desc = NULL; + FileAppenderSegDescriptor* predesc = NULL; + initSegDescriptor(&desc); + initSegDescriptor(&predesc); + const size_t segHdrLen = sizeof(FileAppenderSegHeader); + char* buffOffset = NULL; + size_t remainBuffLen = 0; + int filenum = 0; + while(true) { + buff = tryGetNextFreeReadBuffer(bufferCxt); + char* buffEnd = buffLoc(buff, bufferCxt) + buff->usedLen; + if (remainBuffLen == 0) { + buffOffset = buffLoc(buff, bufferCxt); + } + if (unlikely(prevBuff != NULL)) { + remainBuffLen = remainBuffLen + buff->usedLen; + } else { + remainBuffLen = buffEnd - buffOffset; + } + while (segHdrLen <= remainBuffLen) { + memcpy_s(predesc, sizeof(FileAppenderSegDescriptor), desc, sizeof(FileAppenderSegDescriptor)); + getSegDescriptor(desc, &buffOffset, &remainBuffLen, bufferCxt); + if (prevBuff != NULL) { + clearBuff(prevBuff); + prevBuff = NULL; + } + // The payload spans 
across two buffs. + if (desc->header.size > 0 && desc->header.size > remainBuffLen) { + nextBuff = tryGetNextFreeReadBuffer(bufferCxt); + while (nextBuff->bufId == buff->bufId) { + pg_usleep(WAIT_FOR_BUFF_SLEEP_TIME); + nextBuff = tryGetNextFreeReadBuffer(bufferCxt); + } + // rewind + if (nextBuff->bufId == 0 && remainBuffLen > 0) { + desc->payload_offset = remainBuffLen; + } + remainBuffLen = remainBuffLen + nextBuff->usedLen; + } + parseSegDescriptor(desc, &buffOffset, &remainBuffLen, tempBuffer, bufferCxt, dest_backup, isValidate, &arg); + if (isValidate && arg.corrupted) { + remainBuffLen = 0; + corrupted = true; + break; + } + if (desc->header.type == FILE_APPEND_TYPE_FILES_END) { + filenum++; + if (filenum == bufferCxt->fileNum) { + break; + } + nextBuff = tryGetNextFreeReadBuffer(bufferCxt); + while (nextBuff->bufId == buff->bufId) { + pg_usleep(WAIT_FOR_BUFF_SLEEP_TIME); + nextBuff = tryGetNextFreeReadBuffer(bufferCxt); + } + remainBuffLen = nextBuff->usedLen; + buffOffset = buffLoc(nextBuff, bufferCxt); + } + if (nextBuff != NULL) { + clearBuff(buff); + buff = nextBuff; + nextBuff = NULL; + } + } + // The header spans across two buffs. + if (remainBuffLen > 0) { + prevBuff = buff; + // rewind + if (prevBuff->bufId == (buffNum(bufferCxt) - 1)) { + desc->header_offset = remainBuffLen; + } + continue; + } + // reuse the buffer + clearBuff(buff); + if (filenum == bufferCxt->fileNum || corrupted) { + break; + } + } + + args.bufferCxt->earlyExit = true; + pthread_join(restoreReaderThread, nullptr); + destorySegDescriptor(&desc); + /* fix: predesc was created with initSegDescriptor(&predesc) above but never released — leak */ + destorySegDescriptor(&predesc); + destroyBufferCxt(bufferCxt); + if (isValidate) { + elog(INFO, "Finish validate file."); + if (corrupted) { + dest_backup->status = BACKUP_STATUS_CORRUPT; + } + write_backup_status(dest_backup, corrupted ?
BACKUP_STATUS_CORRUPT : + BACKUP_STATUS_OK, instance_name, true); + if (corrupted) { + elog(WARNING, "Backup %s data files are corrupted", base36enc(dest_backup->start_time)); + } else { + elog(INFO, "Backup %s data files are valid", base36enc(dest_backup->start_time)); + } + } +} + +void restoreDir(const char* path, FileAppenderSegDescriptor* desc, pgBackup* dest_backup, + parray* files, bool isValidate) +{ + if (isValidate) { + return; + } + /* create directories */ + char to_path[MAXPGPATH]; + char from_root[MAXPGPATH]; + char dir_path[MAXPGPATH]; + errno_t rc; + size_t pathlen = desc->header.size; + rc = strncpy_s(to_path, pathlen + 1, path, pathlen); + pgFile* dir = findpgFile(files, to_path, desc->header.external_dir_num, desc->header.file_type); + if (dir == NULL) { + elog(ERROR, "Cannot find dir \"%s\"", to_path); + } + + if (dir->external_dir_num != 0) { + char external_prefix[MAXPGPATH]; + join_path_components(external_prefix, dest_backup->root_dir, EXTERNAL_DIR); + makeExternalDirPathByNum(from_root, external_prefix, desc->inputFile->external_dir_num); + } + else if (is_dss_type(dir->type)) { + join_path_components(from_root, dest_backup->root_dir, DSSDATA_DIR); + } else { + join_path_components(from_root, dest_backup->root_dir, DATABASE_DIR); + } + join_path_components(dir_path, from_root, to_path); + fio_mkdir(dir_path, desc->header.permission, location); +} + +void openRestoreFile(const char* path, FileAppenderSegDescriptor* desc, pgBackup* dest_backup, + parray* files, bool isValidate, validate_files_arg* arg) +{ + char to_path[MAXPGPATH]; + char from_root[MAXPGPATH]; + char filepath[MAXPGPATH]; + errno_t rc; + rc = strncpy_s(to_path, desc->header.size + 1, path, desc->header.size); + securec_check_c(rc, "\0", "\0"); + desc->inputFile = findpgFile(files, to_path, desc->header.external_dir_num, desc->header.file_type); + if (desc->inputFile == NULL) { + elog(ERROR, "Cannot find file \"%s\"", to_path); + } + if (isValidate) { + if 
(desc->inputFile->write_size == BYTES_INVALID) { + if (arg->backup_mode == BACKUP_MODE_FULL) { + /* It is illegal for file in FULL backup to have BYTES_INVALID */ + elog(WARNING, "Backup file \"%s\" has invalid size. Possible metadata corruption.", + desc->inputFile->rel_path); + arg->corrupted = true; + } + return; + } + INIT_FILE_CRC32(true, desc->crc); + } else { + if (desc->inputFile->external_dir_num != 0) { + char external_prefix[MAXPGPATH]; + join_path_components(external_prefix, dest_backup->root_dir, EXTERNAL_DIR); + makeExternalDirPathByNum(from_root, external_prefix, desc->inputFile->external_dir_num); + } + else if (is_dss_type(desc->inputFile->type)) { + join_path_components(from_root, dest_backup->root_dir, DSSDATA_DIR); + } else { + join_path_components(from_root, dest_backup->root_dir, DATABASE_DIR); + } + join_path_components(filepath, from_root, desc->inputFile->rel_path); + + if (desc->outputFile == NULL) { + desc->outputFile = fio_fopen(filepath, PG_BINARY_W, location); + } else if (desc->outputFile != NULL) { + desc->outputFile = fio_fopen(filepath, PG_BINARY_R "+", location); + } + if (desc->outputFile == NULL) { + elog(ERROR, "Cannot open restore file \"%s\": %s", + filepath, strerror(errno)); + } + setvbuf(desc->outputFile, NULL, _IONBF, BUFSIZ); + } +} + +void closeRestoreFile(FileAppenderSegDescriptor* desc) +{ + if (desc->outputFile && fio_fclose(desc->outputFile) != 0) { + elog(ERROR, "Cannot close file!", strerror(errno)); + } + desc->outputFile = NULL; + desc->inputFile = NULL; +} + +void writeOrValidateRestoreFile(const char* data, FileAppenderSegDescriptor* desc, + bool isValidate, validate_files_arg* arg) +{ + pgFile* dest_file = desc->inputFile; + if (dest_file == NULL) { + return; + } + /* Restore or Validate destination file */ + if (isValidate) { + if (!S_ISREG(dest_file->mode) || dest_file->write_size == 0 || + strcmp(dest_file->name, PG_XLOG_CONTROL_FILE) == 0) { + return; + } + if (dest_file->write_size == BYTES_INVALID) { + 
if (arg->backup_mode == BACKUP_MODE_FULL) { + elog(WARNING, "Backup file \"%s\" has invalid size. Possible metadata corruption.", + dest_file->rel_path); + arg->corrupted = true; + return; + } + return; + } + COMP_FILE_CRC32(true, desc->crc, data, desc->header.size); + if (desc->crc != desc->header.crc) { + arg->corrupted = true; + return; + } + } else if (desc->header.size > 0) { + if (fio_fwrite(desc->outputFile, data, desc->header.size) != desc->header.size) { + elog(ERROR, "Cannot write blocks of \"%s\": %s", desc->inputFile->rel_path, strerror(errno)); + } + } +} + +void restoreConfigDir() +{ + Oss::Oss* oss = getOssClient(); + char* bucket_name = getBucketName(); + char* prefix_name = backup_instance_path + 1; + parray *obj_list = parray_new(); + oss->ListObjectsWithPrefix(bucket_name, prefix_name, obj_list); + char dir_path[MAXPGPATH]; + for (size_t i = 0; i < parray_num(obj_list); i++) { + char* object = (char*)parray_get(obj_list, i); + char* filename = last_dir_separator(object); + char* dir_name = strndup(object, filename - object); + if (strcmp(filename + 1, BACKUP_CATALOG_CONF_FILE) == 0) { + pg_free(dir_name); + continue; + } + if (strcmp(filename + 1, BACKUP_CONTROL_FILE) == 0) { + join_path_components(dir_path, "/", dir_name); + fio_mkdir(dir_path, DIR_PERMISSION, location); + pg_free(dir_name); + } + } + parray_free(obj_list); +} + +void restoreConfigFile(const char* path) +{ + Oss::Oss* oss = getOssClient(); + const char* object_name = NULL; + const char* bucket_name = NULL; + bucket_name = getBucketName(); + object_name = path; + oss->GetObject(bucket_name, object_name, (char*)path); +} + +void uploadConfigFile(const char* path, const char* object_name) +{ + Oss::Oss* oss = getOssClient(); + const char* bucket_name = getBucketName(); + oss->RemoveObject(bucket_name, object_name); + oss->PutObject(bucket_name, path, object_name); + fio_unlink(path, location); +} + +static pgFile* findpgFile(parray* files, char* to_path, int external_dir_num, 
device_type_t type) +{ + pgFile* resfile = NULL; + pgFile tempfile; + tempfile.rel_path = to_path; + tempfile.external_dir_num = external_dir_num; + tempfile.type = type; + void* res = parray_bsearch(files, &tempfile, pgFileCompareRelPathWithExternal); + if (res != NULL) { + resfile = *(pgFile **)res; + } + return resfile; +} \ No newline at end of file diff --git a/src/bin/pg_probackup/oss/thread.cpp b/src/bin/pg_probackup/oss/thread.cpp new file mode 100644 index 0000000000..ea06bdfa9a --- /dev/null +++ b/src/bin/pg_probackup/oss/thread.cpp @@ -0,0 +1,453 @@ +/*------------------------------------------------------------------------- + * + * thread.cpp: Thread api used by Backup/Recovery manager. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION + * Portions Copyright (c) 2015-2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#include "include/thread.h" +#include "include/oss_operator.h" +#include "include/backup.h" +#include "include/appender.h" + +#include "utils/palloc.h" +#include "common/fe_memutils.h" +#include "storage/file/fio_device.h" + +void initBackupSenderContext(SenderCxt** cxt) +{ + SenderCxt* senderCxt = NULL; + senderCxt = (SenderCxt*)palloc(sizeof(SenderCxt)); + if (senderCxt == NULL) { + elog(ERROR, "sender context allocate failed: out of memory"); + } + pthread_spin_init(&senderCxt->lock, PTHREAD_PROCESS_PRIVATE); + senderCxt->state = SENDER_THREAD_STATE_INIT; + senderCxt->bufferCxt = (BufferCxt*)palloc(sizeof(BufferCxt)); + if (senderCxt->bufferCxt == NULL) { + pfree_ext(senderCxt); + elog(ERROR, "buffer context allocate failed: out of memory"); + } + /* Initialize the buffer context */ + initBufferCxt(senderCxt->bufferCxt, SENDER_BUFFER_SIZE); + *cxt = senderCxt; +} + +void startBackupSender() +{ + pthread_create(¤t.sender_cxt->senderThreadId, nullptr, backupSenderThreadMain, 
(void*)current.sender_cxt); +} + +bool isSenderThreadStopped(SenderCxt* senderCxt) +{ + pthread_spin_lock(&senderCxt->lock); + bool isStopped = senderCxt->state == SENDER_THREAD_STATE_STOP; + pthread_spin_unlock(&senderCxt->lock); + return isStopped; +} + +bool isReaderThreadStopped(ReaderCxt* readerCxt) +{ + pthread_spin_lock(&readerCxt->lock); + bool isStopped = (readerCxt->state == READER_THREAD_STATE_STOP); + pthread_spin_unlock(&readerCxt->lock); + return isStopped; +} + +void destoryBackupReaderContexts() +{ + ReaderCxt* readerCxt = NULL; + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = ¤t.readerCxt[i]; + pthread_spin_lock(&readerCxt->lock); + readerCxt->state = READER_THREAD_STATE_STOP; + pthread_spin_unlock(&readerCxt->lock); + pthread_join(readerCxt->readerThreadId, NULL); + pthread_spin_destroy(&readerCxt->lock); + pfree_ext(readerCxt->file); + pfree_ext(readerCxt->prefile); + pfree_ext(readerCxt->fromPath); + pfree_ext(readerCxt->fileBuffer); + pfree_ext(readerCxt->segType); + pfree_ext(readerCxt->fileRemoved); + } + pfree_ext(current.readerCxt); +} + +void destoryBackupSenderContext() +{ + SenderCxt* senderCxt = current.sender_cxt; + pthread_spin_lock(&senderCxt->lock); + senderCxt->state = SENDER_THREAD_STATE_STOP; + pthread_spin_unlock(&senderCxt->lock); + pthread_join(senderCxt->senderThreadId, NULL); + pthread_spin_destroy(&senderCxt->lock); + pfree_ext(senderCxt->bufferCxt); + pfree_ext(senderCxt); +} + +void* backupSenderThreadMain(void* arg) +{ + SenderCxt* senderCxt = (SenderCxt*)arg; + BufferCxt* bufferCxt = senderCxt->bufferCxt; + BufferDesc* buff = NULL; + Oss::Oss* oss = NULL; + const uint32 partSize = 6 * 1024 * 1024; // 6MB + uint32 partLeftSize = partSize; + char* buffer = (char*)palloc(partSize * sizeof(char)); + char* bufferEndPtr = buffer + partSize; + // Open OSS connection + oss = getOssClient(); + // find OSS bucket + char* bucket_name = getBucketName(); + char* object_name = NULL; + SendFileInfo* fileinfo = 
NULL; + errno_t rc; + while (true) { + // wait for the buffer to be consumed + if (isSenderThreadStopped(senderCxt)) { + break; + } else if (getSenderState(senderCxt) != SENDER_THREAD_STATE_FINISH && !hasBufferForRead(bufferCxt)) { + continue; + } else if (getSenderState(senderCxt) == SENDER_THREAD_STATE_FINISH && !hasBufferForRead(bufferCxt)) { + setSenderState(senderCxt, SENDER_THREAD_STATE_FINISHED); + continue; + } + buff = tryGetNextFreeReadBuffer(bufferCxt); + // write the buffer to OSS + if (buff->usedLen != 0) { + if (buff->fileId != -1) { + pthread_spin_lock(&senderCxt->lock); + fileinfo = (SendFileInfo*)parray_get(current.filesinfo, buff->fileId); + pthread_spin_unlock(&senderCxt->lock); + } + if (fileinfo != NULL && object_name == NULL) { + object_name = fileinfo->filename; + oss->StartMultipartUpload(bucket_name, object_name); + partLeftSize = partSize; + } else if (fileinfo != NULL && strcmp(object_name, fileinfo->filename) != 0) { + oss->MultipartUpload(bucket_name, object_name, buffer, (partSize - partLeftSize)); + oss->CompleteMultipartUploadRequest(bucket_name, object_name); + object_name = fileinfo->filename; + oss->StartMultipartUpload(bucket_name, object_name); + partLeftSize = partSize; + } else if (fileinfo == NULL) { + elog(ERROR, "get file info failed."); + } + if (buff->usedLen < partLeftSize) { + rc = memcpy_s(bufferEndPtr - partLeftSize, buff->usedLen, buffLoc(buff, bufferCxt), buff->usedLen); + securec_check(rc, "\0", "\0"); + partLeftSize = partLeftSize - buff->usedLen; + } else { + uint32 buff_off = buff->usedLen - partLeftSize; + rc = memcpy_s(bufferEndPtr - partLeftSize, partLeftSize, buffLoc(buff, bufferCxt), partLeftSize); + securec_check(rc, "\0", "\0"); + oss->MultipartUpload(bucket_name, object_name, buffer, partSize); + memset_s(buffer, partSize, 0, partSize); + securec_check(rc, "\0", "\0"); + if (buff_off > 0) { + rc = memcpy_s(buffer, buff_off, buffLoc(buff, bufferCxt) + partLeftSize, buff_off); + securec_check(rc, "\0", 
"\0"); + } + partLeftSize = partSize - buff_off; + } + } + // reuse the buffer + clearBuff(buff); + } + if (bucket_name != NULL && object_name != NULL) { + oss->MultipartUpload(bucket_name, object_name, buffer, (partSize - partLeftSize)); + oss->CompleteMultipartUploadRequest(bucket_name, object_name); + } + if (bufferCxt != NULL) { + destroyBufferCxt((BufferCxt *)bufferCxt); + } + pfree_ext(buffer); + return NULL; +} + +void initBackupReaderContexts(ReaderCxt** cxt) +{ + ReaderCxt* readerCxt= NULL; + current.readerThreadCount = num_threads - 1; + /* alloc memmory */ + ReaderCxt* current_readerCxt = (ReaderCxt*)palloc(sizeof(ReaderCxt) * current.readerThreadCount); + if (current_readerCxt == NULL) { + elog(ERROR, "reader thread allocate failed: out of memory"); + } + /* Initialize the reader context */ + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = ¤t_readerCxt[i]; + pthread_spin_init(&readerCxt->lock, PTHREAD_PROCESS_PRIVATE); + readerCxt->state = READER_THREAD_STATE_INIT; + readerCxt->file = (pgFile**)palloc(sizeof(pgFile*) * READER_THREAD_FILE_COUNT); + if (readerCxt->file == NULL) { + elog(ERROR, "file list allocate failed: out of memory"); + } + readerCxt->prefile = (pgFile**)palloc(sizeof(pgFile*) * READER_THREAD_FILE_COUNT); + if (readerCxt->prefile == NULL) { + elog(ERROR, "prefile list allocate failed: out of memory"); + } + readerCxt->fromPath = (char**)palloc(sizeof(char*) * READER_THREAD_FILE_COUNT); + if (readerCxt->fromPath == NULL) { + elog(ERROR, "fromPath list allocate failed: out of memory"); + } + readerCxt->toPath = (char**)palloc(sizeof(char*) * READER_THREAD_FILE_COUNT); + if (readerCxt->toPath == NULL) { + elog(ERROR, "toPath list allocate failed: out of memory"); + } + readerCxt->fileBuffer = (char *)palloc(FILE_BUFFER_SIZE * READER_THREAD_FILE_COUNT); + if (readerCxt->fileBuffer == NULL) { + elog(ERROR, "file buffer allocate failed: out of memory"); + } + readerCxt->segType = (FILE_APPEND_SEG_TYPE 
*)palloc(sizeof(FILE_APPEND_SEG_TYPE) * READER_THREAD_FILE_COUNT); + if (readerCxt->segType == NULL) { + elog(ERROR, "segment type allocate failed: out of memory"); + } + readerCxt->fileRemoved = (bool *)palloc(sizeof(bool) * READER_THREAD_FILE_COUNT); + if (readerCxt->fileRemoved == NULL) { + elog(ERROR, "file removed flag allocate failed: out of memory"); + } + } + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = ¤t_readerCxt[i]; + readerCxt->fileCount = 0; + readerCxt->readerThreadId = 0; + readerCxt->appender = NULL; + for (uint j = 0; j < READER_THREAD_FILE_COUNT; j++) { + readerCxt->file[j] = NULL; + readerCxt->prefile[j] = NULL; + readerCxt->fromPath[j] = NULL; + readerCxt->toPath[j] = NULL; + readerCxt->segType[j] = FILE_APPEND_TYPE_UNKNOWN; + readerCxt->fileRemoved[j] = false; + } + } + *cxt = current_readerCxt; +} + +void startBackupReaders(backup_files_arg* arg, backupReaderThreadArgs* thread_args) +{ + for (uint i = 0; i < current.readerThreadCount; i++) { + backupReaderThreadArgs* args = &thread_args[i]; + args->arg = arg; + args->readerCxt = ¤t.readerCxt[i]; + pthread_create(&(args->readerCxt->readerThreadId), nullptr, backupReaderThreadMain, (void*)args); + } +} + +void* backupReaderThreadMain(void* thread_args) +{ + backupReaderThreadArgs* args = (backupReaderThreadArgs*)thread_args; + backup_files_arg* arg = (backup_files_arg*)(args->arg); + ReaderCxt* readerCxt = (ReaderCxt*)(args->readerCxt); + while (!isReaderThreadStopped(readerCxt)) { + if (getReaderState(readerCxt) != READER_THREAD_STATE_START) { + pg_usleep(WAIT_FOR_STATE_CHANGE_TIME); + continue; + } + Assert(readerCxt->fileCount <= READER_THREAD_FILE_COUNT); + for (uint i = 0; i < readerCxt->fileCount; i++) { + copyFileToFileBuffer(readerCxt, i, arg); + } + setReaderState(readerCxt, READER_THREAD_STATE_FLUSHING); + } + return NULL; +} + +void copyFileToFileBuffer(ReaderCxt* readerCxt, int fileIndex, backup_files_arg* arg) +{ + pgFile* file = readerCxt->file[fileIndex]; 
+ pgFile* prev_file = readerCxt->prefile[fileIndex]; + char* fileBuffer = readerCxt->fileBuffer + fileIndex * FILE_BUFFER_SIZE; + char* from_fullpath = readerCxt->fromPath[fileIndex]; + char* to_fullpath = readerCxt->toPath[fileIndex]; + if (file->is_datafile && !file->is_cfs) { + backup_data_file(&(arg->conn_arg), file, from_fullpath, to_fullpath, + arg->prev_start_lsn, + current.backup_mode, + instance_config.compress_alg, + instance_config.compress_level, + arg->nodeInfo->checksum_version, + arg->hdr_map, false, NULL, fileBuffer); + } else { + backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, + current.backup_mode, current.parent_backup, true, NULL, fileBuffer); + } +} + +ReaderThreadState getReaderState(ReaderCxt* readerCxt) +{ + pthread_spin_lock(&readerCxt->lock); + ReaderThreadState state = readerCxt->state; + pthread_spin_unlock(&readerCxt->lock); + return state; +} + +void setReaderState(ReaderCxt* readerCxt, ReaderThreadState state) +{ + pthread_spin_lock(&readerCxt->lock); + readerCxt->state = state; + pthread_spin_unlock(&readerCxt->lock); +} + +void setSenderState(SenderCxt* senderCxt, SenderThreadState state) +{ + pthread_spin_lock(&senderCxt->lock); + senderCxt->state = state; + pthread_spin_unlock(&senderCxt->lock); +} + +SenderThreadState getSenderState(SenderCxt* senderCxt) +{ + pthread_spin_lock(&senderCxt->lock); + SenderThreadState state = senderCxt->state; + pthread_spin_unlock(&senderCxt->lock); + return state; +} + +void flushReaderContexts(void* arg) +{ + ReaderCxt* readerCxt = NULL; + char* fileBuffer = NULL; + backup_files_arg* args = (backup_files_arg*)arg; + waitForReadersCopyComplete(); + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = &current.readerCxt[i]; + for (uint j = 0; j < readerCxt->fileCount; j++) { + if (!readerCxt->fileRemoved[j]) { + fileBuffer = readerCxt->fileBuffer + j * FILE_BUFFER_SIZE; + if (readerCxt->file[j]->is_datafile && !readerCxt->file[j]->is_cfs) { + 
backup_data_file(&(args->conn_arg), readerCxt->file[j], readerCxt->fromPath[j], readerCxt->toPath[j], + args->prev_start_lsn, + current.backup_mode, + instance_config.compress_alg, + instance_config.compress_level, + args->nodeInfo->checksum_version, + args->hdr_map, false, readerCxt->appender, NULL); + } else { + backup_non_data_file(readerCxt->file[j], readerCxt->prefile[j], readerCxt->fromPath[j], readerCxt->toPath[j], + current.backup_mode, current.parent_backup, true, readerCxt->appender, NULL); + } + pg_free(readerCxt->fromPath[j]); + pg_free(readerCxt->toPath[j]); + readerCxt->fileRemoved[j] = true; + readerCxt->file[j] = NULL; + readerCxt->prefile[j] = NULL; + } + } + readerCxt->fileCount = 0; + setReaderState(readerCxt, READER_THREAD_STATE_FLUSHED); + } +} + +void waitForReadersCopyComplete() +{ + ReaderCxt* readerCxt = NULL; + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = &current.readerCxt[i]; + if (getReaderState(readerCxt) == READER_THREAD_STATE_INIT || + getReaderState(readerCxt) == READER_THREAD_STATE_FLUSHED) { + setReaderState(readerCxt, READER_THREAD_STATE_START); + } + } + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = &current.readerCxt[i]; + if (getReaderState(readerCxt) == READER_THREAD_STATE_START) { + pg_usleep(WAIT_FOR_STATE_CHANGE_TIME); + i = i - 1; + continue; + } + } +} + +void waitForSenderThread() +{ + SenderCxt* senderCxt = current.sender_cxt; + while (getSenderState(senderCxt) != SENDER_THREAD_STATE_FINISHED) { + pg_usleep(WAIT_FOR_STATE_CHANGE_TIME); + } +} + +void stopBackupReaders() +{ + ReaderCxt* readerCxt = NULL; + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = &current.readerCxt[i]; + setReaderState(readerCxt, READER_THREAD_STATE_STOP); + } + for (uint i = 0; i < current.readerThreadCount; i++) { + readerCxt = &current.readerCxt[i]; + pthread_join(readerCxt->readerThreadId, NULL); + } +} + +void stopBackupSender() +{ + SenderCxt* senderCxt = current.sender_cxt; + 
setSenderState(senderCxt, SENDER_THREAD_STATE_STOP); + pthread_join(senderCxt->senderThreadId, NULL); +} + +int getFreeReaderThread() +{ + int slot = -1; + ReaderCxt* readerCxt = NULL; + for (size_t i = 0; i < current.readerThreadCount; i++) { + readerCxt = &current.readerCxt[i]; + if (getReaderState(readerCxt) == READER_THREAD_STATE_INIT || + getReaderState(readerCxt) == READER_THREAD_STATE_FLUSHED) { + slot = i; + break; + } + } + return slot; +} + +void* restoreReaderThreadMain(void* arg) +{ + restoreReaderThreadArgs* args = (restoreReaderThreadArgs*)arg; + /* get pbk file from oss server */ + Oss::Oss* oss = getOssClient(); + const int object_suffix_len = 4; + char* object_name = NULL; + char* prefix_name = getPrefixName(args->dest_backup); + char* bucket_name = getBucketName(); + if (bucket_name == NULL || !oss->BucketExists(bucket_name)) { + elog(ERROR, "bucket %s not found, please create it first", bucket_name ? bucket_name : "null"); + } + parray* objects = parray_new(); + oss->ListObjectsWithPrefix(bucket_name, prefix_name, objects); + size_t objects_num = parray_num(objects); + size_t pbk_objects_num = 0; + for(size_t i = 0; i < objects_num; ++i) { + object_name = (char*)parray_get(objects, i); + if (strncmp(object_name + strlen(object_name) - object_suffix_len, ".pbk", object_suffix_len) == 0) { + pbk_objects_num++; + } + } + args->bufferCxt->fileNum = pbk_objects_num; + elog(INFO, "the total number of backup %s's file objects is %d, and pbk file objects is %d", + base36enc(args->dest_backup->start_time), objects_num, pbk_objects_num); + for(size_t i = 0; i < objects_num; ++i) { + if (args->bufferCxt->earlyExit) { + break; + } + object_name = (char*)parray_get(objects, i); + elog(INFO, "download object: %s from s3", object_name); + if (strncmp(object_name + strlen(object_name) - object_suffix_len, ".pbk", object_suffix_len) == 0) { + args->bufferCxt->fileEnd = false; + oss->GetObject(bucket_name, object_name, (void*)args->bufferCxt); + } else { + char 
file_name[MAXPGPATH]; + int rc = snprintf_s(file_name, MAXPGPATH, MAXPGPATH - 1, "/%s", object_name); + securec_check_ss_c(rc, "\0", "\0"); + oss->GetObject(bucket_name, object_name, (char*)file_name); + } + } + parray_free(objects); + return NULL; +} \ No newline at end of file diff --git a/src/bin/pg_probackup/pg_probackup.cpp b/src/bin/pg_probackup/pg_probackup.cpp index 362aff48a3..bfd1dc484a 100644 --- a/src/bin/pg_probackup/pg_probackup.cpp +++ b/src/bin/pg_probackup/pg_probackup.cpp @@ -25,6 +25,7 @@ #include "storage/file/fio_device.h" #include "storage/dss/dss_adaptor.h" #include +#include "oss/include/restore.h" #define MIN_ULIMIT_STACK_SIZE 8388608 // 1024 * 1024 * 8 @@ -87,6 +88,7 @@ bool backup_replslots = false; bool smooth_checkpoint; char *remote_agent; static char *backup_note = NULL; +static char *oss_status_string = NULL; /* restore options */ static char *target_time = NULL; static char *target_xid = NULL; @@ -148,11 +150,15 @@ static pgSetBackupParams *set_backup_params = NULL; pgBackup current; static ProbackupSubcmd backup_subcmd = NO_CMD; +/* Oss Client*/ +void* oss_client = NULL; + static bool help_opt = false; static void opt_incr_restore_mode(ConfigOption *opt, const char *arg); static void opt_backup_mode(ConfigOption *opt, const char *arg); static void opt_show_format(ConfigOption *opt, const char *arg); +static void opt_media_type(ConfigOption *opt, const char *arg); static void compress_init(void); static void dss_init(void); @@ -222,6 +228,8 @@ static ConfigOption cmd_options[] = { 'u', 139, "timeline", &target_tli, SOURCE_CMD_STRICT }, { 's', 144, "lsn", &target_lsn, SOURCE_CMD_STRICT }, { 'b', 140, "immediate", &target_immediate, SOURCE_CMD_STRICT }, + { 'f', 'M', "media-type", (void *)opt_media_type, SOURCE_CMD_STRICT }, + { 's', 241, "s3-status", &oss_status_string, SOURCE_CMD_STRICT }, { 0 } }; @@ -445,7 +453,13 @@ static void parse_instance_name() { join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE); + 
if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigFile(path); + } config_read_opt(path, instance_options, ERROR, true, false); + if (current.media_type == MEDIA_TYPE_OSS) { + remove(path); + } } setMyLocation(); } @@ -610,6 +624,7 @@ static void parse_backup_option_to_params(char *command, char *command_name) if (backup_subcmd == SET_BACKUP_CMD || backup_subcmd == BACKUP_CMD) { time_t expire_time = 0; + oss_status_t oss_status = OSS_STATUS_INVALID; if (expire_time_string && ttl >= 0) elog(ERROR, "You cannot specify '--expire-time' and '--ttl' options together"); @@ -622,12 +637,17 @@ static void parse_backup_option_to_params(char *command, char *command_name) expire_time_string); } - if (expire_time > 0 || ttl >= 0 || backup_note) + if (oss_status_string) { + oss_status = str2ossStatus(oss_status_string); + } + + if (expire_time > 0 || ttl >= 0 || backup_note || oss_status_string) { set_backup_params = pgut_new(pgSetBackupParams); set_backup_params->ttl = ttl; set_backup_params->expire_time = expire_time; set_backup_params->note = backup_note; + set_backup_params->oss_status = oss_status; if (backup_note && strlen(backup_note) > MAX_NOTE_SIZE) elog(ERROR, "Backup note cannot exceed %u bytes", MAX_NOTE_SIZE); @@ -918,6 +938,12 @@ opt_backup_mode(ConfigOption *opt, const char *arg) current.backup_mode = parse_backup_mode(arg); } +static void +opt_media_type(ConfigOption *opt, const char *arg) +{ + current.media_type = parse_media_type(arg); +} + static void opt_show_format(ConfigOption *opt, const char *arg) { diff --git a/src/bin/pg_probackup/pg_probackupb.h b/src/bin/pg_probackup/pg_probackupb.h index 56d19a24f0..9144f8fee1 100644 --- a/src/bin/pg_probackup/pg_probackupb.h +++ b/src/bin/pg_probackup/pg_probackupb.h @@ -101,6 +101,12 @@ typedef enum ShowFormat SHOW_JSON } ShowFormat; +typedef enum MediaType { + MEDIA_TYPE_UNKNOWN = 0, + MEDIA_TYPE_DISK, + MEDIA_TYPE_OSS +} MediaType; + /* special values of pgBackup fields */ #define INVALID_BACKUP_ID 0 /* 
backup ID is not provided by user */ #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ @@ -174,6 +180,9 @@ typedef struct InstanceConfig /* DSS conntct parameters */ DssOptions dss; + + /* OSS parameters*/ + OssOptions oss; } InstanceConfig; extern ConfigOption instance_options[]; @@ -204,6 +213,9 @@ typedef struct HeaderMap } HeaderMap; +struct SenderCxt; +struct ReaderCxt; + typedef struct pgBackup pgBackup; /* Information about single backup stored in backup.conf */ @@ -291,6 +303,17 @@ struct pgBackup /* device type */ device_type_t storage_type; + + /* media type */ + MediaType media_type; + /* local or oss */ + oss_status_t oss_status; + /* sender context */ + SenderCxt* sender_cxt; + parray* filesinfo; + /* reader count and context */ + uint32 readerThreadCount; + ReaderCxt* readerCxt; }; /* Recovery target for restore and validate subcommands */ @@ -339,6 +362,7 @@ typedef struct pgSetBackupParams * must be pinned. */ char *note; + oss_status_t oss_status; } pgSetBackupParams; typedef struct @@ -444,6 +468,28 @@ typedef struct BackupPageHeader2 uint16 checksum; } BackupPageHeader2; +typedef enum FILE_APPEND_SEG_TYPE +{ + FILE_APPEND_TYPE_UNKNOWN = 0, + FILE_APPEND_TYPE_FILES, + FILE_APPEND_TYPE_DIR, + FILE_APPEND_TYPE_FILE, + FILE_APPEND_TYPE_FILE_CONTENT, + FILE_APPEND_TYPE_FILE_END, + FILE_APPEND_TYPE_FILES_END +} FILE_APPEND_SEG_TYPE; + +typedef struct FileAppender { + void* filePtr; /* hold the buffer context handle */ + char* baseFileName; + char* currFileName; + uint32 fileNo; + uint32 minFileNo; + uint32 maxFileNo; + uint64 currFileSize; + FILE_APPEND_SEG_TYPE type; +} FileAppender; + /* Special value for compressed_size field */ #define PageIsOk 0 #define SkipCurrentPage -1 diff --git a/src/bin/pg_probackup/pg_probackupc.h b/src/bin/pg_probackup/pg_probackupc.h index 93408264cf..095c73ec5a 100644 --- a/src/bin/pg_probackup/pg_probackupc.h +++ b/src/bin/pg_probackup/pg_probackupc.h @@ -88,6 
+88,9 @@ extern bool skip_block_validation; /* current settings */ extern pgBackup current; +/* Oss Client*/ +extern void* oss_client; + /* argv of the process */ extern char** commands_args; @@ -100,6 +103,7 @@ extern int do_backup(time_t start_time, pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs, bool backup_replslots); extern BackupMode parse_backup_mode(const char *value); extern const char *deparse_backup_mode(BackupMode mode); +extern MediaType parse_media_type(const char *value); extern void process_block_change(ForkNumber forknum, const RelFileNode rnode, BlockNumber blkno); @@ -166,6 +170,7 @@ extern int do_validate_all(void); extern int validate_one_page(Page page, BlockNumber absolute_blkno, XLogRecPtr stop_lsn, PageState *page_st, uint32 checksum_version); +extern bool pre_check_backup(pgBackup *backup); /* return codes for validate_one_page */ /* TODO: use enum */ @@ -315,15 +320,16 @@ extern void backup_data_file(ConnectionArgs* conn_arg, pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, - HeaderMap *hdr_map, bool missing_ok); + HeaderMap *hdr_map, bool missing_ok, + FileAppender* appender = NULL, char* fileBuffer = NULL); extern void backup_non_data_file(pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, BackupMode backup_mode, time_t parent_backup_time, - bool missing_ok); + bool missing_ok, FileAppender* appender = NULL, char* fileBuffer = NULL); extern void backup_non_data_file_internal(const char *from_fullpath, fio_location from_location, const char *to_fullpath, pgFile *file, - bool missing_ok); + bool missing_ok, FileAppender* appender = NULL, char** fileBuffer = NULL); extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, @@ -404,7 +410,9 @@ extern 
void time2iso(char *buf, size_t len, time_t time); extern const char *status2str(BackupStatus status); extern BackupStatus str2status(const char *status); extern const char *dev2str(device_type_t type); +extern const char *ossStatus2str(oss_status_t status); extern device_type_t str2dev(const char *dev); +extern oss_status_t str2ossStatus(const char *status); extern const char *base36enc(long unsigned int value); extern char *base36enc_dup(long unsigned int value); extern long unsigned int base36dec(const char *text); @@ -436,18 +444,19 @@ extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 extern int send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, - BackupMode backup_mode); + BackupMode backup_mode, FileAppender* appender = NULL, char* fileBuffer = NULL); /* FIO */ extern void fio_delete(mode_t mode, const char *fullpath, fio_location location); extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg, - BackupPageHeader2 **headers); + BackupPageHeader2 **headers, FileAppender* appender = NULL, char** fileBuffer = NULL); /* return codes for fio_send_pages */ extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg); extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, - pgFile *file, char **errormsg); + pgFile *file, char **errormsg, + FileAppender* appender = NULL, char** fileBuffer = NULL); extern void fio_list_dir(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num, @@ -496,4 +505,25 @@ void *gs_palloc0(Size size); 
char *gs_pstrdup(const char *in); void *gs_repalloc(void *pointer, Size size); +typedef struct +{ + const char *base_path; + const char *dss_path; + parray *files; + bool corrupted; + XLogRecPtr stop_lsn; + uint32 checksum_version; + uint32 backup_version; + BackupMode backup_mode; + const char *external_prefix; + HeaderMap *hdr_map; + + /* + * Return value from the thread. + * 0 means there is no error, 1 - there is an error. + */ + int ret; +} validate_files_arg; + + #endif /* PG_PROBACKUPC_H */ diff --git a/src/bin/pg_probackup/restore.cpp b/src/bin/pg_probackup/restore.cpp index 683252dcb4..a323e09850 100644 --- a/src/bin/pg_probackup/restore.cpp +++ b/src/bin/pg_probackup/restore.cpp @@ -21,6 +21,7 @@ #include "catalog/catalog.h" #include "storage/file/fio_device.h" #include "logger.h" +#include "oss/include/restore.h" #define RESTORE_ARRAY_LEN 100 @@ -380,39 +381,45 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, } /* validate datafiles only */ - pgBackupValidate(tmp_backup, params); - - /* After pgBackupValidate() only following backup - * states are possible: ERROR, RUNNING, CORRUPT and OK. - * Validate WAL only for OK, because there is no point - * in WAL validation for corrupted, errored or running backups. - */ - if (tmp_backup->status != BACKUP_STATUS_OK) - { - corrupted_backup = tmp_backup; - break; + if (current.media_type == MEDIA_TYPE_OSS && !params->is_restore && + tmp_backup->oss_status != OSS_STATUS_LOCAL) { + performRestoreOrValidate(tmp_backup, true); + } else if (current.media_type != MEDIA_TYPE_OSS || tmp_backup->oss_status == OSS_STATUS_LOCAL) { + pgBackupValidate(tmp_backup, params); + /* After pgBackupValidate() only following backup + * states are possible: ERROR, RUNNING, CORRUPT and OK. + * Validate WAL only for OK, because there is no point + * in WAL validation for corrupted, errored or running backups. 
+ */ + if (tmp_backup->status != BACKUP_STATUS_OK) + { + corrupted_backup = tmp_backup; + break; + } + /* We do not validate WAL files of intermediate backups + * It`s done to speed up restore + */ } - /* We do not validate WAL files of intermediate backups - * It`s done to speed up restore - */ } /* There is no point in wal validation of corrupted backups */ // TODO: there should be a way for a user to request only(!) WAL validation - if (!corrupted_backup) - { - /* - * Validate corresponding WAL files. - * We pass base_full_backup timeline as last argument to this function, - * because it's needed to form the name of xlog file. - */ - validate_wal(dest_backup, arclog_path, rt->target_time, - rt->target_xid, rt->target_lsn, - dest_backup->tli, instance_config.xlog_seg_size); + if(current.media_type != MEDIA_TYPE_OSS || tmp_backup->oss_status == OSS_STATUS_LOCAL) { + if (!corrupted_backup) + { + /* + * Validate corresponding WAL files. + * We pass base_full_backup timeline as last argument to this function, + * because it's needed to form the name of xlog file. 
+ */ + validate_wal(dest_backup, arclog_path, rt->target_time, + rt->target_xid, rt->target_lsn, + dest_backup->tli, instance_config.xlog_seg_size); + } + /* Orphanize every OK descendant of corrupted backup */ + else + set_orphan_status(backups, corrupted_backup); } - /* Orphanize every OK descendant of corrupted backup */ - else - set_orphan_status(backups, corrupted_backup); } /* @@ -449,6 +456,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, */ if (params->is_restore) { + restore_chain(dest_backup, parent_chain, params, instance_config.pgdata, instance_config.dss.vgdata, no_sync); @@ -1230,6 +1238,41 @@ static void threads_handle(pthread_t *threads, pthread_create(&progressThread, nullptr, ProgressReportRestore, nullptr); /* Restore files into target directory */ + if (current.media_type == MEDIA_TYPE_OSS) { + for (i = parray_num(parent_chain) - 1; i >= 0; i--) { + pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); + if (!lock_backup(backup, true)) { + elog(ERROR, "Cannot lock backup %s", base36enc(backup->start_time)); + } + if (backup->oss_status == OSS_STATUS_LOCAL) { + continue; + } + if (backup->status != BACKUP_STATUS_OK && + backup->status != BACKUP_STATUS_DONE) { + if (params->force) + elog(WARNING, "Backup %s is not valid, restore is forced", + base36enc(backup->start_time)); + else + elog(ERROR, "Backup %s cannot be restored because it is not valid", + base36enc(backup->start_time)); + } + /* confirm block size compatibility */ + if (backup->block_size != BLCKSZ) + elog(ERROR, + "BLCKSZ(%d) is not compatible(%d expected)", + backup->block_size, BLCKSZ); + if (backup->wal_block_size != XLOG_BLCKSZ) + elog(ERROR, + "XLOG_BLCKSZ(%d) is not compatible(%d expected)", + backup->wal_block_size, XLOG_BLCKSZ); + performRestoreOrValidate(backup, false); + /* Backup is downloaded. 
Update backup status */ + backup->end_time = time(NULL); + backup->oss_status = OSS_STATUS_LOCAL; + write_backup(backup, true); + } + } + for (i = 0; i < num_threads; i++) { restore_files_arg *arg = &(threads_args[i]); @@ -1397,7 +1440,6 @@ restore_files(void *arg) PageState *checksum_map = NULL; /* it should take ~1.5MB at most */ datapagemap_t *lsn_map = NULL; /* it should take 16kB at most */ pgFile *dest_file = (pgFile *)parray_get(arguments->dest_files, i); - /* Directories were created before */ if (S_ISDIR(dest_file->mode)) { directoryFilesLocal++; @@ -1504,8 +1546,6 @@ restore_files(void *arg) elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, strerror(errno)); - - // If destination file is 0 sized, then just close it and go for the next if (dest_file->write_size == 0) goto done; diff --git a/src/bin/pg_probackup/show.cpp b/src/bin/pg_probackup/show.cpp index d7fa02e916..249eec1504 100644 --- a/src/bin/pg_probackup/show.cpp +++ b/src/bin/pg_probackup/show.cpp @@ -37,6 +37,7 @@ typedef struct ShowBackendRow char start_lsn[20]; char stop_lsn[20]; char type[20]; + char oss_status[20]; const char *status; } ShowBackendRow; @@ -572,16 +573,16 @@ static void process_time(pgBackup *backup, ShowBackendRow *row) static void show_instance_plain(const char *instance_name, device_type_t instance_type, parray *backup_list, bool show_name) { -#define SHOW_FIELDS_COUNT 15 +#define SHOW_FIELDS_COUNT 16 int i; const char *names[SHOW_FIELDS_COUNT] = { "Instance", "Version", "ID", "Recovery Time", "Mode", "WAL Mode", "TLI", "Time", "Data", "WAL", - "Zratio", "Start LSN", "Stop LSN", "Type", "Status" }; + "Zratio", "Start LSN", "Stop LSN", "Type", "S3 Status", "Status" }; const char *field_formats[SHOW_FIELDS_COUNT] = { " %-*s ", " %-*s ", " %-*s ", " %-*s ", " %-*s ", " %-*s ", " %-*s ", " %*s ", " %*s ", " %*s ", - " %*s ", " %-*s ", " %-*s ", " %-*s ", " %-*s "}; + " %*s ", " %-*s ", " %-*s ", " %-*s ", " %-*s ", " %-*s "}; uint32 widths[SHOW_FIELDS_COUNT]; uint32 
widths_sum = 0; ShowBackendRow *rows = NULL; @@ -713,6 +714,12 @@ show_instance_plain(const char *instance_name, device_type_t instance_type, par widths[cur] = Max(widths[cur], (uint32)strlen(row->type)); cur++; + /* S3 Status (LOCAL OR S3) */ + rc = snprintf_s(row->oss_status, lengthof(row->oss_status), lengthof(row->oss_status) - 1, "%s", ossStatus2str(backup->oss_status)); + securec_check_ss_c(rc, "\0", "\0"); + widths[cur] = Max(widths[cur], (uint32)strlen(row->oss_status)); + cur++; + /* Status */ row->status = status2str(backup->status); widths[cur] = Max(widths[cur], strlen(row->status)); @@ -805,6 +812,10 @@ show_instance_plain(const char *instance_name, device_type_t instance_type, par row->type); cur++; + appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur], + row->oss_status); + cur++; + appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur], row->status); cur++; diff --git a/src/bin/pg_probackup/util.cpp b/src/bin/pg_probackup/util.cpp index 1951c0e31c..2cd684c6d0 100644 --- a/src/bin/pg_probackup/util.cpp +++ b/src/bin/pg_probackup/util.cpp @@ -22,6 +22,7 @@ #include "tool_common.h" #include "common/fe_memutils.h" #include "storage/file/fio_device.h" +#include "oss/include/restore.h" static const char *statusName[] = { @@ -46,6 +47,14 @@ static const char *devTypeName[] = "UNKNOWN" }; +static const char *ossStatusName[] = +{ + "LOCAL", + "S3", + "UNKNOWN", + "UNKNOWN" +}; + uint32 NUM_65536 = 65536; uint32 NUM_10000 = 10000; @@ -733,6 +742,9 @@ copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, } else { writeControlFile(&ControlFile, to_fullpath, to_location); } + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(to_fullpath, to_fullpath); + } pg_free(buffer); } @@ -817,6 +829,11 @@ const char *dev2str(device_type_t type) return devTypeName[type]; } +const char *ossStatus2str(oss_status_t type) +{ + return ossStatusName[type]; +} + device_type_t str2dev(const char *dev) { for (int i = 0; i < 
(int)DEV_TYPE_NUM; i++) { @@ -826,6 +843,15 @@ device_type_t str2dev(const char *dev) return DEV_TYPE_INVALID; } +oss_status_t str2ossStatus(const char *status) +{ + for (int i = 0; i < (int)OSS_STATUS_NUM; i++) { + if (pg_strcasecmp(status, ossStatusName[i]) == 0) + return (oss_status_t)i; + } + return OSS_STATUS_INVALID; +} + bool datapagemap_is_set(datapagemap_t *map, BlockNumber blkno) { diff --git a/src/bin/pg_probackup/validate.cpp b/src/bin/pg_probackup/validate.cpp index 8faaa0d31d..df24982c51 100644 --- a/src/bin/pg_probackup/validate.cpp +++ b/src/bin/pg_probackup/validate.cpp @@ -18,6 +18,7 @@ #include "common/fe_memutils.h" #include "storage/file/fio_device.h" #include "logger.h" +#include "oss/include/restore.h" static void *pgBackupValidateFiles(void *arg); static void do_validate_instance(void); @@ -25,26 +26,6 @@ static void do_validate_instance(void); static bool corrupted_backup_found = false; static bool skipped_due_to_lock = false; -typedef struct -{ - const char *base_path; - const char *dss_path; - parray *files; - bool corrupted; - XLogRecPtr stop_lsn; - uint32 checksum_version; - uint32 backup_version; - BackupMode backup_mode; - const char *external_prefix; - HeaderMap *hdr_map; - - /* - * Return value from the thread. - * 0 means there is no error, 1 - there is an error. 
- */ - int ret; -} validate_files_arg; - /* Progress Counter */ static int g_inregularFiles = 0; static int g_doneFiles = 0; @@ -111,7 +92,7 @@ bool pre_check_backup(pgBackup *backup) return false; } - /* Revalidation is attempted for DONE, ORPHAN and CORRUPT backups */ + /* Revalidation is attempted for DONE, ORPHAN, LOCAL and CORRUPT backups */ if (backup->status != BACKUP_STATUS_OK && backup->status != BACKUP_STATUS_DONE && backup->status != BACKUP_STATUS_ORPHAN && @@ -669,7 +650,11 @@ do_validate_instance(void) continue; } /* Valiate backup files*/ - pgBackupValidate(current_backup, NULL); + if (current.media_type == MEDIA_TYPE_OSS && current.oss_status != OSS_STATUS_LOCAL) { + performRestoreOrValidate(current_backup, true); + } else if (current.media_type != MEDIA_TYPE_OSS || current.oss_status == OSS_STATUS_LOCAL) { + pgBackupValidate(current_backup, NULL); + } /* Validate corresponding WAL files */ if (current_backup->status == BACKUP_STATUS_OK) diff --git a/src/include/storage/file/fio_device_com.h b/src/include/storage/file/fio_device_com.h index 47faaf5d24..c5159fc918 100644 --- a/src/include/storage/file/fio_device_com.h +++ b/src/include/storage/file/fio_device_com.h @@ -38,6 +38,13 @@ typedef enum en_device_type { DEV_TYPE_INVALID } device_type_t; +typedef enum en_status_type { + OSS_STATUS_LOCAL = 0, + OSS_STATUS_OSS, + OSS_STATUS_NUM, + OSS_STATUS_INVALID +} oss_status_t; + extern bool g_enable_dss; extern uint64 XLogSegmentSize; diff --git a/src/include/tool_common.h b/src/include/tool_common.h index 5a2354cd46..a6cd75f1f6 100644 --- a/src/include/tool_common.h +++ b/src/include/tool_common.h @@ -132,6 +132,15 @@ typedef struct DssOptions { char *socketpath; } DssOptions; +/* OSS conntct parameters */ +typedef struct OssOptions { + char *access_id; + char *access_key; + char *endpoint; + char *region; + char *access_bucket; +} OssOptions; + typedef struct SSInstanceConfig { DssOptions dss; } SSInstanceConfig; -- Gitee From 
2bc1b621c311e6ea04b40053e693570c65de4607 Mon Sep 17 00:00:00 2001 From: wang-mingxuanHedgehog <504013468@qq.com> Date: Tue, 2 Jul 2024 17:30:35 +0800 Subject: [PATCH 022/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dxheap=E4=BD=BF?= =?UTF-8?q?=E7=94=A8=E4=B8=A4=E7=A7=8D=E6=96=B9=E5=BC=8F=E5=88=9B=E8=A1=A8?= =?UTF-8?q?=E7=BB=93=E6=9E=9C=E4=B8=8D=E5=90=8C=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/tablecmds.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 8d7f7d942d..0215068944 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -1239,7 +1239,10 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt DefElem* def = makeDefElem("compression", (Node*)makeString(COMPRESSION_LOW)); res = lappend(options, def); } - + if (isCStore && assignedStorageType) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), + errmsg("There is a conflict caused by storage_type and orientation"))); + } bool noSupportTable = segment || isCStore || isTsStore || relkind != RELKIND_RELATION || stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED || stmt->relation->relpersistence == RELPERSISTENCE_TEMP || -- Gitee From 1d7ee817a6ce05fee6e9762d1be8403df9e4c333 Mon Sep 17 00:00:00 2001 From: shirley_zhengx Date: Tue, 9 Jul 2024 16:48:22 +0800 Subject: [PATCH 023/347] forbid standby cluster to check disk lsn --- src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp index 654f21e52b..606fe99073 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp @@ -198,7 
+198,10 @@ bool StartReadPage(BufferDesc *buf_desc, LWLockMode mode) } void SegPageCheckDiskLSNForRelease(BufferDesc *buf_desc) -{ +{ + if (SS_DISASTER_STANDBY_CLUSTER) { + return; + } dms_buf_ctrl_t *buf_ctrl = GetDmsBufCtrl(buf_desc->buf_id); RelFileNode rnode = buf_desc->tag.rnode; XLogRecPtr lsn_on_mem = PageGetLSN(BufHdrGetBlock(buf_desc)); -- Gitee From 627d9b634a0da8ed0131d4a89a3a1b8bdedf41ed Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Tue, 9 Jul 2024 17:36:07 +0800 Subject: [PATCH 024/347] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=A4=96=E9=94=AE?= =?UTF-8?q?=E7=BA=A6=E6=9D=9F=E6=97=B6=E7=9A=84=E9=94=81=E9=99=8D=E7=BA=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/tablecmds.cpp | 45 ++++++++++++++++--- .../optimizer/commands/trigger.cpp | 4 +- src/test/regress/expected/alter_table_003.out | 19 ++++++++ src/test/regress/sql/alter_table_003.sql | 20 +++++++++ 4 files changed, 79 insertions(+), 9 deletions(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 8d7f7d942d..a1b9b0bda9 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -8085,6 +8085,40 @@ LOCKMODE AlterTableGetLockLevel(List* cmds) } else { cmd_lockmode = ShareUpdateExclusiveLock; } + } else { + switch (cmd->subtype) { + /* + * These subcommands affect write operations only. 
+ */ + case AT_EnableTrig: + case AT_EnableAlwaysTrig: + case AT_EnableReplicaTrig: + case AT_EnableTrigAll: + case AT_EnableTrigUser: + case AT_DisableTrig: + case AT_DisableTrigAll: + case AT_DisableTrigUser: + cmd_lockmode = ShareRowExclusiveLock; + break; + case AT_AddConstraint: + case AT_ProcessedConstraint: /* becomes AT_AddConstraint */ + case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */ + case AT_ReAddConstraint: /* becomes AT_AddConstraint */ + if (IsA(cmd->def, Constraint)) { + Constraint *con = (Constraint *) cmd->def; + if (con->contype == CONSTR_FOREIGN) { + /* + * We add triggers to both tables when we add a + * Foreign Key, so the lock level must be at least + * as strong as CREATE TRIGGER. + */ + cmd_lockmode = ShareRowExclusiveLock; + } + } + break; + default: + break; + } } /* update with the higher lock mode */ lockmode = set_lockmode(lockmode, cmd_lockmode); @@ -14442,16 +14476,13 @@ static ObjectAddress ATAddForeignKeyConstraint(AlteredTableInfo* tab, Relation r ObjectAddress address; /* - * Grab an exclusive lock on the pk table, so that someone doesn't delete - * rows out from under us. (Although a lesser lock would do for that - * purpose, we'll need exclusive lock anyway to add triggers to the pk - * table; trying to start with a lesser lock will just create a risk of - * deadlock.) + * Grab ShareRowExclusiveLock on the pk table, so that someone doesn't + * delete rows out from under us. 
*/ if (OidIsValid(fkconstraint->old_pktable_oid)) - pkrel = heap_open(fkconstraint->old_pktable_oid, AccessExclusiveLock); + pkrel = heap_open(fkconstraint->old_pktable_oid, ShareRowExclusiveLock); else - pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock); + pkrel = heap_openrv(fkconstraint->pktable, ShareRowExclusiveLock); /* * Validity checks (permission checks wait till we have the column diff --git a/src/gausskernel/optimizer/commands/trigger.cpp b/src/gausskernel/optimizer/commands/trigger.cpp index a6fafdc0da..5ac456ce5d 100644 --- a/src/gausskernel/optimizer/commands/trigger.cpp +++ b/src/gausskernel/optimizer/commands/trigger.cpp @@ -178,9 +178,9 @@ ObjectAddress CreateTrigger(CreateTrigStmt* stmt, const char* queryString, Oid r int ret = 0; if (OidIsValid(relOid)) - rel = heap_open(relOid, AccessExclusiveLock); + rel = heap_open(relOid, ShareRowExclusiveLock); else - rel = HeapOpenrvExtended(stmt->relation, AccessExclusiveLock, false, true); + rel = HeapOpenrvExtended(stmt->relation, ShareRowExclusiveLock, false, true); /* * Triggers must be on tables or views, and there are additional diff --git a/src/test/regress/expected/alter_table_003.out b/src/test/regress/expected/alter_table_003.out index 16580ed8df..040bc368ea 100644 --- a/src/test/regress/expected/alter_table_003.out +++ b/src/test/regress/expected/alter_table_003.out @@ -9396,5 +9396,24 @@ select * from t0; (0 rows) drop table if exists t0; +create table test_0509 (id int ,name varchar(10)); +create table test_0509_2(id int primary key); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_0509_2_pkey" for table "test_0509_2" +\parallel on 2 +begin +alter table test_0509 ADD COLUMN t2_id integer DEFAULT 1 NOT NULL CONSTRAINT fk_t2_id REFERENCES test_0509_2(id); +perform pg_sleep(1); +raise info 'xact1'; +end; +/ +begin +perform pg_sleep(0.5); +PERFORM * from test_0509_2; +raise info 'xact2'; +end; +/ +\parallel off +INFO: xact2 +INFO: xact1 \c postgres drop database 
test_first_after_B; diff --git a/src/test/regress/sql/alter_table_003.sql b/src/test/regress/sql/alter_table_003.sql index eda2e57782..9eec6683eb 100644 --- a/src/test/regress/sql/alter_table_003.sql +++ b/src/test/regress/sql/alter_table_003.sql @@ -3511,5 +3511,25 @@ alter table t0 change column c6 c int first, modify c text after x, change colum select * from t0; drop table if exists t0; + +create table test_0509 (id int ,name varchar(10)); +create table test_0509_2(id int primary key); + +\parallel on 2 +begin +alter table test_0509 ADD COLUMN t2_id integer DEFAULT 1 NOT NULL CONSTRAINT fk_t2_id REFERENCES test_0509_2(id); +perform pg_sleep(1); +raise info 'xact1'; +end; +/ + +begin +perform pg_sleep(0.5); +PERFORM * from test_0509_2; +raise info 'xact2'; +end; +/ +\parallel off + \c postgres drop database test_first_after_B; \ No newline at end of file -- Gitee From f9a385fa76ab073c66068fa6a43b72a57cd35a0f Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Tue, 9 Jul 2024 19:44:09 +0800 Subject: [PATCH 025/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dpipelined=20func?= =?UTF-8?q?=E8=B0=83=E7=94=A8core=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_exec.cpp | 6 +- .../expected/plpgsql/plpgsql_normal.out | 69 ++++++++++++++++++- .../regress/sql/plpgsql/plpgsql_normal.sql | 46 ++++++++++++- 3 files changed, 117 insertions(+), 4 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index 3c1427f763..c94d150eb3 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -2110,12 +2110,14 @@ void AutonomPipelinedFuncRewriteResult(PLpgSQL_execstate *estate) TupleTableSlot *slot = MakeSingleTupleTableSlot(estate->rettupdesc); int index = 0; while (tuplestore_gettupleslot(tuple_store, true, false, slot)) { + Datum datum; if (estate->pipelined_resistuple) { isNulls[index] = false; - values[index] = 
datumCopy(ExecFetchSlotTupleDatum(slot), elemByVal, elemLen); + datum = ExecFetchSlotTupleDatum(slot); } else { - values[index] = heap_slot_getattr(slot, 1, &isNulls[index]); + datum = heap_slot_getattr(slot, 1, &isNulls[index]); } + values[index] = datumCopy(datum, elemByVal, elemLen); index++; } diff --git a/src/test/regress/expected/plpgsql/plpgsql_normal.out b/src/test/regress/expected/plpgsql/plpgsql_normal.out index d68ba926a6..888e15d6e1 100644 --- a/src/test/regress/expected/plpgsql/plpgsql_normal.out +++ b/src/test/regress/expected/plpgsql/plpgsql_normal.out @@ -346,9 +346,74 @@ ERROR: cannot perform a DML operation inside a query CONTEXT: SQL statement "insert into test values(5)" PL/pgSQL function get_tab_ptf_failed() line 5 at SQL statement referenced column: get_tab_ptf_failed +CREATE OR REPLACE PACKAGE pkg0016 AS +TYPE array_type_0016_1 AS varray(10) OF char(10); +FUNCTION func_pipelined_0016(count NUMBER) RETURN array_type_0016_1 pipelined; +END pkg0016; +/ +CREATE OR REPLACE PACKAGE BODY pkg0016 AS +FUNCTION func_pipelined_0016(count NUMBER) RETURN array_type_0016_1 pipelined IS +declare result array_type_0016_1; +BEGIN +FOR i IN 1..count LOOP +result := '{1}'; +pipe row(result); +pipe row(1); +pipe row(date'2022-01-01'); +pipe row(123456.1); +pipe row(-123456.1); +END LOOP; +RETURN; +END; +END pkg0016; +/ +select pkg0016.func_pipelined_0016(2); + func_pipelined_0016 +----------------------------------------------------------------------------------------------------------------------------------------- + {"{\"1 \"}",1,"Sat Jan 01 00:00:00 2022",123456.1,-123456.1,"{\"1 \"}",1,"Sat Jan 01 00:00:00 2022",123456.1,-123456.1} +(1 row) + +CREATE OR REPLACE PACKAGE BODY pkg0016 AS +FUNCTION func_pipelined_0016(count NUMBER) RETURN array_type_0016_1 pipelined IS +declare result array_type_0016_1; +PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN +FOR i IN 1..count LOOP +result := '{1}'; +pipe row(result); +pipe row(1); +pipe row(date'2022-01-01'); +pipe 
row(123456.1); +pipe row(-123456.1); +END LOOP; +RETURN; +END; +END pkg0016; +/ +select pkg0016.func_pipelined_0016(2); + func_pipelined_0016 +----------------------------------------------------------------------------------------------------------------------------------------- + {"{\"1 \"}",1,"Sat Jan 01 00:00:00 2022",123456.1,-123456.1,"{\"1 \"}",1,"Sat Jan 01 00:00:00 2022",123456.1,-123456.1} +(1 row) + +select * from pkg0016.func_pipelined_0016(2); + func_pipelined_0016 +-------------------------- + {"1 "} + 1 + Sat Jan 01 00:00:00 2022 + 123456.1 + -123456.1 + {"1 "} + 1 + Sat Jan 01 00:00:00 2022 + 123456.1 + -123456.1 +(10 rows) + reset search_path; drop schema plpgsql_pipelined cascade; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 20 other objects DETAIL: drop cascades to type plpgsql_pipelined.t_tf_row drop cascades to type plpgsql_pipelined.t_tf_tab drop cascades to function plpgsql_pipelined.get_tab_ptf(numeric) @@ -367,3 +432,5 @@ drop cascades to function plpgsql_pipelined.func_pipelined_022(numeric) drop cascades to table plpgsql_pipelined.test drop cascades to function plpgsql_pipelined.insert_test() drop cascades to function plpgsql_pipelined.get_tab_ptf_failed() +--?.* +drop cascades to function plpgsql_pipelined.func_pipelined_0016(numeric) diff --git a/src/test/regress/sql/plpgsql/plpgsql_normal.sql b/src/test/regress/sql/plpgsql/plpgsql_normal.sql index 09c0a4ae3d..807b63312b 100644 --- a/src/test/regress/sql/plpgsql/plpgsql_normal.sql +++ b/src/test/regress/sql/plpgsql/plpgsql_normal.sql @@ -190,5 +190,49 @@ $BODY$; select get_tab_ptf_failed(); +CREATE OR REPLACE PACKAGE pkg0016 AS +TYPE array_type_0016_1 AS varray(10) OF char(10); +FUNCTION func_pipelined_0016(count NUMBER) RETURN array_type_0016_1 pipelined; +END pkg0016; +/ + +CREATE OR REPLACE PACKAGE BODY pkg0016 AS +FUNCTION func_pipelined_0016(count NUMBER) RETURN array_type_0016_1 pipelined IS +declare result array_type_0016_1; +BEGIN +FOR i IN 1..count LOOP 
+result := '{1}'; +pipe row(result); +pipe row(1); +pipe row(date'2022-01-01'); +pipe row(123456.1); +pipe row(-123456.1); +END LOOP; +RETURN; +END; +END pkg0016; +/ +select pkg0016.func_pipelined_0016(2); + +CREATE OR REPLACE PACKAGE BODY pkg0016 AS +FUNCTION func_pipelined_0016(count NUMBER) RETURN array_type_0016_1 pipelined IS +declare result array_type_0016_1; +PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN +FOR i IN 1..count LOOP +result := '{1}'; +pipe row(result); +pipe row(1); +pipe row(date'2022-01-01'); +pipe row(123456.1); +pipe row(-123456.1); +END LOOP; +RETURN; +END; +END pkg0016; +/ + +select pkg0016.func_pipelined_0016(2); +select * from pkg0016.func_pipelined_0016(2); reset search_path; -drop schema plpgsql_pipelined cascade; \ No newline at end of file +drop schema plpgsql_pipelined cascade; -- Gitee From c492c5aea0458e577dcf91decda5076d705d797b Mon Sep 17 00:00:00 2001 From: chen-chao666 <1790599142@qq.com> Date: Wed, 10 Jul 2024 09:23:25 +0800 Subject: [PATCH 026/347] =?UTF-8?q?=E6=95=85=E9=9A=9C=E6=B3=A8=E5=85=A5?= =?UTF-8?q?=E6=A1=86=E6=9E=B6trigger=E6=96=B0=E5=A2=9Epoint=E5=8F=82?= =?UTF-8?q?=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms.cpp | 1 + .../ddes/adapter/ss_dms_callback.cpp | 2 +- src/gausskernel/ddes/adapter/ss_dms_fi.cpp | 9 ++++++ src/gausskernel/ddes/ddes_commit_id | 2 +- src/include/ddes/dms/dms_api.h | 27 ++++++++++++++--- src/include/ddes/dms/ss_dms.h | 1 + src/include/ddes/dms/ss_dms_fi.h | 29 ++++++++++--------- 7 files changed, 51 insertions(+), 20 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms.cpp b/src/gausskernel/ddes/adapter/ss_dms.cpp index a6bd499d21..6e5e4bd278 100644 --- a/src/gausskernel/ddes/adapter/ss_dms.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms.cpp @@ -142,6 +142,7 @@ int ss_dms_func_init() SS_RETURN_IFERR(DMS_LOAD_SYMBOL_FUNC(dms_fi_get_tls_trigger_custom)); 
SS_RETURN_IFERR(DMS_LOAD_SYMBOL_FUNC(dms_fi_set_tls_trigger_custom)); SS_RETURN_IFERR(DMS_LOAD_SYMBOL_FUNC(fault_injection_call)); + SS_RETURN_IFERR(DMS_LOAD_SYMBOL_FUNC(dms_fi_entry_custom_valid)); #endif g_ss_dms_func.inited = true; diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 55cf1857df..432f2c087c 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -841,7 +841,7 @@ static int CBInvalidatePage(void *db_handle, char pageid[DMS_PAGEID_SIZE], unsig } SS_FAULT_INJECTION_CALL(DB_FI_CHANGE_BUFFERTAG_BLOCKNUM, dms_fi_change_buffertag_blocknum); - FAULT_INJECTION_ACTION_TRIGGER_CUSTOM(tag->blockNum += 1); + FAULT_INJECTION_ACTION_TRIGGER_CUSTOM(DB_FI_CHANGE_BUFFERTAG_BLOCKNUM, tag->blockNum += 1); if (!BUFFERTAGS_PTR_EQUAL(&buf_desc->tag, tag)) { DmsReleaseBuffer(buf_id + 1, IsSegmentBufferID(buf_id)); buftag_equal = false; diff --git a/src/gausskernel/ddes/adapter/ss_dms_fi.cpp b/src/gausskernel/ddes/adapter/ss_dms_fi.cpp index 19830c06a9..89c9cb1346 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_fi.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_fi.cpp @@ -58,6 +58,14 @@ void dms_fi_set_tls_trigger_custom(int val) } } +unsigned char dms_fi_entry_custom_valid(unsigned int point) +{ + if (g_ss_dms_func.inited) { + return g_ss_dms_func.dms_fi_entry_custom_valid(point); + } + return GS_ERROR; +} + void dms_fi_change_buffertag_blocknum(const dms_fi_entry *entry, va_list args) { dms_fi_set_tls_trigger_custom(TRUE); @@ -67,5 +75,6 @@ int dms_fi_set_entries(unsigned int type, unsigned int *entries, unsigned int co int dms_fi_set_entry_value(unsigned int type, unsigned int value) {return GS_ERROR;} int dms_fi_get_tls_trigger_custom() {return GS_ERROR;} void dms_fi_set_tls_trigger_custom(int val) {} +unsigned char dms_fi_entry_custom_valid(unsigned int point) {return GS_ERROR;} void dms_fi_change_buffertag_blocknum(const dms_fi_entry 
*entry, va_list args) {}; #endif \ No newline at end of file diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 61cb042df4..8391af312a 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=6341f295057ae1f987f991b3a060acfb97e143c5 +dms_commit_id=c6fe7641f4f13fb961ceebe1f8725865a2afe3ee dss_commit_id=04b09e0414525a7bb78b8baaabfedcc675f91102 cbb_commit_id=d22f1e92dc9fc75efa8f0f027321faf0fd1c770b diff --git a/src/include/ddes/dms/dms_api.h b/src/include/ddes/dms/dms_api.h index 2769c3a188..d8cb1d4752 100644 --- a/src/include/ddes/dms/dms_api.h +++ b/src/include/ddes/dms/dms_api.h @@ -34,7 +34,7 @@ extern "C" { #define DMS_LOCAL_MINOR_VER_WEIGHT 1000 #define DMS_LOCAL_MAJOR_VERSION 0 #define DMS_LOCAL_MINOR_VERSION 0 -#define DMS_LOCAL_VERSION 156 +#define DMS_LOCAL_VERSION 159 #define DMS_SUCCESS 0 #define DMS_ERROR (-1) @@ -788,7 +788,7 @@ typedef struct st_dms_broadcast_info { unsigned char check_session_kill; } dms_broadcast_info_t; -typedef enum st_dms_stat_cmd { +typedef enum en_dms_stat_cmd { DMS_STAT_ASK_MASTER, DMS_STAT_ASK_OWNER, DMS_STAT_ASK_CR_PAGE, @@ -987,7 +987,8 @@ typedef int (*dms_stop_lrpl)(void *db_handle, int is_reformer); typedef int (*dms_az_switchover_demote_phase1)(void *db_handle); typedef int (*dms_az_switchover_demote_approve)(void *db_handle); typedef int (*dms_az_switchover_demote_phase2)(void *db_handle); -typedef int (*dms_az_switchover_promote_core)(void *db_handle); +typedef int (*dms_az_switchover_promote_phase1)(void *db_handle); +typedef int (*dms_az_switchover_promote_phase2)(void *db_handle); typedef void (*dms_dyn_log)(void *db_handle, long long dyn_log_time); typedef int (*dms_invld_alock_ownership)(void *db_handle, char *resid, unsigned char req_mode, unsigned char is_try); @@ -1176,7 +1177,8 @@ typedef struct st_dms_callback { dms_az_switchover_demote_phase1 az_switchover_demote_phase1; dms_az_switchover_demote_approve 
az_switchover_demote_approve; dms_az_switchover_demote_phase2 az_switchover_demote_phase2; - dms_az_switchover_promote_core az_switchover_promote; + dms_az_switchover_promote_phase1 az_switchover_promote_phase1; + dms_az_switchover_promote_phase2 az_switchover_promote_phase2; dms_az_failover_promote_phase1 az_failover_promote_phase1; dms_az_failover_promote_resetlog az_failover_promote_resetlog; dms_az_failover_promote_phase2 az_failover_promote_phase2; @@ -1303,6 +1305,10 @@ typedef enum en_reform_callback_stat { REFORM_MES_TASK_STAT_CONFIRM_CVT_SS_READ_LOCK, REFORM_CALLBACK_STAT_REBUILD_ALOCK_LOCAL, REFORM_CALLBACK_STAT_REBUILD_DRC_ALOCK_REMOTE, + REFORM_CALLBACK_STAT_OPEN_DATAFILE, + REFORM_CALLBACK_STAT_GET_DATAFILE_SIZE, + REFORM_CALLBACK_STAT_OPEN_CTRLFILE, + REFORM_CALLBACK_STAT_GET_CTRLFILE_SIZE, REFORM_CALLBACK_STAT_COUNT } reform_callback_stat_e; @@ -1316,6 +1322,18 @@ typedef enum e_dms_fi_type { DMS_FI_TYPE_END, } dms_fi_type_e; +typedef enum en_db_call_dms_trigger_fi_point_name { + // call in db, trigger in dms, point range[10800, DB_FI_ENTRY_END] + DB_FI_CHANGE_STATUS_AFTER_TRANSFER_PAGE = 10800, + DB_FI_ENTRY_END = FI_ENTRY_END +} db_call_dms_trigger_fi_point_name; + +typedef enum en_dms_call_db_trigger_fi_point_name { + // call in dms, trigger in db, point range[800, DMS_FI_ENTRY_END] + DMS_FI_TRIGGER_IN_DB_ENTRY_BEGIN = 800, + DMS_FI_ENTRY_END +} dms_call_db_trigger_fi_point_name; + struct dms_fi_entry { int pointId; unsigned int faultFlags; @@ -1343,6 +1361,7 @@ typedef struct thread_set { typedef struct st_driver_ping_info { unsigned long long rw_bitmap; + dms_role_t dms_role; unsigned long long major_version; unsigned long long minor_version; } driver_ping_info_t; diff --git a/src/include/ddes/dms/ss_dms.h b/src/include/ddes/dms/ss_dms.h index af48ace983..25d21d2c56 100644 --- a/src/include/ddes/dms/ss_dms.h +++ b/src/include/ddes/dms/ss_dms.h @@ -98,6 +98,7 @@ typedef struct st_ss_dms_func { int (*dms_fi_get_tls_trigger_custom)(void); void 
(*dms_fi_set_tls_trigger_custom)(int val); void (*fault_injection_call)(unsigned int point, ...); + unsigned char (*dms_fi_entry_custom_valid)(unsigned int point); } ss_dms_func_t; int ss_dms_func_init(); diff --git a/src/include/ddes/dms/ss_dms_fi.h b/src/include/ddes/dms/ss_dms_fi.h index f9d7bc1716..ff67c509f0 100644 --- a/src/include/ddes/dms/ss_dms_fi.h +++ b/src/include/ddes/dms/ss_dms_fi.h @@ -34,35 +34,36 @@ extern "C" { #endif typedef enum en_db_fi_point_name { - DB_FI_CHANGE_BUFFERTAG_BLOCKNUM = DB_FI_ENTRY_BEGIN, - DB_FI_ENTRY_END = FI_ENTRY_END + // if CALL and TRIGGER both in kernel point range [10001, 10799] + DB_FI_CHANGE_BUFFERTAG_BLOCKNUM = DB_FI_ENTRY_BEGIN + 1, } db_fi_point_name; int dms_fi_set_entries(unsigned int type, unsigned int *entries, unsigned int count); int dms_fi_set_entry_value(unsigned int type, unsigned int value); int dms_fi_get_tls_trigger_custom(void); void dms_fi_set_tls_trigger_custom(int val); +unsigned char dms_fi_entry_custom_valid(unsigned int point); void dms_fi_change_buffertag_blocknum(const dms_fi_entry *entry, va_list args); #ifdef USE_ASSERT_CHECKING -#define FAULT_INJECTION_ACTION_TRIGGER_CUSTOM(action) \ - do { \ - if (dms_fi_get_tls_trigger_custom() == TRUE) { \ - dms_fi_set_tls_trigger_custom(FALSE); \ +#define FAULT_INJECTION_ACTION_TRIGGER_CUSTOM(point, action) \ + do { \ + if (dms_fi_entry_custom_valid(point) && dms_fi_get_tls_trigger_custom() == TRUE) { \ + dms_fi_set_tls_trigger_custom(FALSE); \ ereport(DEBUG1, (errmsg("[KERNEL_FI] fi custom action happens at %s", __FUNCTION__))); \ - action; \ - } \ + action; \ + } \ } while (0) -#define SS_FAULT_INJECTION_CALL(point, ...) \ - do { \ - if (g_ss_dms_func.inited) { \ - g_ss_dms_func.fault_injection_call(point, ##__VA_ARGS__); \ - } \ +#define SS_FAULT_INJECTION_CALL(point, ...) 
\ + do { \ + if (g_ss_dms_func.inited) { \ + g_ss_dms_func.fault_injection_call(point, ##__VA_ARGS__); \ + } \ } while (0) #else -#define FAULT_INJECTION_ACTION_TRIGGER_CUSTOM(action) +#define FAULT_INJECTION_ACTION_TRIGGER_CUSTOM(point, action) #define SS_FAULT_INJECTION_CALL(point, ...) #endif -- Gitee From 008fb65d2532ae6c267f62facec1bde2f49005c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Fri, 28 Jun 2024 08:34:00 +0000 Subject: [PATCH 027/347] =?UTF-8?q?=E6=97=A5=E5=BF=97=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc.cpp | 2 + src/common/backend/utils/misc/guc/guc_sql.cpp | 30 +- .../backend/utils/misc/guc/guc_storage.cpp | 18 - src/gausskernel/optimizer/commands/verify.cpp | 89 ++- .../storage/access/redo/redo_ubtxlog.cpp | 4 +- .../storage/access/transam/clog.cpp | 4 - .../storage/access/ubtree/ubtdump.cpp | 622 +++++++++++------- .../storage/access/ubtree/ubtinsert.cpp | 61 +- .../storage/access/ubtree/ubtrecycle.cpp | 210 +++++- .../storage/access/ubtree/ubtree.cpp | 2 +- .../storage/access/ubtree/ubtsearch.cpp | 12 - .../storage/access/ubtree/ubtutils.cpp | 5 + .../storage/access/ustore/Makefile | 2 +- .../storage/access/ustore/knl_pruneuheap.cpp | 7 +- .../access/ustore/knl_uextremeredo.cpp | 12 +- .../storage/access/ustore/knl_uheap.cpp | 79 +-- .../storage/access/ustore/knl_undoaction.cpp | 14 +- .../storage/access/ustore/knl_upage.cpp | 556 +++++++--------- .../storage/access/ustore/knl_uredo.cpp | 130 ++-- .../storage/access/ustore/knl_uundorecord.cpp | 82 ++- .../storage/access/ustore/knl_uundovec.cpp | 26 +- .../storage/access/ustore/knl_uverify.cpp | 206 ------ .../access/ustore/undo/knl_uundoapi.cpp | 16 +- .../access/ustore/undo/knl_uundorecycle.cpp | 2 + .../access/ustore/undo/knl_uundospace.cpp | 1 + .../access/ustore/undo/knl_uundotxn.cpp | 123 +++- .../access/ustore/undo/knl_uundozone.cpp | 
66 +- src/include/access/ubtree.h | 17 +- src/include/access/ustore/knl_upage.h | 19 +- src/include/access/ustore/knl_utuple.h | 6 + src/include/access/ustore/knl_uundorecord.h | 2 + src/include/access/ustore/knl_uundovec.h | 1 - src/include/access/ustore/knl_uverify.h | 95 ++- src/include/access/ustore/undo/knl_uundotxn.h | 8 +- .../access/ustore/undo/knl_uundozone.h | 5 +- .../knl/knl_guc/knl_session_attr_storage.h | 1 + 36 files changed, 1349 insertions(+), 1186 deletions(-) delete mode 100644 src/gausskernel/storage/access/ustore/knl_uverify.cpp diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index 470e340692..f805cb740b 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -14120,6 +14120,7 @@ static void analysis_options_guc_assign(const char* newval, void* extra) #define DEFAULT_SYNC_ROLLBACK true #define DEFAULT_ASYNC_ROLLBACK true #define DEFAULT_PAGE_ROLLBACK true +#define DEFAULT_USTORE_VERIFY false static void InitUStoreAttr() { @@ -14129,6 +14130,7 @@ static void InitUStoreAttr() u_sess->attr.attr_storage.umax_search_length_for_prune = DEFAULT_UMAX_PRUNE_SEARCH_LEN; u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_DEFAULT; u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_INVALID; + u_sess->attr.attr_storage.ustore_verify = DEFAULT_USTORE_VERIFY; u_sess->attr.attr_storage.enable_ustore_sync_rollback = DEFAULT_SYNC_ROLLBACK; u_sess->attr.attr_storage.enable_ustore_async_rollback = DEFAULT_ASYNC_ROLLBACK; u_sess->attr.attr_storage.enable_ustore_page_rollback = DEFAULT_PAGE_ROLLBACK; diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index 313b9fdb59..f0b6dcd8aa 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -3936,15 +3936,13 @@ static void ParseUstoreVerifyLevel(int* mLevel, char* ptoken, const char* pdelim if 
(strcasecmp(ptoken, "NONE") == 0) { setVal = (int) USTORE_VERIFY_NONE; } else if (strcasecmp(ptoken, "FAST") == 0) { - setVal = (int) USTORE_VERIFY_DEFAULT; - } else if (strcasecmp(ptoken, "NORMAL") == 0) { setVal = (int) USTORE_VERIFY_FAST; - } else if (strcasecmp(ptoken, "SLOW") == 0) { + } else if (strcasecmp(ptoken, "COMPLETE") == 0) { setVal = (int) USTORE_VERIFY_COMPLETE; } else { - setVal = 0; + setVal = USTORE_VERIFY_DEFAULT; ereport(LOG, (errmodule(MOD_GUC), - errmsg("Invalid parameter settings, only support fast, normal and slow value."))); + errmsg("Invalid parameter settings, only support none, fast and complete value."))); } } @@ -4067,32 +4065,30 @@ static bool IsValidUstoreAttrValues(const char* keyStr, char* value) strcasecmp(keyStr, "enable_ustore_sync_rollback") == 0 || strcasecmp(keyStr, "enable_ustore_async_rollback") == 0 || strcasecmp(keyStr, "enable_ustore_page_rollback") == 0) { - return (strcasecmp(value, "true") == 0 || strcasecmp(value, "false") == 0) ? true : false; + return strcasecmp(value, "true") == 0 || strcasecmp(value, "false") == 0; } else if (strcasecmp(keyStr, "ustats_tracker_naptime") == 0) { int keyValues = pg_strtoint32(value); - return (keyValues >= MIN_USTATS_TRACKER_NAPTIME && keyValues <= MAX_USTATS_TRACKER_NAPTIME) ? - true : false; + return keyValues >= MIN_USTATS_TRACKER_NAPTIME && keyValues <= MAX_USTATS_TRACKER_NAPTIME; } else if (strcasecmp(keyStr, "umax_search_length_for_prune") == 0) { int keyValues = pg_strtoint32(value); - return (keyValues >= MIN_UMAX_PRUNE_SEARCH_LEN && keyValues <= MAX_UMAX_PRUNE_SEARCH_LEN) ? 
- true : false; + return keyValues >= MIN_UMAX_PRUNE_SEARCH_LEN && keyValues <= MAX_UMAX_PRUNE_SEARCH_LEN; #ifdef ENABLE_WHITEBOX } else if (strcasecmp(keyStr, "ustore_unit_test") == 0) { - return (strlen(value) != 0) true : false; + return strlen(value) != 0; #endif } else if (strcasecmp(keyStr, "ustore_verify_level") == 0) { - return (strcasecmp(value, "none") == 0 || strcasecmp(value, "fast") == 0 || - strcasecmp(value, "normal") == 0 || strcasecmp(value, "slow") == 0) ? true : false; + return strcasecmp(value, "none") == 0 || strcasecmp(value, "fast") == 0 || strcasecmp(value, "complete") == 0 || + strcasecmp(value, "normal") == 0 || strcasecmp(value, "slow") == 0; } else if (strcasecmp(keyStr, "ustore_verify_module") == 0) { char *psave = NULL; const char* pdelimiter = ":"; - return ValidateVerifyModules(value, pdelimiter, psave) ? true : false; + return ValidateVerifyModules(value, pdelimiter, psave); } else if (strcasecmp(keyStr, "index_trace_level") == 0) { - return (strcasecmp(value, "no") == 0 || strcasecmp(value, "normal") == 0 || + return strcasecmp(value, "no") == 0 || strcasecmp(value, "normal") == 0 || strcasecmp(value, "visibility") == 0 || strcasecmp(value, "showhikey") == 0 || - strcasecmp(value, "all") == 0) ? true : false; + strcasecmp(value, "all") == 0; } else if (strcasecmp(keyStr, "enable_log_tuple") == 0) { - return (strcasecmp(value, "off") == 0) ? 
true : false; + return strcasecmp(value, "off") == 0; } return false; } diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index 06f573f0c1..7742319b41 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -7083,24 +7083,6 @@ static bool check_ss_work_thread_pool_attr(char** newval, void** extra, GucSourc return true; } -static bool check_undo_space_limit_size(int *newval, void **extra, GucSource source) -{ - if (*newval < u_sess->attr.attr_storage.undo_limit_size_transaction && - u_sess->attr.attr_storage.undo_limit_size_transaction != 0) { - return false; - } - return true; -} - -static bool check_undo_limit_size_per_transaction(int *newval, void **extra, GucSource source) -{ - if (*newval > u_sess->attr.attr_storage.undo_space_limit_size && - u_sess->attr.attr_storage.undo_space_limit_size != 0) { - return false; - } - return true; -} - #ifdef USE_ASSERT_CHECKING static bool parse_ss_fi_entry_list(char *value, uint32 *entry_list, uint32 *count) { diff --git a/src/gausskernel/optimizer/commands/verify.cpp b/src/gausskernel/optimizer/commands/verify.cpp index 9baced76db..f56f5c5963 100644 --- a/src/gausskernel/optimizer/commands/verify.cpp +++ b/src/gausskernel/optimizer/commands/verify.cpp @@ -39,6 +39,7 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "access/heapam.h" +#include "access/ubtree.h" #ifdef PGXC #include "pgxc/pgxc.h" #include "pgxc/redistrib.h" @@ -61,17 +62,17 @@ static void VerifyPartIndexRel(VacuumStmt* stmt, Relation rel, Relation partitio static void VerifyIndexRels(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL); static void VerifyIndexRel(VacuumStmt* stmt, Relation indexRel, VerifyDesc* checkCudesc = NULL); static void VerifyRowRels(VacuumStmt* stmt, Relation parentRel, Relation rel); -static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL); 
-static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL); -static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc = NULL); -static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc = NULL); +static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); +static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); +static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); +static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); static void VerifyColRels(VacuumStmt* stmt, Relation parentRel, Relation rel); static void VerifyColRel(VacuumStmt* stmt, Relation rel); static void VerifyColRelFast(Relation rel); static void VerifyColRelComplete(Relation rel); static void reportColVerifyFailed( Relation rel, bool isdesc = false, bool iscomplete = false, BlockNumber cuId = 0, int col = 0); - +static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, ForkNumber forkNum); /* * MainCatalogRelid is used to analyse verify the main system tables. */ @@ -1138,6 +1139,9 @@ static void VerifyIndexRel(VacuumStmt* stmt, Relation indexRel, VerifyDesc* chec relation_close(psortColRel, AccessShareLock); } else { VerifyRowRel(stmt, indexRel, checkCudesc); + if (RelationIsUstoreIndex(indexRel)) { + VerifyRowRel(stmt, indexRel, checkCudesc, FSM_FORKNUM); + } } } PG_CATCH(); @@ -1217,23 +1221,23 @@ static void VerifyRowRels(VacuumStmt* stmt, Relation parentRel, Relation rel) * @in&out checkCudesc - checkCudesc is a struct to judge whether cudesc tables is damaged. 
* @return: void */ -static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc) +static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc, ForkNumber forkNum) { /* turn off the remote read and keep the old mode */ int oldRemoteReadMode = SetRemoteReadModeOffAndGetOldMode(); bool isValidRelationPage = true; Oid relid = RelationGetRelid(rel); - isValidRelationPage = VerifyRowRelFull(stmt, rel, checkCudesc); + isValidRelationPage = VerifyRowRelFull(stmt, rel, checkCudesc, forkNum); SetRemoteReadMode(oldRemoteReadMode); if (!isValidRelationPage && IsMainCatalogObjectForVerify(relid)) { ereport(FATAL, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("The important catalog table %s.%s corrupts, the node is %s, please fix it.", + errmsg("The important catalog table %s.%s corrupts, the node is %s, forknum %d please fix it.", get_namespace_name(RelationGetNamespace(rel)), RelationGetRelationName(rel), - g_instance.attr.attr_common.PGXCNodeName), + g_instance.attr.attr_common.PGXCNodeName, forkNum), handle_in_client(true))); } @@ -1249,7 +1253,7 @@ static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc * @in&out checkCudesc - checkCudesc is a struct to judge whether cudesc tables is damaged. * @return: bool */ -static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc) +static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc, ForkNumber forkNum) { if (unlikely(rel == NULL)) { ereport(ERROR, @@ -1263,7 +1267,7 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc) char* buf = (char*)palloc(BLCKSZ); BlockNumber nblocks; BlockNumber blkno; - ForkNumber forkNum = MAIN_FORKNUM; + bool isValidRelationPage = true; char* namespace_name = get_namespace_name(RelationGetNamespace(rel)); @@ -1297,13 +1301,8 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc) if (rdStatus != SMGR_RD_CRC_ERROR) { /* Ustrore white-box verification adapt to analyze verify. 
*/ if (rdStatus == SMGR_RD_OK) { - UPageVerifyParams verifyParam; Page page = (char *) buf; - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_FAST, - (char *) &verifyParam, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr, NULL, NULL, ANALYZE_VERIFY))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParam); - } + VerifyUstorePage(rel, page, blkno, forkNum); } continue; } @@ -1331,13 +1330,8 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc) addGlobalRepairBadBlockStat(src->smgr_rnode, forkNum, blkno); } else if (rdStatus == SMGR_RD_OK) { /* Ustrore white-box verification adapt to analyze verify. */ - UPageVerifyParams verifyParam; Page page = (char *) buf; - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_FAST, - (char *) &verifyParam, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr, NULL, NULL, ANALYZE_VERIFY))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParam); - } + VerifyUstorePage(rel, page, blkno, forkNum); } } @@ -1354,7 +1348,7 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc) * @in&out checkCudesc - checkCudesc is a struct to judge whether cudesc tables is damaged. 
* @return: bool */ -static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc) +static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc, ForkNumber forkNum) { if (RELATION_IS_GLOBAL_TEMP(rel) && !gtt_storage_attached(RelationGetRelid(rel))) { return true; @@ -1366,7 +1360,6 @@ static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc) Datum* values = NULL; bool* nulls = NULL; int numberOfAttributes = 0; - ForkNumber forkNum = MAIN_FORKNUM; bool isValidRelationPageFast = true; bool isValidRelationPageComplete = true; SMgrRelation smgrRel = NULL; @@ -1383,7 +1376,7 @@ static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc) MemoryContext oldMemContext = MemoryContextSwitchTo(verifyRowMemContext); /* check page header and crc first */ - isValidRelationPageFast = VerifyRowRelFast(rel, checkCudesc); + isValidRelationPageFast = VerifyRowRelFast(rel, checkCudesc, forkNum); /* check all tuples of ustore relation. */ buf = (char*)palloc(BLCKSZ); @@ -1394,13 +1387,8 @@ static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc) CHECK_FOR_INTERRUPTS(); SMGR_READ_STATUS rdStatus = smgrread(smgrRel, forkNum, blkno, buf); if (rdStatus == SMGR_RD_OK) { - UPageVerifyParams verifyParam; Page page = (char *) buf; - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParam, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr, NULL, NULL, ANALYZE_VERIFY))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParam); - } + VerifyUstorePage(rel, page, blkno, forkNum); } } pfree_ext(buf); @@ -1496,9 +1484,9 @@ static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc) return (isValidRelationPageFast && isValidRelationPageComplete); } -static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc) +static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc, 
ForkNumber forkNum) { - bool (*verifyfunc)(Relation, VerifyDesc*); + bool (*verifyfunc)(Relation, VerifyDesc*, ForkNumber); Relation bucketRel = NULL; if ((unsigned int)stmt->options & VACOPT_FAST) { @@ -1515,7 +1503,7 @@ static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCu Assert(checkCudesc == NULL); for (int i = 0; i < bucketlist->dim1; i++) { bucketRel = bucketGetRelation(rel, NULL, bucketlist->values[i]); - if (verifyfunc(bucketRel, NULL) == false) { + if (verifyfunc(bucketRel, NULL, forkNum) == false) { bucketCloseRelation(bucketRel); return false; } @@ -1523,7 +1511,7 @@ static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCu } return true; } else - return verifyfunc(rel, checkCudesc); + return verifyfunc(rel, checkCudesc, forkNum); } @@ -1893,3 +1881,32 @@ void VerifyAbortBufferIO(void) TerminateBufferIO(buf, false, BM_IO_ERROR); } } + +static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, ForkNumber forkNum) +{ + if (!RelationIsUstoreIndex(rel) && !RelationIsUstoreFormat(rel)) { + return; + } + bool temp = false; + BEGIN_SAVE_VERIFY(temp); + PG_TRY(); + { + if (RelationIsUstoreIndex(rel)) { + if (forkNum == MAIN_FORKNUM && blkno != 0) { + UBTreeVerifyAll(rel, page, blkno, InvalidOffsetNumber, false); + } else if (forkNum == FSM_FORKNUM) { + UBTRecycleQueueVerifyPageOffline(rel, page, blkno); + } + } else { + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel); + } + + } + PG_CATCH(); + { + END_SAVE_VERIFY(temp) + PG_RE_THROW(); + } + PG_END_TRY(); + END_SAVE_VERIFY(temp); +} diff --git a/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp b/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp index 0d80909400..55da1d7e42 100644 --- a/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp +++ b/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp @@ -1341,14 +1341,14 @@ void UBTree2XlogRecycleQueueInitPageOperatorAdjacentPage(RedoBufferInfo* buffer, void 
UBTree2XlogRecycleQueueEndpointOperatorLeftPage(RedoBufferInfo* buffer, void* recorddata) { xl_ubtree2_recycle_queue_endpoint *xlrec = (xl_ubtree2_recycle_queue_endpoint *)recorddata; - UBTreeRecycleQueuePageChangeEndpointLeftPage(buffer->buf, xlrec->isHead); + UBTreeRecycleQueuePageChangeEndpointLeftPage(NULL, buffer->buf, xlrec->isHead); PageSetLSN(BufferGetPage(buffer->buf), buffer->lsn); } void UBTree2XlogRecycleQueueEndpointOperatorRightPage(RedoBufferInfo* buffer, void* recorddata) { xl_ubtree2_recycle_queue_endpoint *xlrec = (xl_ubtree2_recycle_queue_endpoint *)recorddata; - UBTreeRecycleQueuePageChangeEndpointRightPage(buffer->buf, xlrec->isHead); + UBTreeRecycleQueuePageChangeEndpointRightPage(NULL, buffer->buf, xlrec->isHead); PageSetLSN(BufferGetPage(buffer->buf), buffer->lsn); } diff --git a/src/gausskernel/storage/access/transam/clog.cpp b/src/gausskernel/storage/access/transam/clog.cpp index 4109ef7853..dcadf9fcd6 100644 --- a/src/gausskernel/storage/access/transam/clog.cpp +++ b/src/gausskernel/storage/access/transam/clog.cpp @@ -760,10 +760,6 @@ void StartupCLOG(void) */ void TrimCLOG(void) { - if (u_sess->attr.attr_storage.ustore_verify_level == USTORE_VERIFY_WHITEBOX) { - return; - } - if (SS_STANDBY_MODE) { ereport(WARNING, (errmodule(MOD_DMS), errmsg("DMS standby can't trim clog status"))); return; diff --git a/src/gausskernel/storage/access/ubtree/ubtdump.cpp b/src/gausskernel/storage/access/ubtree/ubtdump.cpp index 067dbea314..9b3bd338bd 100644 --- a/src/gausskernel/storage/access/ubtree/ubtdump.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtdump.cpp @@ -26,10 +26,15 @@ #include "access/transam.h" #include "access/ubtree.h" #include "utils/builtins.h" -#include "storage/procarray.h" +#include "storage/procarray.h" -typedef RpSort ItemIdSort; -typedef RpSortData ItemIdSortData; + +static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, + OffsetNumber firstPos, OffsetNumber lastPos); +static void 
UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno); +static void UBTreeVerifyHeader(PageHeaderData* page, Relation rel, BlockNumber blkno, uint16 pageSize, uint16 headerSize); +static void UBTreeVerifyRowptr(PageHeaderData* header, Page page, BlockNumber blkno, OffsetNumber offset, + ItemIdSort indexSortPtr, const char *indexName, Oid relOid); void UBTreeVerifyIndex(Relation rel, TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols) { @@ -399,300 +404,451 @@ char* UBTGetVerifiedResultStr(uint32 type) } } -bool VerifyIndexPageHeader(Relation rel, Page page) +bool UBTreeVerifyTupleTransactionStatus(Relation rel, BlockNumber blkno, TransactionIdStatus xminStatus, TransactionIdStatus xmaxStatus, + TransactionId xmin, TransactionId xmax, CommitSeqNo xminCSN, CommitSeqNo xmaxCSN) { - PageHeader phdr = (PageHeader)page; - uint16 pdLower = phdr->pd_lower; - uint16 pdUpper = phdr->pd_upper; - uint16 pdSpecial = phdr->pd_special; - if (PageGetPageSize(phdr) != BLCKSZ || (phdr->pd_flags & ~PD_VALID_FLAG_BITS) != 0 || - pdLower < GetPageHeaderSize(page) || pdLower > pdUpper || pdUpper > pdSpecial || pdSpecial > BLCKSZ) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY index page header invalid: rel %s, size %lu, flags %u, lower %u, upper %u," - "special %u", NameStr(rel->rd_rel->relname), PageGetPageSize(phdr), phdr->pd_flags, pdLower, - pdUpper, pdSpecial))); + if (u_sess->attr.attr_storage.ustore_verify_level < USTORE_VERIFY_FAST) { + return false; } - return true; -} -bool VerifyIndexTuple(Relation rel, Page page, OffsetNumber offset) -{ - ItemId itemId = PageGetItemId(page, offset); - IndexTuple ituple = (IndexTuple)PageGetItem(page, itemId); - int tupSize = IndexTupleSize(ituple); - if (tupSize > (int)ItemIdGetLength(itemId)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted tuple: rel %s, tupsize = %d, rpsize = %u.", - NameStr(rel->rd_rel->relname), tupSize, 
ItemIdGetLength(itemId)))); - } - return true; -} + bool tranStatusError = false; + switch (xminStatus) { + case XID_COMMITTED: + tranStatusError = (xmaxStatus == XID_COMMITTED && xminCSN > xmaxCSN && xmaxCSN != COMMITSEQNO_FROZEN) ? true : false; + break; + case XID_INPROGRESS: + tranStatusError = (xmaxStatus == XID_COMMITTED && TransactionIdIsValid(xmax)) ? true : false; + break; + case XID_ABORTED: + tranStatusError = (xminStatus == XID_ABORTED && xmaxStatus != XID_ABORTED) ? true : false; + break; -bool VerifyIndexOneItemId(Relation rel, Page page, OffsetNumber offset, ItemIdSort itemIdSortPtr) -{ - ItemId itemId = PageGetItemId(page, offset); - PageHeader phdr = (PageHeader)page; - uint16 pdUpper = phdr->pd_upper; - uint16 pdSpecial = phdr->pd_special; - if (!ItemIdIsUsed(itemId)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted unused line pointer: rel %s, offset = %u, rpstart = %u, rplen = %u.", - NameStr(rel->rd_rel->relname), offset, ItemIdGetOffset(itemId), ItemIdGetLength(itemId)))); + default: + break; + } + if (tranStatusError) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] xmin or xmax status invalid, relName=%s, blkno=%u, xmin=%lu, xmax=%lu, xminStatus=%d, " + "xmaxStatus=%d, xminCSN=%lu, xmaxCSN=%lu.", NameStr(rel->rd_rel->relname), blkno, xmin, xmax, xminStatus, + xmaxStatus, xminCSN, xmaxCSN))); + return false; } - if (!ItemIdHasStorage(itemId)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted no storage line pointer: rel %s, offset = %u, rpstart = %u, rplen = %u.", - NameStr(rel->rd_rel->relname), offset, ItemIdGetOffset(itemId), ItemIdGetLength(itemId)))); - } - itemIdSortPtr->start = ItemIdGetOffset(itemId); - itemIdSortPtr->end = itemIdSortPtr->start + SHORTALIGN(ItemIdGetLength(itemId)); - itemIdSortPtr->offset = offset; - if (itemIdSortPtr->start < pdUpper || itemIdSortPtr->end > pdSpecial) { 
- ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted normal line pointer: rel %s, offset = %u, rpstart = %u, rplen = %u.", - NameStr(rel->rd_rel->relname), offset, ItemIdGetOffset(itemId), ItemIdGetLength(itemId)))); - } - VerifyIndexTuple(rel, page, offset); return true; } - + static int ItemCompare(const void *item1, const void *item2) { return ((ItemIdSort)item1)->start - ((ItemIdSort)item2)->start; } - -bool VerifyIndexPageItemId(Relation rel, Page page, VerifyLevel level) + +void UBTreeVerifyHikey(Relation rel, Page page, BlockNumber blkno) { - if (level <= USTORE_VERIFY_FAST) { - return true; - } - TupleDesc tupdes = RelationGetDescr(rel); - int keysz = IndexRelationGetNumberOfKeyAttributes(rel); - ItemIdSortData itemIdBase[MaxIndexTuplesPerPage]; - ItemIdSort itemIdSortPtr = itemIdBase; + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + Oid relOid = (rel ? rel->rd_id : InvalidOid); UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - OffsetNumber firstPos = P_FIRSTDATAKEY(opaque); - OffsetNumber lastPos = PageGetMaxOffsetNumber(page); - if (firstPos > lastPos) { - return true; /* empty page */ - } - - /* check key orders */ - BTScanInsert cmpKeys = UBTreeMakeScanKey(rel, NULL); - VerifyIndexOneItemId(rel, page, firstPos, itemIdSortPtr); - itemIdSortPtr++; - IndexTuple curKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, firstPos)); - for (OffsetNumber nxtPos = OffsetNumberNext(firstPos); nxtPos <= lastPos; nxtPos = OffsetNumberNext(nxtPos)) { - ItemId itemId = PageGetItemId(page, nxtPos); - IndexTuple nextKey = (IndexTuple)PageGetItem(page, itemId); - if (P_ISLEAF(opaque) || nxtPos > firstPos + 1) { - /* current key must <= next key */ - if (!_bt_index_tuple_compare(tupdes, cmpKeys->scankeys, keysz, curKey, nextKey)) { - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - char *curKeyDesc = NULL; - char *nextKeyDesc = NULL; - if (P_ISLEAF(opaque)) { - 
index_deform_tuple(curKey, RelationGetDescr(rel), values, isnull); - curKeyDesc = BuildIndexValueDescription(rel, values, isnull); - index_deform_tuple(nextKey, RelationGetDescr(rel), values, isnull); - nextKeyDesc = BuildIndexValueDescription(rel, values, isnull); - } - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted key order %s %s, rel %s", - (curKeyDesc ? curKeyDesc : "(UNKNOWN)"), - (nextKeyDesc ? nextKeyDesc : "(UNKNOWN)"), - NameStr(rel->rd_rel->relname)))); - } - } - curKey = nextKey; - VerifyIndexOneItemId(rel, page, nxtPos, itemIdSortPtr); - itemIdSortPtr++; - } - int nstorage = itemIdSortPtr - itemIdBase; - if (nstorage <= 1) { - pfree(cmpKeys); - return true; - } - - qsort((char *)itemIdBase, nstorage, sizeof(ItemIdSortData), ItemCompare); + if (P_RIGHTMOST(opaque)) + return; - for (int i = 0; i < nstorage - 1; i++) { - ItemIdSort tempPtr1 = &itemIdBase[i]; - ItemIdSort tempPtr2 = &itemIdBase[i + 1]; - if (tempPtr1->end > tempPtr2->start) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted line pointer: rel %s tempPtr1offset %u, tempPtr1start = %u, " - "tempPtr1end = %u, tempPtr2offset = %u, tempPtr2start = %u, tempPtr2end = %u.", - NameStr(rel->rd_rel->relname), - tempPtr1->offset, tempPtr1->start, tempPtr1->end, - tempPtr2->offset, tempPtr2->start, tempPtr2->end))); - } - } - - pfree(cmpKeys); - return true; -} - -bool VerifyIndexHiKeyAndOpaque(Relation rel, Page page) -{ - UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); if (P_ISLEAF(opaque) ? 
(opaque->btpo.level != 0) : (opaque->btpo.level == 0)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted rel %s, level %u, flag %u", NameStr(rel->rd_rel->relname), - opaque->btpo.level, opaque->btpo_flags))); + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted rel %s, level %u, flag %u, tid[%d:%d]", (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), + opaque->btpo.level, opaque->btpo_flags, relOid, blkno))); + return; } - - /* compare last key and HIKEY */ + OffsetNumber lastPos = PageGetMaxOffsetNumber(page); - /* note that the first data key of internal pages has no value */ - if (!P_RIGHTMOST(opaque) && (P_ISLEAF(opaque) ? (lastPos > P_HIKEY) : (lastPos > P_FIRSTKEY))) { - IndexTuple lastTuple = (IndexTuple)PageGetItem(page, PageGetItemId(page, lastPos)); + if (P_ISLEAF(opaque) ? (lastPos <= P_HIKEY) : (lastPos <= P_FIRSTKEY)) + return; - /* we must hold: hikey >= lastKey */ - BTScanInsert itupKey = UBTreeMakeScanKey(rel, lastTuple); - if (UBTreeCompare(rel, itupKey, page, P_HIKEY, InvalidBuffer) > 0) { - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - index_deform_tuple(lastTuple, RelationGetDescr(rel), values, isnull); - char *keyDesc = BuildIndexValueDescription(rel, values, isnull); - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted key %s with HIKEY compare in rel %s", - (keyDesc ? 
keyDesc : "(UNKNOWN)"), NameStr(rel->rd_rel->relname)))); - } + IndexTuple lastTuple = (IndexTuple)PageGetItem(page, PageGetItemId(page, lastPos)); + BTScanInsert itupKey = UBTreeMakeScanKey(rel, lastTuple); + if (UBTreeCompare(rel, itupKey, page, P_HIKEY, InvalidBuffer) <= 0) { pfree(itupKey); + return; } - return true; -} + pfree(itupKey); -bool UBTreeVerifyITupleTransactionStatus(TransactionIdStatus xminStatus, TransactionIdStatus xmaxStatus, - TransactionId xmin, TransactionId xmax, CommitSeqNo xminCSN, CommitSeqNo xmaxCSN) + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + index_deform_tuple(lastTuple, RelationGetDescr(rel), values, isnull); + char *keyDesc = BuildIndexValueDescription(rel, values, isnull); + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted key %s with HIKEY compare in rel %s, tid[%d:%d]", + (keyDesc ? keyDesc : "(UNKNOWN)"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), relOid, blkno))); + +} + +void UBTreeVerifyPageXid(Relation rel, BlockNumber blkno, TransactionId xidBase, TransactionId pruneXid) { - if (xminStatus == XID_INPROGRESS && xmaxStatus == XID_COMMITTED && TransactionIdIsValid(xmax)) { - return false; - } - if (xminStatus == XID_ABORTED && xmaxStatus != XID_ABORTED) { - return false; + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); + Oid relOid = (rel ? 
rel->rd_id : InvalidOid); + + if (TransactionIdFollows(xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid) || + TransactionIdPrecedes(xidBase + MaxShortTransactionId, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] ubtree's page xid_base invalid: indexName=%s, oid=%u, blkno=%u, xid_base=%lu, nextxid=%lu.", + indexName, relOid, blkno, xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + return; } - if (xminStatus == XID_COMMITTED && xmaxStatus == XID_COMMITTED) { - if (xminCSN > xmaxCSN && xmaxCSN != COMMITSEQNO_FROZEN) { - return false; - } + if (TransactionIdFollows(pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] ubtree's page prune_xid invalid: indexName=%s, oid=%u, blkno=%u, xid_base=%lu, nextxid=%lu.", + indexName, relOid, blkno, pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + return; } - return true; } -bool VerifyIndexTransactionInfo(Relation rel, Page page, VerifyLevel level, GPIScanDesc gpiScan) +void UBTreeVerifyTupleTransactionInfo(Relation rel, Page page, OffsetNumber offnum, bool fromInsert, TransactionId xidBase) { - UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - if (!P_ISLEAF(opaque)) { - return true; - } - TransactionId xid_base = opaque->xid_base; - TransactionId pruneXid = ShortTransactionIdToNormal(xid_base, ((PageHeader)page)->pd_prune_xid); - if (TransactionIdFollows(xid_base, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY xid_base invalid: rel %s, xid_base = %lu, nextxid = %lu.", - NameStr(rel->rd_rel->relname), xid_base, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } - if (TransactionIdFollows(pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(defence_errlevel(), 
(errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY prune_xid invalid: rel %s, prune_xid = %lu, nextxid = %lu.", - NameStr(rel->rd_rel->relname), pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } - if (level <= USTORE_VERIFY_FAST) { - return true; + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + if (offnum == InvalidOffsetNumber) + return; + + IndexTuple tuple = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); + UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(tuple); + TransactionId xid = fromInsert? ShortTransactionIdToNormal(xidBase, uxid->xmin) : ShortTransactionIdToNormal(xidBase, uxid->xmax); + + if (TransactionIdIsNormal(xid) && !TransactionIdIsCurrentTransactionId(xid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmodule(MOD_USTORE), errmsg("[Verify UBTree] tuple xid %s invalid: indexName=%s, oid=%u, xid=%lu.", + (fromInsert ? "xmin" : "xmax"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), + (rel ? 
rel->rd_id : InvalidOid), xid))); } +} - /* stat info for prune_xid and last_delete_xid */ +void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber blkno, OffsetNumber startoffset, bool fromInsert, + TransactionId xidBase) +{ + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + TransactionId maxXmax = InvalidTransactionId; TransactionId minCommittedXmax = MaxTransactionId; - + TransactionId pruneXid = ShortTransactionIdToNormal(xidBase, ((PageHeader)page)->pd_prune_xid); OffsetNumber maxoff = PageGetMaxOffsetNumber(page); - for (OffsetNumber offnum = P_FIRSTDATAKEY(opaque); offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - ItemId iid = PageGetItemId(page, offnum); - IndexTuple itup = (IndexTuple)PageGetItem(page, iid); - UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); + TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; + for (OffsetNumber offnum = startoffset; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { + ItemId itemid = PageGetItemId(page, offnum); + IndexTuple tuple = (IndexTuple)PageGetItem(page, itemid); + UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(tuple); + TransactionId xmin = ShortTransactionIdToNormal(xidBase, uxid->xmin); + TransactionId xmax = ShortTransactionIdToNormal(xidBase, uxid->xmax); - /* fetch trans info */ - TransactionId xmin = ShortTransactionIdToNormal(xid_base, uxid->xmin); - TransactionId xmax = ShortTransactionIdToNormal(xid_base, uxid->xmax); if (TransactionIdFollows(Max(xmin, xmax), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY itup xid invalid: rel %s, xmin/xmax %lu/%lu, nextxid %lu, xid-base %lu.", - NameStr(rel->rd_rel->relname), xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextXid, - xid_base))); + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] index tuple xid(xmin/xmax) is bigger than nextXid: relName=%s, 
blkno=%u, xmin=%lu, xmax=%lu, nextxid=%lu, xid_base=%lu.", + NameStr(rel->rd_rel->relname), blkno, xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextXid, xidBase))); + return; + } + + uint32 base = u_sess->attr.attr_storage.ustore_verify ? MaxShortTransactionId : 0; + if (TransactionIdIsNormal(xmin) && !IndexItemIdIsFrozen(itemid) && + TransactionIdPrecedes(xmin + base, oldestXmin)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] index tuple xmin invalid: relName=%s, blkno=%u, xmin=%lu, oldest_xmin=%lu, xid_base=%lu.", + NameStr(rel->rd_rel->relname), blkno, xmin, oldestXmin, xidBase))); + return; + } + if (TransactionIdIsNormal(xmax) && !ItemIdIsDead(itemid) && + TransactionIdPrecedes(xmax + base, oldestXmin)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] index tuple xmin invalid: relName=%s, blkno=%u, xmax=%lu, oldest_xmin=%lu, xid_base=%lu.", + NameStr(rel->rd_rel->relname), blkno, xmax, oldestXmin, xidBase))); + return; + } + if (!u_sess->attr.attr_storage.ustore_verify) { + continue; } TransactionIdStatus xminStatus = UBTreeCheckXid(xmin); CommitSeqNo xminCSN = TransactionIdGetCommitSeqNo(xmin, false, false, false, NULL); TransactionIdStatus xmaxStatus = UBTreeCheckXid(xmax); CommitSeqNo xmaxCSN = TransactionIdGetCommitSeqNo(xmax, false, false, false, NULL); - + if (xminStatus == XID_INPROGRESS && xmaxStatus != XID_INPROGRESS && TransactionIdIsValid(xmax)) { xminStatus = UBTreeCheckXid(xmin); xminCSN = TransactionIdGetCommitSeqNo(xmin, false, false, false, NULL); } - + if (xmaxStatus == XID_COMMITTED && TransactionIdPrecedes(xmax, minCommittedXmax)) { minCommittedXmax = xmax; } if (TransactionIdFollows(xmax, maxXmax)) { maxXmax = xmax; } - if (!UBTreeVerifyITupleTransactionStatus(xminStatus, xmaxStatus, xmin, xmax, xminCSN, xmaxCSN)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY xmin xmax status invalid, rel %s, 
xmin %lu, xmax %lu, xminStatus %d," - "xmaxStatus %d, xminCSN %lu, xmaxCSN %lu.", - NameStr(rel->rd_rel->relname), xmin, xmax, xminStatus, xmaxStatus, xminCSN, xmaxCSN))); + if (!UBTreeVerifyTupleTransactionStatus(rel, blkno, xminStatus, xmaxStatus, xmin, xmax, xminCSN, xmaxCSN)) { + return; } } - + + UBTPageOpaque uopaque = (UBTPageOpaque)PageGetSpecialPointer(page); + UBTPageOpaqueInternal ubtOpaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + if (TransactionIdFollows(uopaque->xact, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] xact xid is bigger than nextXid: relName=%s, blkno=%u, xact=%lu, nextxid=%lu.", + NameStr(rel->rd_rel->relname), blkno, uopaque->xact, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + return; + } + if (!u_sess->attr.attr_storage.ustore_verify) { + return; + } if (minCommittedXmax != MaxTransactionId && TransactionIdIsValid(pruneXid) && TransactionIdFollows(minCommittedXmax, pruneXid)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY prune_xid invalid, rel = %s, prune_xid on page = %lu, actual value = %lu.", - NameStr(rel->rd_rel->relname), pruneXid, minCommittedXmax))); + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] min_committed_xmax is bigger than prune_xid: relName=%s, blkno=%u, prune_xid=%lu, minCommittedXmax=%lu.", + NameStr(rel->rd_rel->relname), blkno, pruneXid, minCommittedXmax))); + return; + } + + if (TransactionIdIsValid(maxXmax) && TransactionIdIsValid(ubtOpaque->last_delete_xid) && + TransactionIdFollows(maxXmax, ubtOpaque->last_delete_xid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] max_xmax is bigger than last_delete_xid: relName=%s, blkno=%u, last_delete_xid on page=%lu, actual value=%lu.", + NameStr(rel->rd_rel->relname), blkno, ubtOpaque->last_delete_xid, maxXmax))); } - - 
if (TransactionIdIsValid(maxXmax) && TransactionIdIsValid(opaque->last_delete_xid) && - TransactionIdFollows(maxXmax, opaque->last_delete_xid)) { - ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY last_delete_xid invalid, rel = %s, last_delete_xid on page = %lu, " - "actual value = %lu.", - NameStr(rel->rd_rel->relname), opaque->last_delete_xid, maxXmax))); +} + +void UBTreeVerifyRowptrDML(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum) +{ + if (u_sess->attr.attr_storage.ustore_verify) { + return UBTreeVerifyRowptrNonDML(rel, page, blkno); + } + if (offnum == InvalidOffsetNumber) { + return; + } + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); + Oid relOid = (rel ? rel->rd_id : InvalidOid); + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + OffsetNumber firstPos = P_FIRSTDATAKEY(opaque); + OffsetNumber lastPos = PageGetMaxOffsetNumber(page); + if (firstPos > lastPos) { + return; } - - /* skip unique check for non-unique index */ - if (!rel->rd_index->indisunique && !rel->rd_index->indisprimary) { - return true; + ItemIdSort indexSortPtr = (ItemIdSort)palloc0(sizeof(ItemIdSortData)); + UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, offnum, indexSortPtr, indexName, relOid); + pfree(indexSortPtr); + + UBTreeVerifyTupleKey(rel, page, blkno, offnum, firstPos, lastPos); +} + +void UBTreeVerifyItems(Relation rel, BlockNumber blkno, TupleDesc desc, BTScanInsert cmpKeys, int keysz, + IndexTuple currKey, IndexTuple nextKey, UBTPageOpaqueInternal opaque) +{ + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + if (_bt_index_tuple_compare(desc, cmpKeys->scankeys, keysz, currKey, nextKey)) + return; + + char *currkeyDesc = NULL; + char *nextkeyDesc = NULL; + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + if (P_ISLEAF(opaque)) { + index_deform_tuple(currKey, RelationGetDescr(rel), values, 
isnull); + currkeyDesc = BuildIndexValueDescription(rel, values, isnull); + index_deform_tuple(nextKey, RelationGetDescr(rel), values, isnull); + nextkeyDesc = BuildIndexValueDescription(rel, values, isnull); + } + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, relName=%s, blkno=%u.", (nextkeyDesc ? nextkeyDesc : "(unknown)"), + (currkeyDesc ? currkeyDesc : "(unknown)"), NameStr(rel->rd_rel->relname), blkno))); +} + +static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, + OffsetNumber firstPos, OffsetNumber lastPos) +{ + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + TupleDesc desc = RelationGetDescr(rel); + int keySize = IndexRelationGetNumberOfKeyAttributes(rel); + BTScanInsert cmpKeys = UBTreeMakeScanKey(rel, NULL); + IndexTuple currKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); + if (offnum > firstPos) { + ItemId itemId = PageGetItemId(page, OffsetNumberPrev(offnum)); + IndexTuple prev_key = (IndexTuple)PageGetItem(page, itemId); + UBTreeVerifyItems(rel, blkno, desc, cmpKeys, keySize, prev_key, currKey, opaque); } - /* skip unique check if gpiScan of GPI is not given */ - if (RelationIsGlobalIndex(rel) && !gpiScan) { - return true; + if (offnum < lastPos) { + ItemId itemId = PageGetItemId(page, OffsetNumberNext(offnum)); + IndexTuple next_key = (IndexTuple)PageGetItem(page, itemId); + UBTreeVerifyItems(rel, blkno, desc, cmpKeys, keySize, currKey, next_key, opaque); } - return true; + pfree(cmpKeys); } - -bool UBTreePageVerify(UBtreePageVerifyParams *verifyParams) + +static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno) { - if (verifyParams == NULL) { - return false; - } + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + const char *indexName = (rel && rel->rd_rel ? 
RelationGetRelationName(rel) : "unknown"); + Oid relOid = (rel ? rel->rd_id : InvalidOid); + TupleDesc desc = RelationGetDescr(rel); + int keysz = IndexRelationGetNumberOfKeyAttributes(rel); + ItemIdSortData itemidBase[MaxIndexTuplesPerPage]; + ItemIdSort sortPtr = itemidBase; + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + OffsetNumber firstPos = P_FIRSTDATAKEY(opaque); + OffsetNumber lastPos = PageGetMaxOffsetNumber(page); - Relation rel = verifyParams->bvInfo.rel; - Page page = verifyParams->page; - VerifyLevel level = verifyParams->bvInfo.vLevel; - GPIScanDesc gpiScan = verifyParams->gpiScan; + if (firstPos > lastPos) { + return; + } + + BTScanInsert cmpKeys = UBTreeMakeScanKey(rel, NULL); + UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, firstPos, sortPtr, indexName, relOid); + IndexTuple currKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, firstPos)); + OffsetNumber nextPos = OffsetNumberNext(firstPos); + sortPtr++; + while (nextPos <= lastPos) { + ItemId itemid = PageGetItemId(page, nextPos); + IndexTuple nextKey = (IndexTuple)PageGetItem(page, itemid); + if (P_ISLEAF(opaque) || nextPos > firstPos + 1) { + if (!_bt_index_tuple_compare(desc, cmpKeys->scankeys, keysz, currKey, nextKey)) { + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + char *currkeyDesc = NULL; + char *nextkeyDesc = NULL; + if (P_ISLEAF(opaque)) { + index_deform_tuple(currKey, RelationGetDescr(rel), values, isnull); + currkeyDesc = BuildIndexValueDescription(rel, values, isnull); + index_deform_tuple(nextKey, RelationGetDescr(rel), values, isnull); + nextkeyDesc = BuildIndexValueDescription(rel, values, isnull); + } + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, indexName=%s, oid %u, blkno=%u.", + (nextkeyDesc ? nextkeyDesc : "(unknown)"), (currkeyDesc ? 
currkeyDesc : "(unknown)"), indexName, relOid, blkno))); + pfree(cmpKeys); + return; + } + } + currKey = nextKey; + UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, nextPos, sortPtr, indexName, relOid); + nextPos = OffsetNumberNext(nextPos); + sortPtr++; + } + + int storageNum = sortPtr - itemidBase; + if (storageNum <= 1) { + pfree(cmpKeys); + return; + } + + qsort((char*)itemidBase, storageNum, sizeof(ItemIdSortData), ItemCompare); + + for (int i = 0; i < storageNum - 1; i++) { + ItemIdSort tempPtr1 = &itemidBase[i]; + ItemIdSort tempPtr2 = &itemidBase[i + 1]; + if (tempPtr1->end > tempPtr2->start) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] Ubtree ItemIdSort conflict: indexName=%s, oid=%u, blkno=%u, ptr1offset %u, " + "ptr1start = %u, ptr1end = %u, ptr2offset = %u, ptr2start = %u, ptr2end = %u.", + indexName, relOid, blkno, tempPtr1->offset, tempPtr1->start, tempPtr1->end, + tempPtr2->offset, tempPtr2->start, tempPtr2->end))); + pfree(cmpKeys); + return; + } + } + + pfree(cmpKeys); +} + +void UBTreeVerifyPage(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) +{ + BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + BTPageOpaqueInternal opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); if (P_IGNORE(opaque)) { - return true; + return; + } + + UBTreeVerifyHeader((PageHeaderData*)page, rel, blkno, PageGetPageSize((PageHeader)page), GetPageHeaderSize(page)); + UBTreeVerifyHikey(rel, page, blkno); + UBTreeVerifyRowptrDML(rel, page, blkno, offnum); + UBTPageOpaqueInternal ubtOpaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + if (!P_ISLEAF(ubtOpaque)) { + return; + } + TransactionId xidBase = ubtOpaque->xid_base; + UBTreeVerifyPageXid(rel, blkno, xidBase, ShortTransactionIdToNormal(xidBase, ((PageHeader)page)->pd_prune_xid)); + UBTreeVerifyTupleTransactionInfo(rel, page, offnum, fromInsert, xidBase); +} + +static void 
UBTreeVerifyHeader(PageHeaderData* page, Relation rel, BlockNumber blkno, uint16 pageSize, uint16 headerSize) +{ + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + if (pageSize != BLCKSZ || (page->pd_flags & ~PD_VALID_FLAG_BITS) != 0 || page->pd_lower < headerSize || + page->pd_lower > page->pd_upper || page->pd_upper > page->pd_special || page->pd_special > BLCKSZ) { + const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); + Oid relOid = (rel ? rel->rd_id : InvalidOid); + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] index page header invalid: indexName=%s, oid=%u, blkno=%u, size=%u," + "flags=%u, lower=%u, upper=%u, special=%u.", indexName, relOid, blkno, headerSize, + page->pd_flags, page->pd_lower, page->pd_upper, page->pd_special))); } +} + +static void UBTreeVerifyRowptr(PageHeaderData* header, Page page, BlockNumber blkno, OffsetNumber offset, + ItemIdSort indexSortPtr, const char *indexName, Oid relOid) +{ + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + ItemId itemId = PageGetItemId(page, offset); + unsigned rpStart = ItemIdGetOffset(itemId); + Size rpLen = ItemIdGetLength(itemId); - VerifyIndexPageHeader(rel, page); - VerifyIndexHiKeyAndOpaque(rel, page); - VerifyIndexPageItemId(rel, page, level); - VerifyIndexTransactionInfo(rel, page, level, gpiScan); - return true; + if (!ItemIdIsUsed(itemId)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] row pointer is unused: indexName=%s, oid=%u, blkno=%u, offset=%u, " + "rowPtr startOffset=%u, rowPtr len=%lu.", indexName, relOid, blkno, offset, rpStart, rpLen))); + return; + } + if (!ItemIdHasStorage(itemId)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] row pointer has no storage: indexName=%s, oid=%u, blkno=%u, offset=%u, " + "rowPtr startOffset=%u, rowPtr len=%lu.", indexName, relOid, blkno, offset, rpStart, rpLen))); + return; + } + 
indexSortPtr->start = rpStart; + indexSortPtr->end = indexSortPtr->start + SHORTALIGN(ItemIdGetLength(itemId)); + indexSortPtr->offset = offset; + if (indexSortPtr->start < header->pd_upper || indexSortPtr->end > header->pd_special) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] The item corresponding to row pointer exceeds the range of item stored in the page: indexName=%s, oid=%u, " + "blkno=%u, offset=%u, rowPtr startOffset=%u, rowPtr len=%lu.", indexName, relOid, blkno, offset, rpStart, rpLen))); + return; + } + int tupleSize = IndexTupleSize((IndexTuple)PageGetItem(page, itemId)); + if (tupleSize > (int)rpLen) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify UBTree] tuple size is bigger than item's len: indexName=%s, oid=%u, blkno=%u, offset=%u, " + "tuple size=%d, rowPtr len=%lu.", indexName, relOid, blkno, offset, tupleSize, rpLen))); + return; + } } +void UBTreeVerifyAll(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) +{ + BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + UBTreeVerifyPage(rel, page, blkno, offnum, fromInsert); + BTPageOpaqueInternal opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); + if (P_IGNORE(opaque)) { + return; + } + UBTPageOpaqueInternal ubtOpaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + if (!P_ISLEAF(ubtOpaque)) { + return; + } + TransactionId xidBase = ubtOpaque->xid_base; + UBTreeVerifyAllTuplesTransactionInfo(rel, page, blkno, P_FIRSTDATAKEY(ubtOpaque), fromInsert, xidBase); +} diff --git a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp index 40eea37565..95ff1ded2c 100644 --- a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp @@ -166,7 +166,6 @@ bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, 
OidRBTr UBTPageOpaqueInternal opaque; OffsetNumber offnum, maxoff; IndexPruneState prstate; - UBtreePageVerifyParams verifyParams; WHITEBOX_TEST_STUB("UBTreePagePruneOpt", WhiteboxDefaultErrorEmit); @@ -280,11 +279,7 @@ bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, OidRBTr } END_CRIT_SECTION(); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } + UBTreeVerifyAll(rel, page, BufferGetBlockNumber(buf), InvalidOffsetNumber, false); return has_pruned; } @@ -707,7 +702,6 @@ static TransactionId UBTreeCheckUnique(Relation rel, IndexTuple itup, Relation h Buffer nbuf = InvalidBuffer; bool found = false; Relation tarRel = heapRel; - UBtreePageVerifyParams verifyParams; WHITEBOX_TEST_STUB("UBTreeCheckUnique", WhiteboxDefaultErrorEmit); @@ -719,10 +713,6 @@ static TransactionId UBTreeCheckUnique(Relation rel, IndexTuple itup, Relation h page = BufferGetPage(buf); opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); maxoff = PageGetMaxOffsetNumber(page); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, (char *) &verifyParams, - rel, page, InvalidBlockNumber, InvalidOffsetNumber, NULL, gpiScan, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } /* * Scan over all equal tuples, looking for live conflicts. 
@@ -906,11 +896,6 @@ static TransactionId UBTreeCheckUnique(Relation rel, IndexTuple itup, Relation h ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("fell off the end of index \"%s\" at blkno %u", RelationGetRelationName(rel), nblkno))); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, gpiScan, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } } maxoff = PageGetMaxOffsetNumber(page); offset = P_FIRSTDATAKEY(opaque); @@ -987,7 +972,6 @@ static OffsetNumber UBTreeFindInsertLoc(Relation rel, Buffer *bufptr, OffsetNumb UBTPageOpaqueInternal lpageop; bool movedright = false; bool pruned = false; - UBtreePageVerifyParams verifyParams; lpageop = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); @@ -1027,11 +1011,6 @@ static OffsetNumber UBTreeFindInsertLoc(Relation rel, Buffer *bufptr, OffsetNumb for (;;) { rbuf = _bt_relandgetbuf(rel, rbuf, rblkno, BT_WRITE); page = BufferGetPage(rbuf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } lpageop = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); /* * If this page was incompletely split, finish the split now. 
@@ -1221,7 +1200,6 @@ static void UBTreeInsertOnPage(Relation rel, BTScanInsert itup_key, Buffer buf, BTMetaPageData *metad = NULL; OffsetNumber itup_off; BlockNumber itup_blkno; - UBtreePageVerifyParams verifyParams; itup_off = newitemoff; itup_blkno = BufferGetBlockNumber(buf); @@ -1340,11 +1318,7 @@ static void UBTreeInsertOnPage(Relation rel, BTScanInsert itup_key, Buffer buf, } END_CRIT_SECTION(); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } + UBTreeVerifyPage(rel, page, BufferGetBlockNumber(buf), itup_off, true); /* release buffers */ if (BufferIsValid(metabuf)) { _bt_relbuf(rel, metabuf); @@ -1394,6 +1368,8 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi bool isleaf = false; errno_t rc; IndexTuple firstright, lefthighkey; + Buffer actualInsertBuf = InvalidBuffer; + OffsetNumber actualInsertOff = InvalidOffsetNumber; WHITEBOX_TEST_STUB("UBTreeSplit", WhiteboxDefaultErrorEmit); @@ -1650,6 +1626,8 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi /* does new item belong before this one? 
*/ if (i == newitemoff) { + Assert(actualInsertBuf == InvalidBuffer); + Assert(actualInsertOff == InvalidOffsetNumber); if (newitemonleft) { newitemleftoff = leftoff; if (!UBTreePageAddTuple(leftpage, newitemsz, newitem, leftoff, true)) { @@ -1659,6 +1637,8 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi errmsg("failed to add new item to the left sibling while splitting block %u of index \"%s\"", origpagenumber, RelationGetRelationName(rel)))); } + actualInsertBuf = buf; + actualInsertOff = leftoff; leftoff = OffsetNumberNext(leftoff); /* update active hint */ lopaque->activeTupleCount++; @@ -1670,6 +1650,8 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi errmsg("failed to add new item to the right sibling while splitting block %u of index \"%s\"", origpagenumber, RelationGetRelationName(rel)))); } + actualInsertBuf = rbuf; + actualInsertOff = rightoff; rightoff = OffsetNumberNext(rightoff); /* update active hint */ ropaque->activeTupleCount++; @@ -1718,6 +1700,10 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi errmsg("failed to add new item to the right sibling while splitting block %u of index \"%s\"", origpagenumber, RelationGetRelationName(rel)))); } + Assert(actualInsertBuf == InvalidBuffer); + Assert(actualInsertOff == InvalidOffsetNumber); + actualInsertBuf = rbuf; + actualInsertOff = rightoff; rightoff = OffsetNumberNext(rightoff); /* update active hint */ ropaque->activeTupleCount++; @@ -1888,6 +1874,8 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi } END_CRIT_SECTION(); + Page page = BufferGetPage(actualInsertBuf); + UBTreeVerifyPage(rel, page, BufferGetBlockNumber(actualInsertBuf), actualInsertOff, true); /* discard this page from the Recycle Queue */ UBTreeRecordUsedPage(rel, addr); @@ -2007,15 +1995,10 @@ static OffsetNumber UBTreeFindDeleteLoc(Relation rel, Buffer* bufP, OffsetNumber Page page; 
UBTPageOpaqueInternal opaque; TransactionId xmin, xmax; - UBtreePageVerifyParams verifyParams; WHITEBOX_TEST_STUB("UBTreeFindDeleteLoc", WhiteboxDefaultErrorEmit); page = BufferGetPage(*bufP); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, (char *) &verifyParams, - rel, page, InvalidBlockNumber, InvalidOffsetNumber, NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); maxoff = PageGetMaxOffsetNumber(page); @@ -2097,11 +2080,6 @@ static OffsetNumber UBTreeFindDeleteLoc(Relation rel, Buffer* bufP, OffsetNumber nblkno = opaque->btpo_next; *bufP = _bt_relandgetbuf(rel, *bufP, nblkno, BT_WRITE); page = BufferGetPage(*bufP); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); if (!P_IGNORE(opaque)) break; @@ -2243,7 +2221,6 @@ static void UBTreeDeleteOnPage(Relation rel, Buffer buf, OffsetNumber offset, bo IndexTuple itup = (IndexTuple)PageGetItem(page, iid); UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); TransactionId xid = GetCurrentTransactionId(); - UBtreePageVerifyParams verifyParams; /* Do the update. 
No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -2282,10 +2259,7 @@ static void UBTreeDeleteOnPage(Relation rel, Buffer buf, OffsetNumber offset, bo } END_CRIT_SECTION(); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, (char *) &verifyParams, - rel, page, InvalidBlockNumber, InvalidOffsetNumber, NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } + UBTreeVerifyPage(rel, page, BufferGetBlockNumber(buf), offset, false); bool needRecordEmpty = (opaque->activeTupleCount == 0); if (needRecordEmpty) { /* @@ -2602,6 +2576,7 @@ static Buffer UBTreeNewRoot(Relation rel, Buffer lbuf, Buffer rbuf) } END_CRIT_SECTION(); + UBTreeVerifyAll(rel, rootpage, rootblknum, InvalidOffsetNumber, false); /* done with metapage */ _bt_relbuf(rel, metabuf); diff --git a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp index e4531da7d2..d4e68db9b3 100644 --- a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp @@ -46,6 +46,10 @@ static bool QueuePageIsEmpty(Buffer buf); static Buffer AcquireNextAvailableQueuePage(Relation rel, Buffer buf, UBTRecycleForkNumber forkNumber); static void InsertOnRecycleQueuePage(Relation rel, Buffer buf, uint16 offset, BlockNumber blkno, TransactionId xid); static void RemoveOneItemFromPage(Relation rel, Buffer buf, uint16 offset); +static void UBTRecycleQueueExecVerify(int mode, UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, UBTRecycleMeta metaData, OffsetNumber offnum = InvalidOffsetNumber, bool fromInsert = false); +static void UBTRecycleQueueVerifyHeader(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno); +static void UBTRecycleQueueVerifyItem(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); +static void 
UBTRecycleMetaDataVerify(UBTRecycleMeta metaData, Relation rel, BlockNumber metaBlkno); const BlockNumber FirstBlockNumber = 0; const BlockNumber FirstNormalBlockNumber = 2; /* 0 and 1 are pages which include meta data */ @@ -143,6 +147,7 @@ void UBTreeRecycleQueueInitPage(Relation rel, Page page, BlockNumber blkno, Bloc UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, blkno); header->prevBlkno = prevBlkno; header->nextBlkno = nextBlkno; + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, blkno, NULL); } /* record the chain changes in prev or next page */ @@ -154,6 +159,7 @@ void UBtreeRecycleQueueChangeChain(Buffer buf, BlockNumber newBlkno, bool setNex } else { header->prevBlkno = newBlkno; } + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, NULL, BufferGetBlockNumber(buf), NULL); } static void LogInitRecycleQueuePage(Relation rel, Buffer buf, Buffer leftBuf, Buffer rightBuf) @@ -221,6 +227,8 @@ static void InitRecycleQueueInitialPage(Relation rel, Buffer buf) } END_CRIT_SECTION(); + UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, blkno); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, blkno, NULL); } Buffer ReadRecycleQueueBuffer(Relation rel, BlockNumber blkno) @@ -519,6 +527,7 @@ Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBT if (metaChanged) { MarkBufferDirtyHint(metaBuf, false); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_METADATA, NULL, rel, BufferGetBlockNumber(metaBuf), metaData); } UnlockReleaseBuffer(metaBuf); @@ -526,7 +535,7 @@ Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBT return indexBuf; } -void UBTreeRecycleQueuePageChangeEndpointLeftPage(Buffer buf, bool isHead) +void UBTreeRecycleQueuePageChangeEndpointLeftPage(Relation rel, Buffer buf, bool isHead) { uint32 endpointFlag = (isHead ? 
URQ_HEAD_PAGE : URQ_TAIL_PAGE); UBTRecycleQueueHeader header = GetRecycleQueueHeader(BufferGetPage(buf), BufferGetBlockNumber(buf)); @@ -539,9 +548,12 @@ void UBTreeRecycleQueuePageChangeEndpointLeftPage(Buffer buf, bool isHead) tailItem->next = OtherBlockOffset; } header->flags &= ~endpointFlag; + if (rel == NULL) { + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, BufferGetBlockNumber(buf), NULL); + } } -void UBTreeRecycleQueuePageChangeEndpointRightPage(Buffer buf, bool isHead) +void UBTreeRecycleQueuePageChangeEndpointRightPage(Relation rel, Buffer buf, bool isHead) { uint32 endpointFlag = (isHead ? URQ_HEAD_PAGE : URQ_TAIL_PAGE); UBTRecycleQueueHeader header = GetRecycleQueueHeader(BufferGetPage(buf), BufferGetBlockNumber(buf)); @@ -557,6 +569,9 @@ void UBTreeRecycleQueuePageChangeEndpointRightPage(Buffer buf, bool isHead) Assert(header->head == InvalidOffset); } header->flags |= endpointFlag; + if (rel == NULL) { + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, BufferGetBlockNumber(buf), NULL); + } } static void RecycleQueueChangeEndpoint(Relation rel, Buffer buf, Buffer nextBuf, bool isHead) @@ -569,8 +584,8 @@ static void RecycleQueueChangeEndpoint(Relation rel, Buffer buf, Buffer nextBuf, /* Do the update. 
No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); - UBTreeRecycleQueuePageChangeEndpointLeftPage(buf, isHead); - UBTreeRecycleQueuePageChangeEndpointRightPage(nextBuf, isHead); + UBTreeRecycleQueuePageChangeEndpointLeftPage(rel, buf, isHead); + UBTreeRecycleQueuePageChangeEndpointRightPage(rel, nextBuf, isHead); MarkBufferDirty(buf); MarkBufferDirty(nextBuf); @@ -594,6 +609,11 @@ static void RecycleQueueChangeEndpoint(Relation rel, Buffer buf, Buffer nextBuf, } END_CRIT_SECTION(); + + UBTRecycleQueueHeader nextHeader = GetRecycleQueueHeader(BufferGetPage(nextBuf), BufferGetBlockNumber(nextBuf)); + UBTRecycleQueueHeader header = GetRecycleQueueHeader(BufferGetPage(buf), BufferGetBlockNumber(buf)); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, nextHeader, rel, BufferGetBlockNumber(nextBuf), NULL); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, BufferGetBlockNumber(buf), NULL); } static Buffer MoveToEndpointPage(Relation rel, Buffer buf, bool needHead, int access) @@ -712,7 +732,15 @@ static void RecycleQueueLinkNewPage(Relation rel, Buffer leftBuf, Buffer newBuf) } END_CRIT_SECTION(); - + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, leftHeader, rel, leftBlkno, NULL); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, blkno, NULL); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, rightHeader, rel, rightBlkno, NULL); + if (header->prevBlkno == header->nextBlkno) { + ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), errmsg( + "RecycleQueueLinkNewPage invalid: prev and next page is the same, " + "blkno %u next_blkno %u prev next blkno %u index \"%s\" oid %u.", leftBlkno, + BufferGetBlockNumber(newBuf), header->prevBlkno, RelationGetRelationName(rel), rel->rd_id))); + } UnlockReleaseBuffer(rightBuf); } @@ -761,7 +789,7 @@ static Buffer AcquireNextAvailableQueuePage(Relation rel, Buffer buf, UBTRecycle return newBuf; } -static void TryFixMetaData(Buffer metaBuf, int32 oldval, int32 
newval, bool isHead) +static void TryFixMetaData(Buffer metaBuf, int32 oldval, int32 newval, bool isHead, Relation rel) { UBTRecycleMeta metaData = (UBTRecycleMeta)PageGetContents(BufferGetPage(metaBuf)); int32 *addr = (isHead ? (int32 *)&(metaData->headBlkno) : (int32 *)&(metaData->tailBlkno)); @@ -769,6 +797,7 @@ static void TryFixMetaData(Buffer metaBuf, int32 oldval, int32 newval, bool isHe /* update succeed, mark buffer dirty */ if (ConditionalLockBuffer(metaBuf)) { MarkBufferDirty(metaBuf); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_METADATA, NULL, rel, BufferGetBlockNumber(metaBuf), metaData); LockBuffer(metaBuf, BUFFER_LOCK_UNLOCK); } } @@ -793,7 +822,7 @@ Buffer RecycleQueueGetEndpointPage(Relation rel, UBTRecycleForkNumber forkNumber /* try to fix the information in the meta if necessary */ BlockNumber trueBlkno = BufferGetBlockNumber(buf); if (trueBlkno != givenBlkno) { - TryFixMetaData(metaBuf, givenBlkno, trueBlkno, needHead); + TryFixMetaData(metaBuf, givenBlkno, trueBlkno, needHead, rel); } ReleaseBuffer(metaBuf); @@ -906,7 +935,7 @@ static void InsertOnRecycleQueuePage(Relation rel, Buffer buf, uint16 offset, Bl } END_CRIT_SECTION(); - + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM, header, rel, BufferGetBlockNumber(buf), NULL, offset, true); UnlockReleaseBuffer(buf); } @@ -946,6 +975,7 @@ void UBTreeXlogRecycleQueueModifyPage(Buffer buf, xl_ubtree2_recycle_queue_modif item->next = header->freeListHead; header->freeListHead = xlrec->offset; } + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM, header, NULL, blkno, NULL, xlrec->offset, xlrec->isInsert); } static void RemoveOneItemFromPage(Relation rel, Buffer buf, uint16 offset) @@ -990,6 +1020,7 @@ static void RemoveOneItemFromPage(Relation rel, Buffer buf, uint16 offset) } END_CRIT_SECTION(); + UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM, header, rel, BufferGetBlockNumber(buf), 
NULL, offset, false); if (!(IsNormalOffset(header->head))) { /* deleting the only item on this page */ @@ -1156,3 +1187,166 @@ uint32 UBTreeRecycleQueuePageDump(Relation rel, Buffer buf, bool recordEachItem, return errVerified; } +static void UBTRecycleQueueExecVerify(int mode, UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, + UBTRecycleMeta metaData, OffsetNumber offnum, bool fromInsert) +{ + BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); + + int module = mode & USTORE_VERIFY_SUB_MOD_MASK; + if (module & USTORE_VERIFY_URQ_SUB_HEADER) { + UBTRecycleQueueVerifyHeader(header, rel, blkno); + } + if (module & USTORE_VERIFY_URQ_SUB_ITEM) { + UBTRecycleQueueVerifyItem(header, rel, blkno, offnum, fromInsert); + } + if (module & USTORE_VERIFY_URQ_SUB_METADATA) { + UBTRecycleMetaDataVerify(metaData, rel, blkno); + } +} + +void UBTRecycleQueueVerifyPageOffline(Relation rel, Page page, BlockNumber blkno) +{ + UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, blkno); + UBTRecycleMeta metaData = NULL; + int mode = USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM; + if (IsMetaPage(blkno)) { + metaData = (UBTRecycleMeta)PageGetContents(page); + mode &= USTORE_VERIFY_URQ_SUB_METADATA; + } + UBTRecycleQueueExecVerify(mode, header, rel, blkno, metaData, InvalidOffset, false); +} + +static void UBTRecycleQueueVerifyHeader(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno) +{ + BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + uint32 urqBlocks = MaxBlockNumber; + Oid relOid = InvalidOid; + bool headerError = false; + + if (rel != NULL) { + RelationOpenSmgr(rel); + urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); + relOid = rel->rd_id; + } + + headerError = (header->flags > (URQ_HEAD_PAGE | URQ_TAIL_PAGE)) || (IsNormalOffset(header->head) && !IsNormalOffset(header->tail)) || + (!IsNormalOffset(header->head) && IsNormalOffset(header->tail)) || (header->freeItems > 
BlockGetMaxItems(blkno)) || + (header->prevBlkno == header->nextBlkno) || (header->prevBlkno == blkno || header->nextBlkno == blkno) || + (header->prevBlkno >= urqBlocks || header->nextBlkno >= urqBlocks); + + if (headerError) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify URQ] urq header is invalid : oid=%u, blkno=%u, flags=%u, head=%d, tail=%d," + " free_items=%d, free_list_head=%d, prev_blkno=%u, next_blkno=%u", relOid, blkno, header->flags, + header->head, header->tail, header->freeItems, header->freeListHead, header->prevBlkno, header->nextBlkno))); + } +} + +void UBTRecycleQueueVerifyAllItems(UBTRecycleQueueHeader header, Oid oid, BlockNumber blkno) +{ + TransactionId maxXid = ReadNewTransactionId(); + TransactionId prevXid = 0; + uint16 itemCount = 0; + uint16 itemMaxNum = BlockGetMaxItems(blkno); + uint16 currOffset = header->head; + uint16 prevOffset = InvalidOffset; + + UBTRecycleQueueItem item = NULL; + + while (IsNormalOffset(currOffset) && itemCount <= itemMaxNum) { + if (currOffset == itemMaxNum) { + break; + } + item = &header->items[currOffset]; + if (item->prev != prevOffset || item->next == currOffset) { + break; + } + if (item->xid > maxXid || item->xid < prevXid) { + break; + } + itemCount++; + prevXid = item->xid; + prevOffset = currOffset; + currOffset = item->next; + } + + uint16 freelistOffset = header->freeListHead; + while (freelistOffset != InvalidOffset && itemCount <= itemMaxNum) { + if (freelistOffset == itemMaxNum) { + break; + } + item = &header->items[freelistOffset]; + if (item->blkno == InvalidBlockNumber && item->xid == InvalidTransactionId && + item->prev == InvalidOffset) { + itemCount++; + freelistOffset = item->next; + } + } + + if (itemCount + header->freeItems != itemMaxNum) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify URQ] urq items are invalid : oid %u, blkno %u, (items info : curr_item_offset = %u, " + "prev_offset = %u, item_count = 
%u, free_list_offset = %u, free_items = %u, next_xid = %ld)", + oid, blkno, currOffset, prevOffset, itemCount, freelistOffset, header->freeItems, maxXid))); + } +} + +static void UBTRecycleQueueVerifyItem(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) +{ + BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + + Oid relOid = (rel ? rel->rd_id : InvalidOid); + bool itemError = false; + UBTRecycleQueueItem item = NULL; + + if (offnum != InvalidOffset) { + item = &header->items[offnum]; + if (fromInsert) { + itemError = (item->blkno == InvalidBlockNumber) || (item->next == offnum); + } else { + itemError = (header->freeListHead != offnum) || (item->xid != InvalidTransactionId) || + (item->blkno != InvalidBlockNumber) || (item->prev != InvalidOffset); + } + if (itemError) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify URQ] urq item is invalid: oid=%u, blkno=%u, offset=%u, " + "(item info : xid=%ld blkno=%u prev=%u next=%u)", relOid, blkno, offnum, + item->xid, item->blkno, item->prev, item->next))); + } + } + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + + UBTRecycleQueueVerifyAllItems(header, relOid, blkno); +} + +static void UBTRecycleMetaDataVerify(UBTRecycleMeta metaData, Relation rel, BlockNumber metaBlkno) +{ + BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); + + BlockNumber indexBlocks = (rel == NULL ? 
metaData->nblocksUpper : RelationGetNumberOfBlocks(rel)); + uint32 urqBlocks = MaxBlockNumber; + Oid oid = InvalidOid; + bool metaError = false; + + if (rel != NULL) { + RelationOpenSmgr(rel); + urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); + oid = rel->rd_id; + } + + metaError = ((metaData->headBlkno == 1 - metaBlkno) || (metaData->tailBlkno == 1 - metaBlkno)) || + (metaData->headBlkno >= urqBlocks || metaData->tailBlkno >= urqBlocks) || (metaData->nblocksUpper > indexBlocks); + + if (metaError) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( + "[Verify URQ] urq meta is invalid : oid=%u, meta_blkno=%u, (meta info : headBlkno = %u, tailBlkno = %u, " + "nblocksUpper = %u, nblocksLower = %u; urq_blocks = %u, index_blocks = %u)", + oid, metaBlkno, metaData->headBlkno, metaData->tailBlkno, metaData->nblocksUpper, + metaData->nblocksLower, urqBlocks, indexBlocks))); + } +} \ No newline at end of file diff --git a/src/gausskernel/storage/access/ubtree/ubtree.cpp b/src/gausskernel/storage/access/ubtree/ubtree.cpp index a5904345bd..c8716db49c 100644 --- a/src/gausskernel/storage/access/ubtree/ubtree.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtree.cpp @@ -1219,7 +1219,7 @@ static void IndexPageShiftBase(Relation rel, Page page, int64 delta, bool needWa } END_CRIT_SECTION(); - + UBTreeVerifyAll(rel, page, BufferGetBlockNumber(buf), InvalidOffsetNumber, false); WHITEBOX_TEST_STUB("IndexPageShiftBase-end", WhiteboxDefaultErrorEmit); } diff --git a/src/gausskernel/storage/access/ubtree/ubtsearch.cpp b/src/gausskernel/storage/access/ubtree/ubtsearch.cpp index 06268ce8ec..8343ec9b03 100644 --- a/src/gausskernel/storage/access/ubtree/ubtsearch.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtsearch.cpp @@ -64,7 +64,6 @@ BTStack UBTreeSearch(Relation rel, BTScanInsert key, Buffer *bufP, int access, b { BTStack stack_in = NULL; int pageAccess = BT_READ; - UBtreePageVerifyParams verifyParams; /* Get the 
root page to start with */ *bufP = UBTreeGetRoot(rel, access); @@ -143,11 +142,6 @@ BTStack UBTreeSearch(Relation rel, BTScanInsert key, Buffer *bufP, int access, b */ if (opaque->btpo.level == 1 && access == BT_WRITE) pageAccess = BT_WRITE; - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } /* drop the read lock on the parent page, acquire one on the child */ *bufP = _bt_relandgetbuf(rel, *bufP, blkno, pageAccess, par_blkno); @@ -1311,7 +1305,6 @@ static bool UBTreeReadPage(IndexScanDesc scan, ScanDirection dir, OffsetNumber o Oid heapOid = IndexScanGetPartHeapOid(scan); TransactionId xidBase; bool isnull = false; - UBtreePageVerifyParams verifyParams; tupdesc = RelationGetDescr(scan->indexRelation); PartitionOidAttr = IndexRelationGetNumberOfAttributes(scan->indexRelation); @@ -1419,11 +1412,6 @@ static bool UBTreeReadPage(IndexScanDesc scan, ScanDirection dir, OffsetNumber o so->currPos.lastItem = MaxIndexTuplesPerPage - 1; so->currPos.itemIndex = MaxIndexTuplesPerPage - 1; } - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UBTREE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, scan->indexRelation, page, InvalidBlockNumber, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UBTREE, (char *) &verifyParams); - } return (so->currPos.firstItem <= so->currPos.lastItem); } diff --git a/src/gausskernel/storage/access/ubtree/ubtutils.cpp b/src/gausskernel/storage/access/ubtree/ubtutils.cpp index 83687a96d8..64030501b0 100644 --- a/src/gausskernel/storage/access/ubtree/ubtutils.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtutils.cpp @@ -427,6 +427,11 @@ static bool UBTreeVisibilityCheckWrap(IndexScanDesc scan, Page page, OffsetNumbe */ TransactionIdStatus 
UBTreeCheckXid(TransactionId xid) { + if (xid == FrozenTransactionId || (TransactionIdIsNormal(xid) && + TransactionIdPrecedes(xid, pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid)) && + !RecoveryInProgress())) { + return XID_COMMITTED; + } TransactionIdStatus ts = TransactionIdGetStatus(xid); /* Please refer to HeapTupleSatisfiesVaccum */ if (ts == XID_INPROGRESS) { diff --git a/src/gausskernel/storage/access/ustore/Makefile b/src/gausskernel/storage/access/ustore/Makefile index d42350b7d3..0bc93a72db 100644 --- a/src/gausskernel/storage/access/ustore/Makefile +++ b/src/gausskernel/storage/access/ustore/Makefile @@ -11,6 +11,6 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif -OBJS = knl_uheap.o knl_upage.o knl_utuple.o knl_uhio.o knl_uscan.o knl_uvisibility.o knl_uvacuumlazy.o knl_uundorecord.o knl_uundovec.o knl_undolauncher.o knl_undoworker.o knl_undorequest.o knl_undoaction.o knl_pruneuheap.o knl_uredo.o knl_uextremeredo.o knl_uam.o knl_utuptoaster.o $(top_builddir)/src/test/whitebox/knl_whitebox_test.o knl_uverify.o +OBJS = knl_uheap.o knl_upage.o knl_utuple.o knl_uhio.o knl_uscan.o knl_uvisibility.o knl_uvacuumlazy.o knl_uundorecord.o knl_uundovec.o knl_undolauncher.o knl_undoworker.o knl_undorequest.o knl_undoaction.o knl_pruneuheap.o knl_uredo.o knl_uextremeredo.o knl_uam.o knl_utuptoaster.o $(top_builddir)/src/test/whitebox/knl_whitebox_test.o SUBDIRS = undo include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp index 26c61f36a7..90547868d3 100644 --- a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp @@ -203,7 +203,6 @@ int UHeapPagePrune(Relation relation, const RelationBuffer *relbuf, TransactionI bool executePruning = false; errno_t rc; bool hasPruned = false; - UPageVerifyParams verifyParams; if (pruned) { *pruned = false; @@ -368,11 +367,7 @@ int 
UHeapPagePrune(Relation relation, const RelationBuffer *relbuf, TransactionI END_CRIT_SECTION(); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, relation, page, BufferGetBlockNumber(relbuf->buffer), - InvalidOffsetNumber, NULL, NULL, InvalidXLogRecPtr))) { - (void) ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); - } + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_ROWS)); /* * Report the number of tuples reclaimed to pgstats. This is ndeleted diff --git a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp index 86b2b7747c..f9996a84c8 100644 --- a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp @@ -1679,7 +1679,7 @@ static void RedoUndoInsertBlock(XLogBlockHead *blockhead, XLogBlockUndoParse *bl if (!skipInsert) { InsertPreparedUndo(t_thrd.ustore_cxt.urecvec, lsn); } - VerifyUndoRecordValid(undorec); + UndoRecordVerify(undorec); XLogReaderState record; XLogRecord decodedRecord; @@ -1768,7 +1768,7 @@ static void RedoUndoDeleteBlock(XLogBlockHead *blockhead, XLogBlockUndoParse *bl undo::RedoUndoMeta(&record, xlundometa, urecptr, t_thrd.ustore_cxt.urecvec->LastRecord(), t_thrd.ustore_cxt.urecvec->LastRecordSize()); - VerifyUndoRecordValid(undorec); + UndoRecordVerify(undorec); UHeapResetPreparedUndo(); } @@ -1875,10 +1875,10 @@ static void RedoUndoUpdateBlock(XLogBlockHead *blockhead, XLogBlockUndoParse *bl URecVector *urecvec = t_thrd.ustore_cxt.urecvec; UndoRecord *undorec = (*urecvec)[0]; - VerifyUndoRecordValid(undorec); - if (!inplaceUpdate) { - UndoRecord *newundorec = (*urecvec)[1]; - VerifyUndoRecordValid(newundorec); + if (inplaceUpdate) { + UndoRecordVerify(undorec); + } else { + UndoRecordVerify(newundorec); } UHeapResetPreparedUndo(); } diff --git 
a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 029e20f427..47f9d494d4 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -558,7 +558,6 @@ Oid UHeapInsert(RelationData *rel, UHeapTupleData *utuple, CommandId cid, BulkIn uint16 lower; int retryTimes = 0; int options = 0; - UPageVerifyParams verifyParams; WHITEBOX_TEST_STUB(UHEAP_INSERT_FAILED, WhiteboxDefaultErrorEmit); if (utuple == NULL) { @@ -721,13 +720,12 @@ reacquire_buffer: END_CRIT_SECTION(); /* Clean up */ Assert(UHEAP_XID_IS_TRANS(tuple->disk_tuple->flag)); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, blkno, ItemPointerGetOffsetNumber(&(tuple->ctid)), - NULL, NULL, InvalidXLogRecPtr, NULL, NULL, DML_VERIFY))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); + if (u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel, false, + (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), + ItemPointerGetOffsetNumber(&(tuple->ctid))); + UndoRecordVerify(undorec); } - - VerifyUndoRecordValid(undorec); UHeapFinalizeDML(rel, buffer, NULL, utuple, tuple, NULL, false, false); return InvalidOid; @@ -2023,7 +2021,6 @@ TM_Result UHeapDelete(Relation relation, ItemPointer tid, CommandId cid, Snapsho bool multixidIsMyself = false; TransactionId minXidInTDSlots = InvalidTransactionId; int retryTimes = 0; - UPageVerifyParams verifyParams; Assert(ItemPointerIsValid(tid)); @@ -2317,14 +2314,15 @@ check_tup_satisfies_update: } pfree(undotup.data); Assert(UHEAP_XID_IS_TRANS(utuple.disk_tuple->flag)); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, relation, page, blkno, offnum, - NULL, NULL, 
InvalidXLogRecPtr, NULL, NULL, DML_VERIFY))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); + if (u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, + (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), offnum); + + UndoRecord *undorec = (*t_thrd.ustore_cxt.urecvec)[0]; + UndoRecordVerify(undorec); } - UndoRecord *undorec = (*t_thrd.ustore_cxt.urecvec)[0]; - VerifyUndoRecordValid(undorec); + UHeapFinalizeDML(relation, buffer, NULL, &utuple, NULL, &(utuple.ctid), hasTupLock, false); return TM_Ok; @@ -2461,7 +2459,6 @@ TM_Result UHeapUpdate(Relation relation, Relation parentRelation, ItemPointer ot TransactionId minXidInTDSlots = InvalidTransactionId; bool oldBufLockReleased = false; int retryTimes = 0; - UPageVerifyParams verifyParams; Assert(newtup->tupTableType == UHEAP_TUPLE); Assert(ItemPointerIsValid(otid)); @@ -3315,27 +3312,24 @@ check_tup_satisfies_update: /* be tidy */ pfree(undotup.data); Assert(UHEAP_XID_IS_TRANS(uheaptup->disk_tuple->flag)); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, (char *) &verifyParams, - relation, page, block, ItemPointerGetOffsetNumber(&oldtup.ctid), - NULL, NULL, InvalidXLogRecPtr, NULL, NULL, DML_VERIFY))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); - } + if (u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { + + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, + (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), ItemPointerGetOffsetNumber(&oldtup.ctid)); - if(!useInplaceUpdate) { - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, relation, BufferGetPage(newbuf), BufferGetBlockNumber(newbuf), - ItemPointerGetOffsetNumber(&(uheaptup->ctid)), - NULL, NULL, 
InvalidXLogRecPtr, NULL, NULL, DML_VERIFY))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); + if (!useInplaceUpdate) { + Page newPage = BufferGetPage(newbuf); + UpageVerify((UHeapPageHeader)newPage, InvalidXLogRecPtr, NULL, relation, false, + (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), ItemPointerGetOffsetNumber(&(uheaptup->ctid))); } } URecVector *urecvec = t_thrd.ustore_cxt.urecvec; UndoRecord *oldundorec = (*urecvec)[0]; - VerifyUndoRecordValid(oldundorec); + UndoRecordVerify(oldundorec); if (!useInplaceUpdate) { UndoRecord *newundorec = (*urecvec)[1]; - VerifyUndoRecordValid(newundorec); + UndoRecordVerify(newundorec); } UHeapFinalizeDML(relation, buffer, &newbuf, newtup, uheaptup, &(oldtup.ctid), @@ -3394,7 +3388,6 @@ void UHeapMultiInsert(Relation relation, UHeapTuple *tuples, int ntuples, Comman /* needwal can also be passed in by options */ bool needwal = RelationNeedsWAL(relation); bool skipUndo = false; - UPageVerifyParams verifyParams; saveFreeSpace = RelationGetTargetPageFreeSpace(relation, UHEAP_DEFAULT_FILLFACTOR); @@ -3439,6 +3432,7 @@ void UHeapMultiInsert(Relation relation, UHeapTuple *tuples, int ntuples, Comman UHeapFreeOffsetRanges *ufreeOffsetRanges = NULL; bool setTupleXid = false; ShortTransactionId tupleXid = 0; + OffsetNumber verifyOffnum[MaxOffsetNumber] = {InvalidOffsetNumber}; CHECK_FOR_INTERRUPTS(); @@ -3557,6 +3551,7 @@ reacquire_buffer: */ Assert(offnum == ItemPointerGetOffsetNumber(&(uheaptup->ctid))); + verifyOffnum[nthispage] = offnum; nthispage++; } @@ -3655,11 +3650,15 @@ reacquire_buffer: } END_CRIT_SECTION(); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, relation, page, BufferGetBlockNumber(buffer), - InvalidOffsetNumber, NULL, NULL, InvalidXLogRecPtr, NULL, NULL, 0))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); + + if 
(u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { + UpageVerifyHeader((UHeapPageHeader)page, InvalidXLogRecPtr, relation); + for (int k = 0; k < nthispage; k++) { + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, + (USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), verifyOffnum[k]); + } } + pfree(ufreeOffsetRanges); UnlockReleaseBuffer(buffer); if (!skipUndo) { @@ -4199,12 +4198,7 @@ bool UHeapPageFreezeTransSlots(Relation relation, Buffer buf, bool *lockReacquir } cleanup: - UPageVerifyParams verifyParams; - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, relation, page, BufferGetBlockNumber(buf), - InvalidOffsetNumber, NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); - } + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation); if (frozenSlots != NULL) pfree(frozenSlots); @@ -5615,7 +5609,6 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) Page page = NULL; int zoneId; uint16 tdCount = 0; - UPageVerifyParams verifyParams; buffer = ReadBuffer(relation, blkno); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); @@ -5758,11 +5751,7 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) END_CRIT_SECTION(); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, relation, page, blkno, - InvalidOffsetNumber, NULL, NULL, InvalidXLogRecPtr))) { - (void) ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); - } + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation); UnlockReleaseBuffer(buffer); diff --git a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp index be9e6c6a6b..1d7d4315f1 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp +++ 
b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp @@ -282,12 +282,7 @@ void ExecuteUndoActionsPage(UndoRecPtr fromUrp, Relation rel, Buffer buffer, Tra END_CRIT_SECTION(); } - UPageVerifyParams verifyParams; - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, rel, page, BufferGetBlockNumber(buffer), InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); - } + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel); LockBuffer(buffer, BUFFER_LOCK_UNLOCK); } @@ -602,12 +597,7 @@ int UHeapUndoActions(URecVector *urecvec, int startIdx, int endIdx, TransactionI } END_CRIT_SECTION(); - UPageVerifyParams verifyParams; - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_UPAGE, USTORE_VERIFY_COMPLETE, - (char *) &verifyParams, relationData.relation, page, blkno, InvalidOffsetNumber, - NULL, NULL, InvalidXLogRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_UPAGE, (char *) &verifyParams); - } + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relationData.relation); UnlockReleaseBuffer(buffer); diff --git a/src/gausskernel/storage/access/ustore/knl_upage.cpp b/src/gausskernel/storage/access/ustore/knl_upage.cpp index f616b6ca77..a2f8e45667 100644 --- a/src/gausskernel/storage/access/ustore/knl_upage.cpp +++ b/src/gausskernel/storage/access/ustore/knl_upage.cpp @@ -29,6 +29,10 @@ #define ISNULL_BITMAP_NUMBER 2 #define HIGH_BITS_LENGTH_OF_LSN 32 +static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber off, TupleDesc tupDesc, Relation rel, bool isRedo = false); +static void UpageVerifyAllRowptr(UHeapPageHeader header, Relation rel, bool isRedo = false); +static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Relation rel); + template void UPageInit(Page page, Size pageSize, Size specialSize, uint8 tdSlots) { Assert(pageSize == BLCKSZ); @@ -205,8 +209,6 @@ static bool 
CalculateLowerUpperPointers(Page page, OffsetNumber offsetNumber, It /* adjust page header */ uphdr->pd_lower = (uint16) lower; uphdr->pd_upper = (uint16) upper; - FastVerifyUPageRowPtr(itemId, uphdr, offsetNumber); - return true; } @@ -662,384 +664,332 @@ bool VerifyPageHeader(Page page) return true; } -static bool VerifyUPageHeader(Page page, VerifyLevel level, int logLevel) +static int RpCompare(const void *rp1, const void *rp2) { - if (page == NULL) { - return false; - } + return ((RpSort)rp1)->start - ((RpSort)rp2)->start; +} - UHeapPageHeader phdr = (UHeapPageHeader)page; - uint16 pdLower = phdr->pd_lower; - uint16 pdUpper = phdr->pd_upper; - uint16 pdSpecial = phdr->pd_special; - uint16 potentialSpace = phdr->potential_freespace; - uint16 tdCount = phdr->td_count; - TransactionId pruneXid = phdr->pd_prune_xid; - TransactionId xidBase = phdr->pd_xid_base; - - if (pdLower < (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData(phdr)) || pdLower > pdUpper || - pdUpper > pdSpecial || pdSpecial != BLCKSZ || potentialSpace > BLCKSZ) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("upage header invalid: lower = %u, upper = %u, special = %u, potential = %u.", - pdLower, pdUpper, pdSpecial, potentialSpace))); +void FastVerifyUTuple(UHeapDiskTuple diskTup, Buffer buffer) +{ + if (u_sess->attr.attr_storage.ustore_verify_level < (int) USTORE_VERIFY_DEFAULT) { + return; } - if (tdCount <= 0 || tdCount > UHEAP_MAX_TD) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("upage tdcount invalid: tdcount = %u.", tdCount))); + int tdSlot = UHeapTupleHeaderGetTDSlot(diskTup); + int tdCount = UHEAP_MAX_TD; + BlockNumber blockno = InvalidBlockNumber; + Oid relId = InvalidOid; + uint16 reserved = diskTup->reserved; + if (!BufferIsInvalid(buffer)) { + BufferDesc *bufdesc = GetBufferDescriptor(buffer - 1); + Page page = BufferGetPage(buffer); + UHeapPageHeaderData *phdr = (UHeapPageHeaderData *)page; + tdCount = phdr->td_count; + relId = 
bufdesc->tag.rnode.relNode; + blockno = BufferGetBlockNumber(buffer); } - - if (TransactionIdFollows(pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid) || - TransactionIdFollows(xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("upage xidbase invalid: xidbase = %lu, nextxid = %lu.", - xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + if (tdSlot < 0 || tdSlot > tdCount) { + ereport(PANIC, (errmodule(MOD_USTORE), errmsg( + "verify utuple invalid! " + "LogInfo: tdSlot %d, tdcount %u, reserved %u. " + "TransInfo: oid %u, blockno %u.", + tdSlot, tdCount, reserved, relId, blockno))); } +} - return true; +static int getModule(bool isRedo) +{ + return isRedo ? USTORE_VERIFY_MOD_REDO : USTORE_VERIFY_MOD_UPAGE; } -static bool VerifyTDInfo(Page page, int tdId, UHeapTupleTransInfo *tdinfo, VerifyLevel level, - bool tdReuse, TransactionId tupXid, int logLevel) + + +void UpageVerify(UHeapPageHeader header, XLogRecPtr lastRedo, TupleDesc tupDesc, Relation rel, bool isRedo, uint8 mask, OffsetNumber num) { - if (page == NULL || tdinfo == NULL) { - return false; + BYPASS_VERIFY(getModule(isRedo), rel); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST); + uint8 curMask = mask & USTORE_VERIFY_UPAGE_MASK; + if ((curMask & USTORE_VERIFY_UPAGE_HEADER) > 0) { + UpageVerifyHeader(header, lastRedo, rel, isRedo); } - - UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(page); - UHeapPageHeaderData *phdr = (UHeapPageHeaderData *)page; - - if (tdId == UHEAPTUP_SLOT_FROZEN) { - tdinfo->td_slot = tdId; - tdinfo->cid = InvalidCommandId; - tdinfo->xid = InvalidTransactionId; - tdinfo->urec_add = INVALID_UNDO_REC_PTR; - return true; + + if ((curMask & USTORE_VERIFY_UPAGE_ROWS) > 0) { + UpageVerifyAllRowptr(header, rel, isRedo); } - - if (tdId < 1 || tdId > phdr->td_count) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("An out of bounds of the array td_info, tdid = %d.", tdId))); + if ((curMask & 
USTORE_VERIFY_UPAGE_ROW) > 0 && num != InvalidOffsetNumber) { + RowPtr *rowptr = UPageGetRowPtr(header, num); + UpageVerifyRowptr(rowptr, (Page)header, num, rel); } - TD *thistrans = &tdPtr->td_info[tdId - 1]; - - if ((level > USTORE_VERIFY_FAST) && - TransactionIdFollows(thistrans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("td xid invalid: tdid %d, tdxid = %lu, nextxid = %lu.", - tdId, thistrans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } + if (curMask & USTORE_VERIFY_UPAGE_TUPLE) { + if (num != InvalidOffsetNumber) { + UpageVerifyTuple(header, num, tupDesc, rel, isRedo); + } else { + for (OffsetNumber offNum= FirstOffsetNumber; offNum <= UHeapPageGetMaxOffsetNumber((char *)header); offNum++) { + UpageVerifyTuple(header, offNum, tupDesc, rel, isRedo); + } + } - if (!tdReuse && (!TransactionIdIsValid(thistrans->xactid) || - (TransactionIdIsValid(tupXid) && !TransactionIdEquals(thistrans->xactid, tupXid)))) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("tup xid inconsistency with td: tupxid = %lu, tdxid = %lu, urp %lu.", - tupXid, thistrans->xactid, thistrans->undo_record_ptr))); } - tdinfo->td_slot = tdId; - tdinfo->cid = InvalidCommandId; - tdinfo->urec_add = thistrans->undo_record_ptr; - tdinfo->xid = thistrans->xactid; - - return true; } -static bool VerifyUTuple(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, - TupleDesc tupDesc, VerifyLevel level, int logLevel) +void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, Relation rel, bool isRedo) { - RowPtr *rp = UPageGetRowPtr(page, offnum); - UHeapDiskTuple diskTup = (UHeapDiskTuple)UPageGetRowData(page, rp); - int tdId = UHeapTupleHeaderGetTDSlot(diskTup); - bool isInvalidSlot = UHeapTupleHasInvalidXact(diskTup->flag); - TransactionId tupXid = UDiskTupleGetModifiedXid(diskTup, page); - UHeapTupleTransInfo tdinfo; - int tupSize = (rel == NULL) ? 
0 : CalTupSize(rel, diskTup, tupDesc); - if (tupSize > (int)RowPtrGetLen(rp) || (diskTup->reserved != 0 && - diskTup->reserved != 0xFF)) { - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted tuple: tupsize = %d, rpsize = %u.", - tupSize, RowPtrGetLen(rp)))); - } - VerifyTDInfo(page, tdId, &tdinfo, level, isInvalidSlot, tupXid, logLevel); - if (!isInvalidSlot && IS_VALID_UNDO_REC_PTR(tdinfo.urec_add) && - (!TransactionIdIsValid(tdinfo.xid) || (TransactionIdIsValid(tupXid) && - !TransactionIdEquals(tdinfo.xid, tupXid)))) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("tup xid inconsistency with td: tupxid = %lu, tdxid = %lu.", - tupXid, tdinfo.xid))); + if (lastRedo != InvalidXLogRecPtr && PageGetLSN(header) < lastRedo) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|HEADER] Current lsn(%X/%X) in page is smaller than last checkpoint(%X/%X)).", + (uint32)(PageGetLSN(header) >> HIGH_BITS_LENGTH_OF_LSN), (uint32)PageGetLSN(header), + (uint32)(lastRedo >> HIGH_BITS_LENGTH_OF_LSN), (uint32)lastRedo))); } - if (level <= USTORE_VERIFY_FAST || !TransactionIdIsValid(tupXid)) { - return true; + if (unlikely(header->pd_lower < (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData(header)) || + header->pd_lower > header->pd_upper || header->pd_upper > header->pd_special || + header->potential_freespace > BLCKSZ || header->pd_special != BLCKSZ)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|HEADER] lower = %u, upper = %u, special = %u, potential = %u.", + header->pd_lower, header->pd_upper, header->pd_special, header->potential_freespace))); } - if (isInvalidSlot) { - if (!UHeapTransactionIdDidCommit(tupXid)) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("tup xid not commit, tupxid = %lu.", tupXid))); - } - if (TransactionIdEquals(tdinfo.xid, tupXid)) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("td reused but xid 
equal td: tupxid = %lu, tdxid = %lu.", - tupXid, tdinfo.xid))); - } + if (header->td_count <= 0 || header->td_count > UHEAP_MAX_TD) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|HEADER] tdcount invalid: tdcount = %u.", header->td_count))); + } + + if (TransactionIdFollows(header->pd_prune_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|HEADER] prune_xid invalid: prune_xid = %lu, nextxid = %lu.", + header->pd_prune_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); } - - return true; } -static int RpCompare(const void *rp1, const void *rp2) +static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber off, TupleDesc tupDesc, Relation rel, bool isRedo) { - return ((RpSort)rp1)->start - ((RpSort)rp2)->start; -} -static void VerifyUTupleForDML(Page page, OffsetNumber offnum, int logLevel) -{ - if (page == NULL || offnum == InvalidOffsetNumber) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Invalid page, offnum %d.", offnum))); + RowPtr *rp = NULL; + UHeapDiskTuple diskTuple = NULL; + int tdSlot = InvalidTDSlotId; + bool hasInvalidXact = false; + TransactionId tupXid = InvalidTransactionId; + UHeapTupleTransInfo td_info = {InvalidTDSlotId, InvalidTransactionId, InvalidCommandId, INVALID_UNDO_REC_PTR}; + + rp = UPageGetRowPtr(header, off); + if (RowPtrIsNormal(rp)) { + diskTuple = (UHeapDiskTuple)UPageGetRowData(header, rp); + tdSlot = UHeapTupleHeaderGetTDSlot(diskTuple); + hasInvalidXact = UHeapTupleHasInvalidXact(diskTuple->flag); + tupXid = UDiskTupleGetModifiedXid(diskTuple, (Page)header); + int tup_size = 0; + tup_size = (rel == NULL) ? 
0 : CalTupSize(rel, diskTuple, tupDesc); + if (tup_size > (int)RowPtrGetLen(rp) || (diskTuple->reserved != 0 && + diskTuple->reserved != 0xFF)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE]corrupted tuple: tupsize = %d, rpsize = %u.", + tup_size, RowPtrGetLen(rp)))); + return; + } + + td_info.td_slot = tdSlot; + if ((tdSlot != UHEAPTUP_SLOT_FROZEN)) { + if (tdSlot < 1 || tdSlot > header->td_count) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE] tdSlot out of bounds, tdSlot = %d, td_count = %d.", tdSlot, header->td_count))); + return; + } + + UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(header); + TD *this_trans = &tdPtr->td_info[tdSlot - 1]; + td_info.td_slot = tdSlot; + td_info.cid = InvalidCommandId; + td_info.urec_add = this_trans->undo_record_ptr; + td_info.xid = this_trans->xactid; + + TransactionId xid = this_trans->xactid; + if (TransactionIdFollows(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE] tdxid invalid: tdSlot = %d, tdxid = %lu, nextxid = %lu.", + tdSlot, xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + } + + if (TransactionIdIsValid(xid) && !TransactionIdDidCommit(xid) && + TransactionIdPrecedes(xid, g_instance.undo_cxt.globalFrozenXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE] Transaction %lu in tdslot(%d) is smaller than global frozen xid %lu.", + xid, tdSlot, g_instance.undo_cxt.globalFrozenXid))); + } + } + + if (!hasInvalidXact && IS_VALID_UNDO_REC_PTR(td_info.urec_add) && + (!TransactionIdIsValid(td_info.xid) || (TransactionIdIsValid(tupXid) && !TransactionIdEquals(td_info.xid, tupXid)))) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE] tup xid inconsistency with td: tupxid = %lu, tdxid = %lu, 
urp %lu.", + tupXid, td_info.xid, td_info.urec_add))); + return; + } + + if (!TransactionIdIsValid(tupXid)) { + return; + } + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + if (hasInvalidXact) { + if (!UHeapTransactionIdDidCommit(tupXid) && !t_thrd.xlog_cxt.InRecovery) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE] tup xid not commit, tupxid = %lu.", tupXid))); + return; + } + if (TransactionIdEquals(td_info.xid, tupXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE] td reused but xid equal td: tupxid = %lu, tdxid = %lu.", + tupXid, td_info.xid))); + return; + } + } } +} +static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Relation rel) +{ + BYPASS_VERIFY(USTORE_VERIFY_MOD_UPAGE, rel); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) UHeapPageHeader phdr = (UHeapPageHeader)page; int nline = UHeapPageGetMaxOffsetNumber(page); - int i; - RowPtr *rp = UPageGetRowPtr(page, offnum); - UHeapDiskTuple diskTup = (UHeapDiskTuple)UPageGetRowData(page, rp); - uint32 offset = RowPtrGetOffset(rp); - uint32 len = SHORTALIGN(RowPtrGetLen(rp)); - int tdId = UHeapTupleHeaderGetTDSlot(diskTup); - bool isInvalidSlot = UHeapTupleHasInvalidXact(diskTup->flag); - TransactionId tupXid = UDiskTupleGetModifiedXid(diskTup, page); + UHeapDiskTuple diskTuple = (UHeapDiskTuple)UPageGetRowData(page, rowPtr); + uint32 offset = RowPtrGetOffset(rowPtr); + uint32 len = SHORTALIGN(RowPtrGetLen(rowPtr)); + int tdSlot = UHeapTupleHeaderGetTDSlot(diskTuple); + bool hasInvalidXact = UHeapTupleHasInvalidXact(diskTuple->flag); + TransactionId tupXid = UDiskTupleGetModifiedXid(diskTuple, page); + TransactionId locker = UHeapDiskTupleGetRawXid(diskTuple, page); + TransactionId topXid = GetTopTransactionId(); UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(page); - if(!RowPtrIsNormal(rp)) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Rp is 
abnormal (flas:%d, offset %d, len %d).", - rp->flags, rp->offset, rp->len))); + if (!RowPtrIsNormal(rowPtr)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr is abnormal (flags:%d, offset %d, len %d).", + rowPtr->flags, offset, len))); + return; } - if (tdId < 1 || tdId > phdr->td_count) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Invalid tdid %d, td count %d.", tdId, phdr->td_count))); + if (tdSlot < 1 || tdSlot > phdr->td_count) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ROWPTR] Invalid tdSlot %d, td count of page is %d.", tdSlot, phdr->td_count))); + return; } - TD *thistrans = &tdPtr->td_info[tdId - 1]; - - if (!IS_VALID_UNDO_REC_PTR(thistrans->undo_record_ptr) || isInvalidSlot || - !TransactionIdEquals(thistrans->xactid, GetTopTransactionId()) || - !TransactionIdEquals(thistrans->xactid, tupXid)) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Td xid invalid: tdid %d, tdxid %lu, topxid %lu, " - "tupxid %lu, isInvalidSlot %d.", tdId, thistrans->xactid, GetTopTransactionId(), - tupXid, isInvalidSlot))); + TD *thistrans = &tdPtr->td_info[tdSlot - 1]; + UndoRecPtr tdUrp = thistrans->undo_record_ptr; + TransactionId tdXid = thistrans->xactid; + if (UHEAP_XID_IS_LOCK(diskTuple->flag)) { + if (!TransactionIdEquals(locker, topXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ROWPTR] locker invalid: locker %lu, topxid %lu.", locker, topXid))); + return; + } + } else if (!IS_VALID_UNDO_REC_PTR(tdUrp) || hasInvalidXact || !TransactionIdEquals(tdXid, locker) || + !TransactionIdEquals(tdXid, topXid) || !TransactionIdEquals(tdXid, tupXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ROWPTR] Td xid invalid: tdSlot %d, tdxid %lu, topxid %lu, " + "tupxid %lu, isInvalidSlot %d.", tdSlot, tdXid, topXid, tupXid, 
hasInvalidXact))); + return; } - - for (i = FirstOffsetNumber; i <= nline; i++) { + for (int i = FirstOffsetNumber; i <= nline; i++) { if (i == offnum) { continue; } - rp = UPageGetRowPtr(page, i); + RowPtr *rp = UPageGetRowPtr(page, i); if (RowPtrIsNormal(rp)) { uint32 tupOffset = RowPtrGetOffset(rp); uint32 tupLen = SHORTALIGN(RowPtrGetLen(rp)); if (tupOffset < offset) { if (tupOffset + tupLen > offset) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Abnormal rowptr data, flags %d, offset %u, len %d, alignTupLen %u, targetRpOffset %u", + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr data is abnormal, flags %d, offset %u," + " len %d, alignTupLen %u, targetRpOffset %u", rp->flags, tupOffset, RowPtrGetLen(rp), tupLen, offset))); } } else if (offset + len > tupOffset) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Abnormal rowptr data, flags %d, offset %u, len %d, alignTupLen %u, targetRpOffset %u, targetRpLen %u.", + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr data is abnormal, flags %d, offset %u," + " len %d, alignTupLen %u, targetRpOffset %u, targetRpLen %u.", rp->flags, tupOffset, RowPtrGetLen(rp), tupLen, offset, len))); } } } - - return; } - -static bool VerifyUPageRowPtr(Relation rel, Page page, BlockNumber blkno, TupleDesc tupDesc, - VerifyLevel level, int logLevel) +static void UpageVerifyAllRowptr(UHeapPageHeader header, Relation rel, bool isRedo) { - UHeapPageHeader phdr = (UHeapPageHeader)page; - uint16 pdLower = phdr->pd_lower; - uint16 pdUpper = phdr->pd_upper; - uint16 pdSpecial = phdr->pd_special; - int nline = UHeapPageGetMaxOffsetNumber(page); + int nline = UHeapPageGetMaxOffsetNumber((char *)header); int tdSlot = 0; int nstorage = 0; int i; - RpSortData rpBase[MaxPossibleUHeapTuplesPerPage]; - RpSort rpSortPtr = rpBase; + RpSortData rowptrs[MaxPossibleUHeapTuplesPerPage]; + RpSort sortPtr 
= rowptrs; RowPtr *rp = NULL; - UHeapTupleTransInfo tdinfo; for (i = FirstOffsetNumber; i <= nline; i++) { - rp = UPageGetRowPtr(page, i); + rp = UPageGetRowPtr(header, i); if (RowPtrIsNormal(rp)) { - rpSortPtr->start = RowPtrGetOffset(rp); - rpSortPtr->end = rpSortPtr->start + SHORTALIGN(RowPtrGetLen(rp)); - rpSortPtr->offset = i; - if (rpSortPtr->start < pdUpper || rpSortPtr->end > pdSpecial) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted line pointer: offset = %u, rpstart = %u, " + sortPtr->start = (int)RowPtrGetOffset(rp); + sortPtr->end = sortPtr->start + (int)SHORTALIGN(RowPtrGetLen(rp)); + + sortPtr->offset = i; + if (sortPtr->start < header->pd_upper || sortPtr->end > header->pd_special) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ALLROWPTR]corrupted rowptr: offset = %u, rpstart = %u, " "rplen = %u, pdlower = %u, pdupper = %u.", - i, RowPtrGetOffset(rp), RowPtrGetLen(rp), pdLower, pdUpper))); + i, RowPtrGetOffset(rp), RowPtrGetLen(rp), header->pd_lower, header->pd_upper))); + return; } - rpSortPtr++; - VerifyUTuple(rel, page, blkno, i, tupDesc, level, logLevel); + sortPtr++; } else if (RowPtrIsDeleted(rp)) { - bool tdReuse = (RowPtrGetVisibilityInfo(rp) & ROWPTR_XACT_INVALID); tdSlot = RowPtrGetTDSlot(rp); if (tdSlot == UHEAPTUP_SLOT_FROZEN) { - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("rowptr(offsetnumber = %d) tdslot frozen, tdid = %d.", i, tdSlot))); + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ALLROWPTR]rowptr(offsetnumber = %d) tdslot frozen, tdSlot = %d.", i, tdSlot))); + return; + } + + UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(header); + TD * this_trans = &tdPtr->td_info[tdSlot - 1]; + if (tdSlot < 1 || tdSlot > header->td_count) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ALLROWPTR]tdSlot out of bounds, tdSlot = %d, td_count = %d.", 
tdSlot, header->td_count))); + return; + } + + if (TransactionIdFollows(this_trans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ALLROWPTR]tdxid invalid: tdSlot %d, tdxid = %lu, nextxid = %lu.", + tdSlot, this_trans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + return; } - VerifyTDInfo(page, tdSlot, &tdinfo, level, tdReuse, InvalidTransactionId, logLevel); } } - - nstorage = rpSortPtr - rpBase; - - if (nstorage <= 1 || level <= USTORE_VERIFY_FAST) { - return true; + + nstorage = sortPtr - rowptrs; + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + if (nstorage <= 1) { + return; } - - qsort((char *)rpBase, nstorage, sizeof(RpSortData), RpCompare); - + + qsort((char *)rowptrs, nstorage, sizeof(RpSortData), RpCompare); + for (i = 0; i < nstorage - 1; i++) { - RpSort tempPtr1 = &rpBase[i]; - RpSort tempPtr2 = &rpBase[i + 1]; - if (tempPtr1->end > tempPtr2->start) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted line pointer: rp1offset %u, rp1start = %u, rp1end = %u, " + RpSort temp_ptr1 = &rowptrs[i]; + RpSort temp_ptr2 = &rowptrs[i + 1]; + if (temp_ptr1->end > temp_ptr2->start) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|ALLROWPTR]corrupted line pointer: rp1offset %u, rp1start = %u, rp1end = %u, " "rp2offset = %u, rp2start = %u, rp2end = %u.", - tempPtr1->offset, tempPtr1->start, tempPtr1->end, - tempPtr2->offset, tempPtr2->start, tempPtr2->end))); - } - } - - return true; -} - -/* - * Checks whether the LSN in the header of the uheap page is smaller than the value - * of the last checkpoint. This check item is mainly used for verification after the page - * is modified in parallel redo mode.. 
- */ -static void ValidateUPageLsn(Page page, XLogRecPtr lastCheckpoint, int logLevel) -{ - if (page == NULL || lastCheckpoint == InvalidXLogRecPtr) { - return; - } - - if (PageGetLSN(page) < lastCheckpoint) { - ereport(logLevel, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Current lsn(%X/%X) in page is smaller than last checkpoint(%X/%X)).", - (uint32)(PageGetLSN(page) >> HIGH_BITS_LENGTH_OF_LSN), (uint32)PageGetLSN(page), - (uint32)(lastCheckpoint >> HIGH_BITS_LENGTH_OF_LSN), (uint32)lastCheckpoint))); - } -} - -bool VerifyUPageValid(UPageVerifyParams *verifyParams) -{ - if (verifyParams == NULL) { - return false; - } - - VerifyLevel vLevel = verifyParams->bvInfo.vLevel; - Relation rel = verifyParams->bvInfo.rel; - Page page = verifyParams->page; - BlockNumber blkno = verifyParams->blk; - TupleDesc tupDesc = verifyParams->tupDesc; - OffsetNumber offnum = verifyParams->offnum; - XLogRecPtr latestRedo = InvalidXLogRecPtr; - int logLevel = (verifyParams->bvInfo.process == ANALYZE_VERIFY) ? 
WARNING : ERROR; - - VerifyUPageHeader(page, vLevel, logLevel); - if (vLevel <= USTORE_VERIFY_FAST && (verifyParams->bvInfo.process == DML_VERIFY)) { - VerifyUTupleForDML(page, offnum, logLevel); - } else { - VerifyUPageRowPtr(rel, page, blkno, tupDesc, vLevel, logLevel); - } - ValidateUPageLsn(page, latestRedo, logLevel); - return true; -} - -bool VerifyRedoUPageValid(URedoVerifyParams*verifyParams) -{ - if (verifyParams == NULL) { - return false; - } - - VerifyLevel vLevel = verifyParams->pageVerifyParams.bvInfo.vLevel; - Relation rel = verifyParams->pageVerifyParams.bvInfo.rel; - Page page = verifyParams->pageVerifyParams.page; - BlockNumber blkno = verifyParams->pageVerifyParams.blk; - TupleDesc tupDesc = verifyParams->pageVerifyParams.tupDesc; - XLogRecPtr latestRedo = verifyParams->latestRedo; - - VerifyUPageHeader(page, vLevel, PANIC); - VerifyUPageRowPtr(rel, page, blkno, tupDesc, vLevel, PANIC); - ValidateUPageLsn(page, latestRedo, PANIC); - return true; -} - - -void FastVerifyUTuple(UHeapDiskTuple diskTup, Buffer buffer) -{ - if (u_sess->attr.attr_storage.ustore_verify_level < (int) USTORE_VERIFY_DEFAULT) { - return; - } - - int tdId = UHeapTupleHeaderGetTDSlot(diskTup); - int tdCount = UHEAP_MAX_TD; - BlockNumber blockno = InvalidBlockNumber; - Oid relId = InvalidOid; - uint16 reserved = diskTup->reserved; - if (!BufferIsInvalid(buffer)) { - BufferDesc *bufdesc = GetBufferDescriptor(buffer - 1); - Page page = BufferGetPage(buffer); - UHeapPageHeaderData *phdr = (UHeapPageHeaderData *)page; - tdCount = phdr->td_count; - relId = bufdesc->tag.rnode.relNode; - blockno = BufferGetBlockNumber(buffer); - } - if (tdId < 0 || tdId > tdCount) { - ereport(PANIC, (errmodule(MOD_USTORE), errmsg( - "verify utuple invalid! " - "LogInfo: tdid %d, tdcount %u, reserved %u. 
" - "TransInfo: oid %u, blockno %u.", - tdId, tdCount, reserved, relId, blockno))); - } -} - -RowPtr *FastVerifyUPageRowPtr(RowPtr *rp, UHeapPageHeader uphdr, OffsetNumber offsetNumber) -{ - OffsetNumber maxOffsetNum = UHeapPageGetMaxOffsetNumber((char *)uphdr); - if (offsetNumber > maxOffsetNum) { - return rp; - } - if (u_sess->attr.attr_storage.ustore_verify_level >= (int) USTORE_VERIFY_DEFAULT) { - if (RowPtrIsNormal(rp)) { - if (RowPtrGetOffset(rp) < uphdr->pd_upper || RowPtrGetOffset(rp) >= uphdr->pd_special || - RowPtrGetOffset(rp) + RowPtrGetLen(rp) > BLCKSZ) { - ereport(PANIC, - (errmodule(MOD_USTORE), - errmsg("row pointer error, offset:%u, flags:%u, len:%u, upper:%u, special:%u.", - RowPtrGetOffset(rp), rp->flags, RowPtrGetLen(rp), (uphdr)->pd_upper, - (uphdr)->pd_special))); - } - } else if (RowPtrGetLen(rp) != 0) { - ereport(PANIC, - (errmodule(MOD_USTORE), errmsg("row pointer's length is too long, offset:%u, flags:%u, len:%u.", - RowPtrGetOffset(rp), (rp)->flags, RowPtrGetLen(rp)))); + temp_ptr1->offset, temp_ptr1->start, temp_ptr1->end, + temp_ptr2->offset, temp_ptr2->start, temp_ptr2->end))); + return; } } - return rp; } \ No newline at end of file diff --git a/src/gausskernel/storage/access/ustore/knl_uredo.cpp b/src/gausskernel/storage/access/ustore/knl_uredo.cpp index 114ef6c537..c6951b170e 100644 --- a/src/gausskernel/storage/access/ustore/knl_uredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uredo.cpp @@ -172,7 +172,7 @@ static UndoRecPtr PrepareAndInsertUndoRecordForInsertRedo(XLogReaderState *recor undo::RedoUndoMeta(record, &undometa, xlundohdr->urecptr, t_thrd.ustore_cxt.urecvec->LastRecord(), t_thrd.ustore_cxt.urecvec->LastRecordSize()); - VerifyUndoRecordValid(undorec); + UndoRecordVerify(undorec); UHeapResetPreparedUndo(); } @@ -267,7 +267,6 @@ void UHeapXlogInsert(XLogReaderState *record) TupleBuffer tbuf; bool allReplay = !AmPageRedoWorker() || !SUPPORT_USTORE_UNDO_WORKER; bool onlyReplayUndo = allReplay ? 
false : parallel_recovery::DoPageRedoWorkerReplayUndo(); - URedoVerifyParams verifyParams; WHITEBOX_TEST_STUB(UHEAP_XLOG_INSERT_FAILED, WhiteboxDefaultErrorEmit); @@ -279,11 +278,10 @@ void UHeapXlogInsert(XLogReaderState *record) action = GetInsertRedoAction(record, &buffer, skipSize); if (action == BLK_NEEDS_REDO) { PerformInsertRedoAction(record, buffer.buf, urecptr, tbuf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, BufferGetPage(buffer.buf), blkno, InvalidOffsetNumber, NULL, - NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + Page page = BufferGetPage(buffer.buf); + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { @@ -403,7 +401,7 @@ static UndoRecPtr PrepareAndInsertUndoRecordForDeleteRedo(XLogReaderState *recor } undo::RedoUndoMeta(record, &undometa, xlundohdr->urecptr, t_thrd.ustore_cxt.urecvec->LastRecord(), t_thrd.ustore_cxt.urecvec->LastRecordSize()); - VerifyUndoRecordValid(undorec); + UndoRecordVerify(undorec); UHeapResetPreparedUndo(); } @@ -454,7 +452,6 @@ static void UHeapXlogDelete(XLogReaderState *record) TupleBuffer tbuf; XlUHeapDelete *xlrec = (XlUHeapDelete *)XLogRecGetData(record); XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete); - URedoVerifyParams verifyParams; bool allReplay = !AmPageRedoWorker() || !SUPPORT_USTORE_UNDO_WORKER; bool onlyReplayUndo = allReplay ? 
false : parallel_recovery::DoPageRedoWorkerReplayUndo(); @@ -476,11 +473,10 @@ static void UHeapXlogDelete(XLogReaderState *record) action = XLogReadBufferForRedo(record, 0, &buffer); if (action == BLK_NEEDS_REDO) { PerformDeleteRedoAction(record, &utup, &buffer, urecptr); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, BufferGetPage(buffer.buf), blkno, InvalidOffsetNumber, - NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + Page page = BufferGetPage(buffer.buf); + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { @@ -501,7 +497,6 @@ static void UHeapXlogFreezeTdSlot(XLogReaderState *record) BlockNumber blkno; int nFrozen = xlrec->nFrozen; int slotNo = 0; - URedoVerifyParams verifyParams; WHITEBOX_TEST_STUB(UHEAP_XLOG_FREEZE_TD_SLOT_FAILED, WhiteboxDefaultErrorEmit); (void) XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno); @@ -540,10 +535,9 @@ static void UHeapXlogFreezeTdSlot(XLogReaderState *record) PageSetLSN(page, lsn); MarkBufferDirty(buffer.buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, page, blkno, InvalidOffsetNumber, NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { @@ -561,7 +555,6 @@ static void UHeapXlogInvalidTdSlot(XLogReaderState *record) XLogRedoAction action; int slotNo = 0; BlockNumber blkno = InvalidBlockNumber; - URedoVerifyParams verifyParams; WHITEBOX_TEST_STUB(UHEAP_XLOG_INVALID_TD_SLOT_FAILED, WhiteboxDefaultErrorEmit); action = XLogReadBufferForRedo(record, 0, &buffer); @@ -588,10 +581,8 @@ static void 
UHeapXlogInvalidTdSlot(XLogReaderState *record) PageSetLSN(page, lsn); MarkBufferDirty(buffer.buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, page, blkno, InvalidOffsetNumber, NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { @@ -713,7 +704,6 @@ static void UHeapXlogClean(XLogReaderState *record) BlockNumber blkno = InvalidBlockNumber; XLogRedoAction action; XLogRecPtr lsn = record->EndRecPtr; - URedoVerifyParams verifyParams; WHITEBOX_TEST_STUB(UHEAP_XLOG_CLEAN_FAILED, WhiteboxDefaultErrorEmit); @@ -738,11 +728,10 @@ static void UHeapXlogClean(XLogReaderState *record) if (action == BLK_NEEDS_REDO) { PerformCleanRedoAction(record, &buffer, &freespace); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, BufferGetPage(buffer.buf), blkno, InvalidOffsetNumber, NULL, NULL, - t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + Page page = BufferGetPage(buffer.buf); + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { @@ -1006,10 +995,10 @@ static UndoRecPtr PrepareAndInsertUndoRecordForUpdateRedo(XLogReaderState *recor URecVector *urecvec = t_thrd.ustore_cxt.urecvec; UndoRecord *undorec = (*urecvec)[0]; - VerifyUndoRecordValid(undorec); - if (!inplaceUpdate) { - UndoRecord *newundorec = (*urecvec)[1]; - VerifyUndoRecordValid(newundorec); + if (inplaceUpdate) { + UndoRecordVerify(undorec); + } else { + UndoRecordVerify(newundorec); } UHeapResetPreparedUndo(); } @@ -1290,7 +1279,6 @@ static void UHeapXlogUpdate(XLogReaderState *record) uint16 *tdCount = NULL; 
XlUHeapUpdate *xlrec = (XlUHeapUpdate *)XLogRecGetData(record); bool inplaceUpdate = !(xlrec->flags & XLZ_NON_INPLACE_UPDATE); - URedoVerifyParams verifyParams; WHITEBOX_TEST_STUB(UHEAP_XLOG_UPDATE_FAILED, WhiteboxDefaultErrorEmit); @@ -1325,11 +1313,10 @@ static void UHeapXlogUpdate(XLogReaderState *record) if (newaction == BLK_NEEDS_REDO) { newlen = GetUHeapDiskTupleFromUpdateNewRedoData(record, &tuples, &affixLens, tbuf, sameBlock); freespace = PerformUpdateNewRedoAction(record, &buffers, &tuples, newlen, xlnewundohdr, urecptr, sameBlock); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, - (char *) &verifyParams, NULL, BufferGetPage(buffers.newbuffer.buf), newblk, - InvalidOffsetNumber, NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + Page page = BufferGetPage(buffers.newbuffer.buf); + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffers.newbuffer.buf) && buffers.newbuffer.buf != buffers.oldbuffer.buf) { @@ -1638,7 +1625,6 @@ static void UHeapXlogMultiInsert(XLogReaderState *record) uint16 *tdCount = NULL; bool allReplay = !AmPageRedoWorker() || !SUPPORT_USTORE_UNDO_WORKER; bool onlyReplayUndo = allReplay ? 
false : parallel_recovery::DoPageRedoWorkerReplayUndo(); - URedoVerifyParams verifyParams; WHITEBOX_TEST_STUB(UHEAP_XLOG_MULTI_INSERT_FAILED, WhiteboxDefaultErrorEmit); @@ -1654,11 +1640,10 @@ static void UHeapXlogMultiInsert(XLogReaderState *record) /* Apply the wal for data */ if (action == BLK_NEEDS_REDO) { PerformMultiInsertRedoAction(record, xlrec, &buffer, urecptr, ufreeOffsetRanges); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, BufferGetPage(buffer.buf), blkno, InvalidOffsetNumber, NULL, NULL, - t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + Page page = BufferGetPage(buffer.buf); + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } pfree(ufreeOffsetRanges); @@ -1675,7 +1660,6 @@ static void UHeapXlogBaseShift(XLogReaderState *record) RedoBufferInfo buffer = { 0 }; XLogRecPtr lsn = record->EndRecPtr; BlockNumber blkno = InvalidBlockNumber; - URedoVerifyParams verifyParams; if (XLogReadBufferForRedo(record, HEAP_BASESHIFT_ORIG_BLOCK_NUM, &buffer) == BLK_NEEDS_REDO) { char *maindata = XLogRecGetData(record); @@ -1686,10 +1670,9 @@ static void UHeapXlogBaseShift(XLogReaderState *record) PageSetLSN(page, lsn); MarkBufferDirty(buffer.buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, page, blkno, InvalidOffsetNumber, NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { @@ -1706,7 +1689,6 @@ static void UHeapXlogExtendTDSlot(XLogReaderState *record) errno_t ret = EOK; XlUHeapExtendTdSlots *xlrec = NULL; BlockNumber blkno = InvalidBlockNumber; - URedoVerifyParams verifyParams; 
xlrec = (XlUHeapExtendTdSlots *)XLogRecGetData(record); action = XLogReadBufferForRedo(record, 0, &buffer); @@ -1758,10 +1740,9 @@ static void UHeapXlogExtendTDSlot(XLogReaderState *record) PageSetLSN(page, lsn); MarkBufferDirty(buffer.buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, page, blkno, InvalidOffsetNumber, NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { @@ -1784,7 +1765,6 @@ static void UHeapXlogFreeze(XLogReaderState *record) UHeapTupleData utuple; RelFileNode rnode; BlockNumber blkno = InvalidBlockNumber; - URedoVerifyParams verifyParams; (void)XLogRecGetBlockTag(record, HEAP_FREEZE_ORIG_BLOCK_NUM, &rnode, NULL, &blkno); /* @@ -1837,10 +1817,9 @@ static void UHeapXlogFreeze(XLogReaderState *record) PageSetLSN(page, lsn); MarkBufferDirty(buffer.buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, (char *) &verifyParams, - NULL, page, blkno, InvalidOffsetNumber, NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buffer.buf)) { UnlockReleaseBuffer(buffer.buf); @@ -1976,7 +1955,6 @@ static void UHeapUndoXlogPage(XLogReaderState *record) XLogRedoAction action = XLogReadBufferForRedo(record, 0, &redoBuffInfo); Buffer buf = redoBuffInfo.buf; BlockNumber blkno = InvalidBlockNumber; - URedoVerifyParams verifyParams; XLogRecGetBlockTag(record, 0, NULL, NULL, &blkno); if (action == BLK_NEEDS_REDO) { @@ -2004,11 +1982,9 @@ static void UHeapUndoXlogPage(XLogReaderState *record) PageSetLSN(page, record->EndRecPtr); 
MarkBufferDirty(buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, - (char *) &verifyParams, NULL, page, blkno, InvalidOffsetNumber, - NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buf)) @@ -2022,7 +1998,6 @@ static void UHeapUndoXlogResetXid(XLogReaderState *record) XlUHeapUndoResetSlot *xlrec = (XlUHeapUndoResetSlot *)XLogRecGetData(record); XLogRedoAction action = XLogReadBufferForRedo(record, 0, &redoBuffInfo); BlockNumber blkno = InvalidBlockNumber; - URedoVerifyParams verifyParams; (void) XLogRecGetBlockTag(record, 0, NULL, NULL, &blkno); Buffer buf = redoBuffInfo.buf; @@ -2031,11 +2006,10 @@ static void UHeapUndoXlogResetXid(XLogReaderState *record) PageSetLSN(BufferGetPage(buf), lsn); MarkBufferDirty(buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, - (char *) &verifyParams, NULL, BufferGetPage(buf), blkno, InvalidOffsetNumber, - NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + Page page = BufferGetPage(buf); + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buf)) @@ -2051,7 +2025,6 @@ static void UHeapUndoXlogAbortSpecinsert(XLogReaderState *record) XLogRedoAction action = XLogReadBufferForRedo(record, 0, &redoBuffInfo); Buffer buf = redoBuffInfo.buf; BlockNumber blkno = InvalidBlockNumber; - URedoVerifyParams verifyParams; (void) XLogRecGetBlockTag(record, 0, NULL, NULL, &blkno); @@ -2098,11 +2071,10 @@ static void UHeapUndoXlogAbortSpecinsert(XLogReaderState *record) PageSetLSN(BufferGetPage(buf), lsn); MarkBufferDirty(buf); - if (unlikely(ConstructUstoreVerifyParam(USTORE_VERIFY_MOD_REDO, USTORE_VERIFY_FAST, 
- (char *) &verifyParams, NULL, BufferGetPage(buf), blkno, InvalidOffsetNumber, - NULL, NULL, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr))) { - ExecuteUstoreVerify(USTORE_VERIFY_MOD_REDO, (char *) &verifyParams); - } + + Page page = BufferGetPage(buf); + UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, + NULL, true); } if (BufferIsValid(buf)) diff --git a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp index 12b34540bd..c41bd3efc3 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp @@ -484,7 +484,7 @@ static UndoRecordState LoadUndoRecord(UndoRecord *urec, TransactionId *lastXid) urec->Load(false); state = undo::CheckUndoRecordValid(urec->Urp(), true, NULL); if (state == UNDO_RECORD_NORMAL) { - VerifyUndoRecordValid(urec, true); + UndoRecordVerify(urec); } } PG_CATCH(); @@ -622,3 +622,83 @@ bool InplaceSatisfyUndoRecord(_in_ UndoRecord *urec, _in_ BlockNumber blkno, _in return false; } + +void UndoRecordVerify(_in_ UndoRecord *urec) +{ + UNDO_BYPASS_VERIFY; + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_DEFAULT) + if (!TransactionIdIsValid(urec->Xid())) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu is invalid"), urec->Xid()))); + } + if (TransactionIdIsValid(urec->Xid()) && + TransactionIdFollowsOrEquals(urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu >= nextXid %lu"), + urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + } + if (TransactionIdIsValid(urec->OldXactId()) && + TransactionIdFollowsOrEquals(urec->OldXactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
oldXactId %lu >= nextXid %lu"), + urec->OldXactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + } + if (!(IS_VALID_UNDO_REC_PTR(urec->Urp()))) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu is invalid"), urec->Urp()))); + return; + } + + int zoneId = (int)UNDO_PTR_GET_ZONE_ID(urec->Urp()); + undo::UndoZone *uzone = undo::UndoZoneGroup::GetUndoZone(zoneId, false); + Assert(uzone != NULL); + if (uzone == NULL) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uzone is null. zoneId %d urp %lu"), zoneId, urec->Urp()))); + return; + } + if (IS_VALID_UNDO_REC_PTR(urec->Urp()) && urec->Urp() > uzone->GetInsertURecPtr()) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu > insertURecPtr %lu, zoneId %d"), + urec->Urp(), uzone->GetInsertURecPtr(), zoneId))); + } + if (IS_VALID_UNDO_REC_PTR(urec->Blkprev())) { + UndoRecPtr blkPrevZid = UNDO_PTR_GET_ZONE_ID(urec->Blkprev()); + undo::UndoZone *blkPrevZone = undo::UndoZoneGroup::GetUndoZone(blkPrevZid, false); + if (urec->Blkprev() > blkPrevZone->GetInsertURecPtr()) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. Blkprev %lu > insertURecPtr %lu, zoneId %d"), + urec->Blkprev(), uzone->GetInsertURecPtr(), zoneId))); + } + } + if ((urec->Uinfo() & UNDO_UREC_INFO_TRANSAC) != 0 || (urec->Uinfo() & UNDO_UREC_INFO_BLOCK) != 0) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d error"), (int)urec->Uinfo()))); + } + if ((urec->Uinfo() & UNDO_UREC_INFO_OLDTD) != 0 && !TransactionIdIsValid(urec->OldXactId())) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
uinfo %d, oldXactId %lu is invalid"), + (int)urec->Uinfo(), urec->OldXactId()))); + } + if ((urec->Uinfo() & UNDO_UREC_INFO_HAS_PARTOID) != 0 && urec->Partitionoid() == InvalidOid) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu, uinfo %d, partitionoid is invalid"), + urec->Urp(), (int)urec->Uinfo()))); + } + if ((urec->Uinfo() & UNDO_UREC_INFO_HAS_TABLESPACEOID) != 0 && urec->Tablespace() == InvalidOid) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu, uinfo %d, tablespace is invalid"), + urec->Urp(), (int)urec->Uinfo()))); + } + if (urec->Utype() <= UNDO_UNKNOWN || urec->Utype() > UNDO_UPDATE) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. utype %d is invalid"), urec->Utype()))); + } + if ((urec->Utype() == UNDO_INSERT && urec->PayLoadLen() != 0) || + (urec->Utype() == UNDO_INSERT && (urec->Uinfo() & UNDO_UREC_INFO_PAYLOAD) != 0)) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
utype %d , payLoadLen %hu, uinfo %d"), + urec->Utype(), urec->PayLoadLen(), (int)urec->Uinfo()))); + } +} \ No newline at end of file diff --git a/src/gausskernel/storage/access/ustore/knl_uundovec.cpp b/src/gausskernel/storage/access/ustore/knl_uundovec.cpp index d3af3ecee6..a378c2c491 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundovec.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundovec.cpp @@ -398,7 +398,7 @@ static bool LoadUndoRecordRange(UndoRecord *urec, Buffer *buffer) urec->Load(true); state = undo::CheckUndoRecordValid(urec->Urp(), true, NULL); if (state == UNDO_RECORD_NORMAL) { - VerifyUndoRecordValid(urec, true); + UndoRecordVerify(urec); } } PG_CATCH(); @@ -665,30 +665,6 @@ int PrepareUndoRecord(_in_ URecVector *urecvec, _in_ UndoPersistence upersistenc CheckLastRecordSize(undoSize, xlundometa); return UNDO_RET_SUCC; } - -void VerifyUndoRecordValid(UndoRecord *urec, bool needCheckXidInvalid) -{ - if (u_sess->attr.attr_storage.ustore_verify_level < (int) USTORE_VERIFY_DEFAULT) { - return; - } - - bool undoRecordNotValid = TransactionIdFollowsOrEquals(urec->Xid(), - t_thrd.xact_cxt.ShmemVariableCache->nextXid) || urec->Utype() == UNDO_UNKNOWN; - if (needCheckXidInvalid) { - undoRecordNotValid = undoRecordNotValid && TransactionIdIsValid(urec->Xid()); - } - if (undoRecordNotValid) { - ereport(PANIC, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT( - "undorec xid invalid %lu,nextXid xid %lu:" - "global recycle xid %lu, globalFrozenXid %lu, utype %d, urp_ %lu, uinfo %d "), - urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid, - pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid), - pg_atomic_read_u64(&g_instance.undo_cxt.globalFrozenXid), - urec->Utype(), urec->Urp(), urec->Uinfo()))); - } -} - void InsertPreparedUndo(_in_ URecVector *urecvec, _in_ XLogRecPtr lsn) { if (urecvec == NULL) { diff --git a/src/gausskernel/storage/access/ustore/knl_uverify.cpp b/src/gausskernel/storage/access/ustore/knl_uverify.cpp deleted file mode 
100644 index dc53db924f..0000000000 --- a/src/gausskernel/storage/access/ustore/knl_uverify.cpp +++ /dev/null @@ -1,206 +0,0 @@ -/* -* Copyright (c) 2022 Huawei Technologies Co.,Ltd. -* -* openGauss is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* -* http://license.coscl.org.cn/MulanPSL2 -* -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -* --------------------------------------------------------------------------------------- -* -* knl_uverify.cpp -* Implementation of ustore verification -* -* -* IDENTIFICATION -* src/gausskernel/storage/access/ustore/knl_uverify.cpp -* -* --------------------------------------------------------------------------------------- -*/ - - -#include "postgres.h" - -#include "storage/buf/bufmgr.h" -#include "utils/rel.h" -#include "access/transam.h" -#include "access/ubtree.h" -#include "access/ustore/knl_upage.h" -#include "storage/freespace.h" - - bool PrecheckUstoreVerifyParams(uint32 module, Relation rel, bool analyzeVerify) - { - uint32 mainModule = module & USTORE_VERIFY_MOD_MASK; - - /* Check whether the module verification function is enabled. 
*/ - if ((analyzeVerify && (u_sess->attr.attr_storage.ustore_verify_module & mainModule) == 0) || - (!analyzeVerify && ((u_sess->attr.attr_storage.ustore_verify_module & mainModule) == 0 || - u_sess->attr.attr_storage.ustore_verify_level <= USTORE_VERIFY_DEFAULT))) { - return false; - } - if ((mainModule <= USTORE_VERIFY_MOD_INVALID) || (mainModule > USTORE_VERIFY_MOD_MASK)) { - return false; - } - - if (!analyzeVerify && (rel != NULL && !RelationIsUstoreFormat(rel) && !RelationIsUstoreIndex(rel))) { - return false; - } - if (analyzeVerify && (rel != NULL && !RelationIsUstoreFormat(rel))) { - return false; - } - return true; -} - - /* Construct the ustore verify parameter structure. */ -bool ConstructUstoreVerifyParam(uint32 module, VerifyLevel vLevel, char *paramSt, Relation rel, Page page, - BlockNumber blk, OffsetNumber offnum, TupleDesc tupDesc, GPIScanDesc gpiScan, XLogRecPtr lastestRedo, undo::UndoZone *uZone, - undo::TransactionSlot *slot, int process) -{ - errno_t rc = EOK; - bool finishSetParams = false; - uint32 mainModule = module & USTORE_VERIFY_MOD_MASK; - uint32 subModule = module & USTORE_VERIFY_SUB_MOD_MASK; - bool analyzeVerify = (process == ANALYZE_VERIFY); - - /* Precheck verify parameters. 
*/ - if (!PrecheckUstoreVerifyParams(module, rel, analyzeVerify)) { - return false; - } - - switch (mainModule) { - case USTORE_VERIFY_MOD_UPAGE: { - rc = memset_s(paramSt, sizeof(UPageVerifyParams), 0, sizeof(UPageVerifyParams)); - securec_check(rc, "\0", "\0"); - UPageVerifyParams *params = (UPageVerifyParams *) paramSt; - params->bvInfo.process = process; - params->bvInfo.vLevel = vLevel; - params->bvInfo.rel = rel; - params->page = page; - params->blk = blk; - params->offnum = offnum; - params->tupDesc = tupDesc; - finishSetParams = true; - } - break; - case USTORE_VERIFY_MOD_UBTREE: { - rc = memset_s(paramSt, sizeof(UBtreePageVerifyParams), 0, sizeof(UBtreePageVerifyParams)); - securec_check(rc, "\0", "\0"); - UBtreePageVerifyParams *params = (UBtreePageVerifyParams *) paramSt; - params->bvInfo.process = process; - params->bvInfo.vLevel = vLevel; - params->bvInfo.rel = rel; - params->page = page; - params->gpiScan = gpiScan; - finishSetParams = true; - } - break; - case USTORE_VERIFY_MOD_UNDO: { - rc = memset_s(paramSt, sizeof(UndoVerifyParams), 0, sizeof(UndoVerifyParams)); - securec_check(rc, "\0", "\0"); - UndoVerifyParams *params = (UndoVerifyParams *) paramSt; - params->bvInfo.process = process; - params->bvInfo.vLevel = vLevel; - if (subModule == USTORE_VERIFY_UNDO_SUB_UNDOZONE) { - params->subModule = UNDO_VERIFY_UNDOZONE; - params->paramVal.undoZone = uZone; - } else if (subModule == USTORE_VERIFY_UNDO_SUB_TRANSLOT) { - params->subModule = UNDO_VERIFY_TRANS_SLOT; - params->paramVal.slot= slot; - } else if (subModule == USTORE_VERIFY_UNDO_SUB_TRANSLOT_BUFFER) { - params->subModule = UNDO_VERIFY_TRANS_SLOT_BUFFER; - params->paramVal.page= page; - } - finishSetParams = true; - } - break; - case USTORE_VERIFY_MOD_REDO: { - rc = memset_s(paramSt, sizeof(URedoVerifyParams), 0, sizeof(URedoVerifyParams)); - securec_check(rc, "\0", "\0"); - URedoVerifyParams *params = (URedoVerifyParams *) paramSt; - params->pageVerifyParams.bvInfo.process = process; - 
params->pageVerifyParams.bvInfo.vLevel = vLevel; - params->pageVerifyParams.bvInfo.rel = rel; - params->pageVerifyParams.page = page; - params->pageVerifyParams.blk = blk; - params->pageVerifyParams.tupDesc = tupDesc; - params->latestRedo = lastestRedo; - finishSetParams = true; - } - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmodule(MOD_USTORE), - errmsg("Verify module(%u) is invalid.", module), - errdetail("N/A"), - errcause("Invalid verify module."), - erraction("Check the input paramter."))); - return false; - } - } - - return (finishSetParams) ? true : false; -} - -/* Verification entry of each ustore module. */ -bool ExecuteUstoreVerify(uint32 module, char* verifyParam) -{ - if (verifyParam == NULL) { - return false; - } - - bool analyzeVerify = (((baseVerifyInfo *) verifyParam)->process == ANALYZE_VERIFY); - uint32 mainModule = module & USTORE_VERIFY_MOD_MASK; - VerifyLevel vLevel = ((baseVerifyInfo *) verifyParam)->vLevel; - - /* Adjust the verification level. The value cannot be higher than the value of ustore_verify_level guc. 
*/ - if (!analyzeVerify && vLevel >= u_sess->attr.attr_storage.ustore_verify_level) { - ((baseVerifyInfo *) verifyParam)->vLevel = (VerifyLevel) u_sess->attr.attr_storage.ustore_verify_level; - } - - switch (mainModule) { - case USTORE_VERIFY_MOD_UPAGE: { - UPageVerifyParams *params = (UPageVerifyParams *) verifyParam; - VerifyUPageValid(params); - } - break; - case USTORE_VERIFY_MOD_UBTREE: { - UBtreePageVerifyParams *params = (UBtreePageVerifyParams *) verifyParam; - UBTreePageVerify(params); - } - break; - case USTORE_VERIFY_MOD_UNDO: { - UndoVerifyParams *params = (UndoVerifyParams *) verifyParam; - if (params->subModule == UNDO_VERIFY_UNDOZONE) { - undo::VerifyUndoZone(params->paramVal.undoZone); - } else if (params->subModule == UNDO_VERIFY_TRANS_SLOT) { - undo::VerifyTransactionSlotValid(params->paramVal.slot); - } else if (params->subModule == UNDO_VERIFY_TRANS_SLOT_BUFFER) { - undo::VerifyTransactionSlotBuffer(params->paramVal.page); - } - } - break; - case USTORE_VERIFY_MOD_REDO: { - URedoVerifyParams *params = (URedoVerifyParams *) verifyParam; - VerifyRedoUPageValid(params); - } - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmodule(MOD_USTORE), - errmsg("Verify module(%u) is invalid.", module), - errdetail("N/A"), - errcause("Invalid verify module."), - erraction("Check the input paramter."))); - return false; - } - } - - return true; -} diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp index 4b0e7a0871..e205f2b4cf 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp @@ -138,6 +138,7 @@ void PrepareUndoMeta(XlogUndoMeta *meta, UndoPersistence upersistence, UndoRecPt uzone->MarkDirty(); } uzone->AdvanceInsertURecPtr(UNDO_PTR_GET_OFFSET(lastRecord), lastRecordSize); + UndoZoneVerifyPtr(uzone); if (uzone->GetForceDiscardURecPtr() > 
uzone->GetInsertURecPtr()) { ereport(WARNING, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("zone %d forceDiscardURecPtr %lu > insertURecPtr %lu."), uzone->GetZoneId(), uzone->GetForceDiscardURecPtr(), uzone->GetInsertURecPtr()))); @@ -145,7 +146,8 @@ void PrepareUndoMeta(XlogUndoMeta *meta, UndoPersistence upersistence, UndoRecPt uzone->GetSlotBuffer().Lock(); BufferDesc *buf = GetBufferDescriptor(uzone->GetSlotBuffer().Buf() - 1); if (!UndoSlotBuffer::IsSlotBufferValid(buf, zid, meta->slotPtr)) { - ereport(PANIC, (errmsg(UNDOFORMAT("invalid cached slot buffer %d slot ptr %lu."), + ereport(PANIC, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("invalid cached slot buffer %d slot ptr %lu."), uzone->GetSlotBuffer().Buf(), meta->slotPtr))); } return; @@ -158,6 +160,7 @@ void FinishUndoMeta(UndoPersistence upersistence) if (uzone == NULL) { ereport(PANIC, (errmsg("FinishUndoMeta: uzone is NULL"))); } + UndoZoneVerify(uzone); uzone->GetSlotBuffer().UnLock(); uzone->UnlockUndoZone(); return; @@ -197,12 +200,6 @@ void UpdateTransactionSlot(TransactionId xid, XlogUndoMeta *meta, UndoRecPtr sta zid, meta->slotPtr, slot->XactId(), xid, slot->DbId(), u_sess->proc_cxt.MyDatabaseId))); } - uint32 verifyModule = USTORE_VERIFY_MOD_UNDO | USTORE_VERIFY_UNDO_SUB_TRANSLOT; - UndoVerifyParams verifyParam; - if (ConstructUstoreVerifyParam(verifyModule, USTORE_VERIFY_FAST, (char *) &verifyParam, NULL, - NULL, InvalidBlockNumber, InvalidOffsetNumber, NULL, NULL, InvalidXLogRecPtr, NULL, slot, false)) { - ExecuteUstoreVerify(verifyModule, (char *) &verifyParam); - } ereport(DEBUG2, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("update zone %d, slotptr %lu xid %lu dbid %u: old start %lu end %lu, new start %lu end %lu."), zid, meta->slotPtr, xid, slot->DbId(), slot->StartUndoPtr(), slot->EndUndoPtr(), @@ -215,6 +212,7 @@ void UpdateTransactionSlot(TransactionId xid, XlogUndoMeta *meta, UndoRecPtr sta meta->SetInfo(XLOG_UNDOMETA_INFO_SLOT); Assert(meta->dbid != INVALID_DB_OID); } + 
UndoTranslotVerifyPtr(slot, INVALID_UNDO_SLOT_PTR); return; } @@ -269,6 +267,7 @@ void RedoUndoMeta(XLogReaderState *record, XlogUndoMeta *meta, UndoRecPtr startU } UnlockReleaseBuffer(buf.Buf()); } + UndoZoneVerify(zone); return; } @@ -661,6 +660,7 @@ void RedoRollbackFinish(UndoSlotPtr slotPtr, XLogRecPtr lsn) slot->UpdateRollbackProgress(); PageSetLSN(page, lsn); MarkBufferDirty(buf.Buf()); + UndoTranslotVerify(slot, slotPtr); } UnlockReleaseBuffer(buf.Buf()); } @@ -714,7 +714,7 @@ void UpdateRollbackFinish(UndoSlotPtr slotPtr) } MarkBufferDirty(buf.Buf()); END_CRIT_SECTION(); - + UndoTranslotVerify(slot, slotPtr); UnlockReleaseBuffer(buf.Buf()); return; } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp index 933c8c8756..0ac42cf997 100755 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp @@ -93,6 +93,7 @@ bool AsyncRollback(UndoZone *zone, UndoSlotPtr recycle, TransactionSlot *slot) if (!u_sess->attr.attr_storage.enable_ustore_async_rollback) { return true; } + UndoTranslotVerify(slot, recycle); UndoRecPtr prev = GetPrevUrp(slot->EndUndoPtr()); AddRollbackRequest(slot->XactId(), prev, slot->StartUndoPtr(), slot->DbId(), recycle); @@ -941,6 +942,7 @@ void UndoRecycleMain() oldestFrozenXidInUndo, frozenXid))); oldestFrozenXidInUndo = oldestFrozenXidInUndo > frozenXid ? 
frozenXid : oldestFrozenXidInUndo; UpdateRecyledXid(recycleMaxXIDs, &recycleMaxXIDCount, recycleXid); + UndoZoneVerify(zone); } } } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp index f49aa57da6..65204a3351 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp @@ -455,6 +455,7 @@ void UndoSpace::RecoveryUndoSpace(int fd, UndoSpaceType type) segSize = USEG_SIZE(UNDO_DB_OID); } pg_atomic_fetch_add_u32(&g_instance.undo_cxt.undoTotalSize, usp->Used(zoneId)); + UndoZoneVerify(uzone); uint64 transUndoThresholdSize = UNDO_SPACE_THRESHOLD_PER_TRANS * BLCKSZ; const uint64 MAX_OFFSET = (UNDO_LOG_MAX_SIZE - transUndoThresholdSize) - segSize; if (usp->Tail() < usp->Head() || usp->Tail() > MAX_OFFSET) { diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp index 14294ecd09..fcc253effd 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp @@ -135,14 +135,8 @@ TransactionSlot *UndoSlotBuffer::FetchTransactionSlot(UndoSlotPtr slotPtr) } PageInit(page, BLCKSZ, 0); } - - uint32 verifyModule = USTORE_VERIFY_MOD_UNDO | USTORE_VERIFY_UNDO_SUB_TRANSLOT_BUFFER; - UndoVerifyParams verifyParam; - if (unlikely(ConstructUstoreVerifyParam(verifyModule, USTORE_VERIFY_COMPLETE, (char *) &verifyParam, - NULL, page, InvalidBlockNumber, InvalidOffsetNumber))) { - ExecuteUstoreVerify(verifyModule, (char *) &verifyParam); - } TransactionSlot *slot = (TransactionSlot *)((char *)page + slotOffset); + UndoTranslotVerifyBuffer(slotPtr); return slot; } @@ -337,48 +331,107 @@ UndoSlotPtr GetNextSlotPtr(UndoSlotPtr slotPtr) return MAKE_UNDO_PTR(UNDO_PTR_GET_ZONE_ID(slotPtr), offset); } -bool VerifyTransactionSlotValid(TransactionSlot *slot) +static void 
verifyXid(TransactionSlot *slot) { - if (u_sess->attr.attr_storage.ustore_verify_level <= USTORE_VERIFY_DEFAULT) { - return true; + UNDO_BYPASS_VERIFY; + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + TransactionId xid = slot->XactId(); + if (!TransactionIdIsValid(xid)) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. slot xactId %lu is invalid"), xid))); + return; } - if (!TransactionIdIsValid(slot->xactId_)) { - ereport(PANIC, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("Verify TransactionSlot failed: slot xact %lu"), - slot->XactId()))); + if (TransactionIdIsValid(xid) && + TransactionIdFollowsOrEquals(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. slot xactId %lu >= nextXid%lu"), + xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); } - return true; } -bool VerifyTransactionSlotBuffer(Page page) +void UndoTranslotVerifyPtr(TransactionSlot *slot, UndoSlotPtr slotPtr) { - if (u_sess->attr.attr_storage.ustore_verify_level < USTORE_VERIFY_COMPLETE) { - return true; + UNDO_BYPASS_VERIFY; + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + int zoneId = (int)UNDO_PTR_GET_ZONE_ID(slot->StartUndoPtr()); + UndoZone *zone = undo::UndoZoneGroup::GetUndoZone(zoneId, false); + if (slot->StartUndoPtr() > slot->EndUndoPtr() || slot->EndUndoPtr() > zone->GetInsertURecPtr()) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. startUndoPtr %lu , endUndoPtr %lu, zoneId %d, insertUrecPtr %lu "), + slot->StartUndoPtr(), slot->EndUndoPtr(), zoneId, zone->GetInsertURecPtr()))); + } + if (zoneId != (int)UNDO_PTR_GET_ZONE_ID(slot->EndUndoPtr()) || + (slotPtr != INVALID_UNDO_SLOT_PTR && zoneId != (int)UNDO_PTR_GET_ZONE_ID(slotPtr))) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. 
startUndoPtr %lu and endUndoPtr %lu have different zoneIds zid %d, insert %lu "), + slot->StartUndoPtr(), slot->EndUndoPtr(), zoneId, zone->GetInsertURecPtr()))); } +} +void UndoTranslotVerifyBuffer(UndoSlotPtr slotPtr) +{ + UNDO_BYPASS_VERIFY; + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) TransactionSlot *slot = NULL; - int flag = 0; + RelFileNode rnode; + UNDO_PTR_ASSIGN_REL_FILE_NODE(rnode, slotPtr, UNDO_SLOT_DB_OID); + Buffer buffer = ReadUndoBufferWithoutRelcache(rnode, UNDO_FORKNUM, UNDO_PTR_GET_BLOCK_NUM(slotPtr), RBM_NORMAL, + NULL, RELPERSISTENCE_PERMANENT); + Page page = BufferGetPage(buffer); VerifyPageHeader(page); - + + UndoSlotOffset prevEndUndoPtr = INVALID_UNDO_SLOT_PTR; + TransactionId prevXid = InvalidTransactionId; + for (uint32 offset = UNDO_LOG_BLOCK_HEADER_SIZE; offset < BLCKSZ - MAXALIGN(sizeof(TransactionSlot)); offset += MAXALIGN(sizeof(TransactionSlot))) { - slot = (TransactionSlot *) (page + offset); - if (slot->XactId() != InvalidTransactionId || slot->StartUndoPtr() != INVALID_UNDO_REC_PTR) { - if (TransactionIdFollows(slot->XactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("slot xid invalid: slotxid = %lu, nextxid = %lu, offset %u.", - slot->XactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid, offset))); - } - if (!t_thrd.xlog_cxt.InRecovery && flag > 0) { - uint32 tempOffset = offset - flag * MAXALIGN(sizeof(TransactionSlot)); - ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid slot: num = %d, offset = %u.", flag, tempOffset))); - } - } else { - flag++; + slot = (TransactionSlot *)(page + offset); + + int zoneId = (int)UNDO_PTR_GET_ZONE_ID(slot->StartUndoPtr()); + UndoZone *zone = undo::UndoZoneGroup::GetUndoZone(zoneId, false); + if (zone == NULL) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. zone is null. 
zoneId %d, startUndoPtr %lu, offset %u"), + zoneId, slot->StartUndoPtr(), offset))); + break; + } + + UndoSlotPtr currSlotPtr = slotPtr - slotPtr % BLCKSZ + offset; + if (currSlotPtr >= zone->GetAllocateTSlotPtr()) { + break; + } + + if (prevEndUndoPtr != INVALID_UNDO_SLOT_PTR && slot->StartUndoPtr() != prevEndUndoPtr) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. startUndoPtr%lu is not equal to prevEndUndoPtr %lu"), + slot->StartUndoPtr(), prevEndUndoPtr))); } + prevEndUndoPtr = slot->EndUndoPtr(); + if (TransactionIdIsValid(prevXid) && prevXid >= slot->XactId()) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. prevXid %lu >= xactId %lu"), + slot->StartUndoPtr(), prevEndUndoPtr))); + } + prevXid = slot->XactId(); + verifyXid(slot); + UndoTranslotVerifyPtr(slot, currSlotPtr); } - return true; + ReleaseBuffer(buffer); +} + +void UndoTranslotVerify(TransactionSlot *slot, UndoSlotPtr slotPtr) +{ + UNDO_BYPASS_VERIFY; + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + verifyXid(slot); + UndoTranslotVerifyPtr(slot, slotPtr); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + UndoTranslotVerifyBuffer(slotPtr); } } // namespace undo diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp index 98d4ae5d2b..fa64547d82 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp @@ -1039,26 +1039,60 @@ reallocate_zone: return; } -bool VerifyUndoZone(UndoZone *uzone) +void UndoZoneVerifyPtr(UndoZone *uzone) { - if (u_sess->attr.attr_storage.ustore_verify_level <= USTORE_VERIFY_DEFAULT) { - return true; - } + UNDO_BYPASS_VERIFY; - if (uzone->GetAllocateTSlotPtr() < uzone->GetRecycleTSlotPtr() || - uzone->GetInsertURecPtr() < uzone->GetForceDiscardURecPtr() || + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + LWLockAcquire(UndoZoneLock, 
LW_EXCLUSIVE); + if (uzone->GetInsertURecPtr() < uzone->GetForceDiscardURecPtr() || uzone->GetForceDiscardURecPtr() < uzone->GetDiscardURecPtr() || - (TransactionIdIsValid(uzone->GetFrozenXid()) && - (TransactionIdFollows(uzone->GetRecycleXid(), uzone->GetFrozenXid()) || - TransactionIdFollows(g_instance.undo_cxt.globalFrozenXid, uzone->GetFrozenXid()))) || - (TransactionIdIsValid(uzone->GetRecycleXid() && - TransactionIdFollows(g_instance.undo_cxt.globalRecycleXid, uzone->GetRecycleXid())))) { - ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), - "Invalid zone: allocateTSlotPtr %lu, recycleTSlotPtr %lu, frozenxid %lu, recyclexid %lu.", - uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), uzone->GetFrozenXid(), - uzone->GetRecycleXid())); + uzone->GetAllocateTSlotPtr() < uzone->GetRecycleTSlotPtr() || + uzone->GetUndoSpace()->Tail() < uzone->GetUndoSpace()->Head() || + uzone->GetSlotSpace()->Tail() < uzone->GetSlotSpace()->Head()) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDOZONE]failed. insertPtr %lu, forceDiscardPtr %lu, discardPtr %lu. " + "allocateTSlotPtr %lu, recycleTSlotPtr %lu, " + "undoInfo: tail %lu, head %lu. 
slotInfo: tail %lu, head %lu"), + uzone->GetInsertURecPtr(), uzone->GetForceDiscardURecPtr(), uzone->GetDiscardURecPtr(), + uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), uzone->GetUndoSpace()->Tail(), + uzone->GetUndoSpace()->Head(), uzone->GetSlotSpace()->Tail(), uzone->GetSlotSpace()->Head()))); } + LWLockRelease(UndoZoneLock); +} - return true; +void UndoZoneVerify(UndoZone *uzone) +{ + UNDO_BYPASS_VERIFY; + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + UndoZoneVerifyPtr(uzone); + + LWLockAcquire(UndoZoneLock, LW_EXCLUSIVE); + UndoLogOffset insert = UNDO_PTR_GET_OFFSET(uzone->GetInsertURecPtr()); + UndoLogOffset tail = insert + UNDO_LOG_SEGMENT_SIZE - insert % UNDO_LOG_SEGMENT_SIZE; + UndoLogOffset forceDiscard = UNDO_PTR_GET_OFFSET(uzone->GetForceDiscardURecPtr()); + UndoLogOffset head = (forceDiscard / UNDO_LOG_SEGMENT_SIZE) * UNDO_LOG_SEGMENT_SIZE; + if (tail != UNDO_PTR_GET_OFFSET(uzone->GetUndoSpace()->Tail()) || + head != UNDO_PTR_GET_OFFSET(uzone->GetUndoSpace()->Head())) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDOZONE]failed. insertPtr %lu, forceDiscardPtr %lu, tail %lu, head %lu. " + "zoneInfo: insertPtr %lu, forceDiscardPtr %lu, tail %lu, head %lu"), + insert, forceDiscard, tail, head, uzone->GetInsertURecPtr(), uzone->GetForceDiscardURecPtr(), + uzone->GetUndoSpace()->Tail(), uzone->GetUndoSpace()->Head()))); + } + UndoSlotPtr allocate = UNDO_PTR_GET_OFFSET(uzone->GetAllocateTSlotPtr()); + UndoLogOffset slotTail = (UndoLogOffset)(allocate + UNDO_META_SEGMENT_SIZE - allocate % UNDO_META_SEGMENT_SIZE); + UndoSlotPtr recycle = UNDO_PTR_GET_OFFSET(uzone->GetRecycleTSlotPtr()); + UndoLogOffset slotHead = (UndoLogOffset)((recycle / UNDO_META_SEGMENT_SIZE) * UNDO_META_SEGMENT_SIZE); + if (slotTail != UNDO_PTR_GET_OFFSET(uzone->GetSlotSpace()->Tail()) || + slotHead != UNDO_PTR_GET_OFFSET(uzone->GetSlotSpace()->Head())) { + ereport(WARNING, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("[VERIFY_UNDOZONE]failed. 
allocatePtr %lu, recyclePtr %lu, tail %lu, head %lu. " + "zoneInfo: allocatePtr %lu, recyclePtr %lu, tail %lu, head %lu"), + allocate, recycle, slotTail, slotHead, uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), + uzone->GetSlotSpace()->Tail(), uzone->GetSlotSpace()->Head()))); + } + LWLockRelease(UndoZoneLock); } } // namespace undo diff --git a/src/include/access/ubtree.h b/src/include/access/ubtree.h index 530b3b4a99..33c9e68499 100644 --- a/src/include/access/ubtree.h +++ b/src/include/access/ubtree.h @@ -27,6 +27,7 @@ #include "catalog/pg_index.h" #include "lib/stringinfo.h" #include "storage/buf/bufmgr.h" +#include "storage/buf/bufpage.h" #include "datatype/timestamp.h" /* @@ -504,6 +505,9 @@ typedef struct { double avgTravelQueueItems; } NewPageState; +typedef RpSort ItemIdSort; +typedef RpSortData ItemIdSortData; + #define TXNINFOSIZE (sizeof(ShortTransactionId) * 2) /* @@ -636,7 +640,6 @@ extern void UBTreeVerifyIndex(Relation rel, TupleDesc *tupDesc, Tuplestorestate extern int UBTreeVerifyOnePage(Relation rel, Page page, BTScanInsert cmpKeys, IndexTuple prevHikey); extern uint32 UBTreeVerifyRecycleQueue(Relation rel, TupleDesc *tupleDesc, Tuplestorestate *tupstore, uint32 cols); extern Buffer RecycleQueueGetEndpointPage(Relation rel, UBTRecycleForkNumber forkNumber, bool needHead, int access); -extern bool UBTreePageVerify(UBtreePageVerifyParams *verifyParams); typedef enum IndexTraceLevel { TRACE_NO = 0, @@ -662,8 +665,8 @@ extern Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumb extern void UBTreeRecycleQueueInitPage(Relation rel, Page page, BlockNumber blkno, BlockNumber prevBlkno, BlockNumber nextBlkno); extern void UBtreeRecycleQueueChangeChain(Buffer buf, BlockNumber newBlkno, bool setNext); -extern void UBTreeRecycleQueuePageChangeEndpointLeftPage(Buffer buf, bool isHead); -extern void UBTreeRecycleQueuePageChangeEndpointRightPage(Buffer buf, bool isHead); +extern void 
UBTreeRecycleQueuePageChangeEndpointLeftPage(Relation rel, Buffer buf, bool isHead); +extern void UBTreeRecycleQueuePageChangeEndpointRightPage(Relation rel, Buffer buf, bool isHead); extern void UBTreeXlogRecycleQueueModifyPage(Buffer buf, xl_ubtree2_recycle_queue_modify *xlrec); extern uint32 UBTreeRecycleQueuePageDump(Relation rel, Buffer buf, bool recordEachItem, TupleDesc *tupleDesc, Tuplestorestate *tupstore, uint32 cols); @@ -671,4 +674,12 @@ extern void UBTreeDumpRecycleQueueFork(Relation rel, UBTRecycleForkNumber forkNu Tuplestorestate *tupstore, uint32 cols); extern void UBTreeBuildCallback(Relation index, HeapTuple htup, Datum *values, const bool *isnull, bool tupleIsAlive, void *state); + +// verify urq +void UBTRecycleQueueVerifyPageOffline(Relation rel, Page page, BlockNumber blkno); + +// verify ubtree +void UBTreeVerifyPage(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); +void UBTreeVerifyAll(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); + #endif /* UBTREE_H */ diff --git a/src/include/access/ustore/knl_upage.h b/src/include/access/ustore/knl_upage.h index e27b3bb5c3..302f6056d8 100644 --- a/src/include/access/ustore/knl_upage.h +++ b/src/include/access/ustore/knl_upage.h @@ -49,8 +49,7 @@ #define UPageGetRowPtrOffset(_page) (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData((UHeapPageHeaderData *)_page)) -#define UPageGetRowPtr(_upage, _offsetNumber) \ - FastVerifyUPageRowPtr(UPageGenerateRowPtr(_upage, _offsetNumber), (UHeapPageHeaderData *)_upage, _offsetNumber) +#define UPageGetRowPtr(_upage, _offsetNumber) (UPageGenerateRowPtr(_upage, _offsetNumber)) #define UPageGenerateRowPtr(_upage, _offsetNumber) \ ((RowPtr *)(((char *)_upage) + SizeOfUHeapPageHeaderData + SizeOfUHeapTDData((UHeapPageHeaderData *)_upage) + \ @@ -316,8 +315,6 @@ Size PageGetUHeapFreeSpace(Page page); Size PageGetExactUHeapFreeSpace(Page page); extern UHeapFreeOffsetRanges *UHeapGetUsableOffsetRanges(Buffer 
buffer, UHeapTuple *tuples, int ntuples, Size saveFreeSpace); -extern bool VerifyUPageValid(UPageVerifyParams *verifyParams); -extern bool VerifyRedoUPageValid(URedoVerifyParams *verifyParams); extern bool VerifyPageHeader(Page page); extern void FastVerifyUTuple(UHeapDiskTuple diskTup, Buffer buffer); void UHeapRecordPotentialFreeSpace(Buffer buffer, int delta); @@ -344,6 +341,18 @@ inline OffsetNumber UHeapPageGetMaxOffsetNumber(char *upage) return maxoff; } -extern RowPtr *FastVerifyUPageRowPtr(RowPtr *rp, UHeapPageHeader uphdr, OffsetNumber offsetNumber); +#define USTORE_VERIFY_UPAGE_HEADER 0x01 +#define USTORE_VERIFY_UPAGE_TUPLE 0x02 +#define USTORE_VERIFY_UPAGE_ROW 0x04 +#define USTORE_VERIFY_UPAGE_ROWS 0x08 +#define USTORE_VERIFY_UPAGE_MASK 0xff + +#define USTORE_VERIFY_UPAGE_DEFAULT (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROWS) + +void UpageVerify(UHeapPageHeader header, XLogRecPtr lastRedo, TupleDesc tupDesc, Relation rel, + bool isRedo = false, uint8 mask = USTORE_VERIFY_UPAGE_DEFAULT, + OffsetNumber num = InvalidOffsetNumber /* for single TUPLE and ROW */); + +void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, Relation rel, bool isRedo = false); #endif diff --git a/src/include/access/ustore/knl_utuple.h b/src/include/access/ustore/knl_utuple.h index 00d50b4886..547950dea1 100644 --- a/src/include/access/ustore/knl_utuple.h +++ b/src/include/access/ustore/knl_utuple.h @@ -241,6 +241,12 @@ typedef enum LockOper { ShortTransactionIdToNormal((tup)->t_multi_base, (tup)->disk_tuple->xid) : \ ShortTransactionIdToNormal((tup)->t_xid_base, (tup)->disk_tuple->xid)) +#define UHeapDiskTupleGetRawXid(disktup, page) \ + (UHeapTupleHasMultiLockers((disktup)->flag) ? 
\ + ShortTransactionIdToNormal(((UHeapPageHeaderData *)(page))->pd_multi_base, (disktup)->xid) :\ + ShortTransactionIdToNormal(((UHeapPageHeaderData *)(page))->pd_xid_base, (disktup)->xid)) + + #define UHeapTupleCopyBaseFromPage(tup, page) \ do { \ (tup)->t_xid_base = ((UHeapPageHeaderData *)(page))->pd_xid_base; \ diff --git a/src/include/access/ustore/knl_uundorecord.h b/src/include/access/ustore/knl_uundorecord.h index 55f7dac86d..4fb0c890fd 100644 --- a/src/include/access/ustore/knl_uundorecord.h +++ b/src/include/access/ustore/knl_uundorecord.h @@ -396,4 +396,6 @@ UndoTraversalState FetchUndoRecord(__inout UndoRecord *urec, _in_ SatisfyUndoRec bool InplaceSatisfyUndoRecord(_in_ UndoRecord *urec, _in_ BlockNumber blkno, _in_ OffsetNumber offset, _in_ TransactionId xid); +void UndoRecordVerify(_in_ UndoRecord *urec); + #endif // __KNL_UUNDORECORD_H__ diff --git a/src/include/access/ustore/knl_uundovec.h b/src/include/access/ustore/knl_uundovec.h index a9072c6237..ea8c3952b6 100644 --- a/src/include/access/ustore/knl_uundovec.h +++ b/src/include/access/ustore/knl_uundovec.h @@ -149,5 +149,4 @@ void SetUndoPageLSN(_in_ URecVector *urecvec, _in_ XLogRecPtr lsn); void ReleaseUndoBuffers(void); -void VerifyUndoRecordValid(UndoRecord *urec, bool needCheckXidInvalid = false); #endif // __KNL_UUNDOVEC_H__ diff --git a/src/include/access/ustore/knl_uverify.h b/src/include/access/ustore/knl_uverify.h index d4f25faed8..bac2755db5 100644 --- a/src/include/access/ustore/knl_uverify.h +++ b/src/include/access/ustore/knl_uverify.h @@ -40,13 +40,10 @@ #define USTORE_VERIFY_MOD_REDO 0x00080000 #define USTORE_VERIFY_MOD_MASK (USTORE_VERIFY_MOD_UPAGE | USTORE_VERIFY_MOD_UBTREE | USTORE_VERIFY_MOD_UNDO | USTORE_VERIFY_MOD_REDO) -#define DML_VERIFY 1 -#define ANALYZE_VERIFY 2 - -/* Ustore verification submodule list for a specific module. 
*/ -#define USTORE_VERIFY_UNDO_SUB_UNDOZONE 0x0001 -#define USTORE_VERIFY_UNDO_SUB_TRANSLOT 0x0002 -#define USTORE_VERIFY_UNDO_SUB_TRANSLOT_BUFFER 0x0004 +/* Ustore urq verfication module list. */ +#define USTORE_VERIFY_URQ_SUB_HEADER 0x00020001 +#define USTORE_VERIFY_URQ_SUB_ITEM 0x00020002 +#define USTORE_VERIFY_URQ_SUB_METADATA 0x00020004 #define USTORE_VERIFY_SUB_MOD_MASK 0x0000ffff /* Ustore verification level of each modules. */ @@ -55,61 +52,45 @@ typedef enum VerifyLevel { USTORE_VERIFY_DEFAULT = 1, USTORE_VERIFY_FAST = 2, USTORE_VERIFY_COMPLETE = 3, - USTORE_VERIFY_WHITEBOX = 4 } VerifyLevel; -/* Base verify info struct for each type. */ -typedef struct baseVerifyInfo { - int process; - VerifyLevel vLevel; - Relation rel; -} baseVerifyInfo; - -/* Input params struct for upage verification. */ -typedef struct UPageVerifyParams { - baseVerifyInfo bvInfo; - Page page; - BlockNumber blk; - OffsetNumber offnum; - TupleDesc tupDesc; -} UPageVerifyParams; +#define CHECK_VERIFY_LEVEL(level)\ +{ \ + if (u_sess->attr.attr_storage.ustore_verify_level < level) { \ + return; \ + } \ +} -/* Input params struct for upage redo verification. */ -typedef struct URedoVerifyParams { - UPageVerifyParams pageVerifyParams; - XLogRecPtr latestRedo; -} URedoVerifyParams; +#define BYPASS_VERIFY(module, rel) \ +do { \ + if ((u_sess->attr.attr_storage.ustore_verify_module & module) == 0) { \ + return; \ + } \ + if (rel != NULL && !RelationIsUstoreFormat(rel) && !RelationIsUstoreIndex(rel)) { \ + return; \ + } \ +} while(0) -/* Input params struct for ubtree page verification. 
*/ -typedef struct UBtreePageVerifyParams { - baseVerifyInfo bvInfo; - Page page; - GPIScanDesc gpiScan; -} UBtreePageVerifyParams; +#define UNDO_BYPASS_VERIFY \ +do { \ + if ((u_sess->attr.attr_storage.ustore_verify_module & USTORE_VERIFY_MOD_UNDO) == 0) { \ + return; \ + } \ +} while(0) -/* Sub-module for undo verification */ -typedef enum VerifySubModule { - UNDO_VERIFY_UNDOZONE = 0, - UNDO_VERIFY_TRANS_SLOT, - UNDO_VERIFY_TRANS_SLOT_BUFFER, - /* Add other types before the last element if needed*/ - UNDO_VERIFY_SUB_MODULE_BUTT -} VerifySubModule; +extern inline int ustore_verify_errlevel(void) +{ + return u_sess->attr.attr_storage.ustore_verify ? WARNING : ERROR; +} -/* Input params struct for undo verification. */ -typedef struct UndoVerifyParams { - baseVerifyInfo bvInfo; - VerifySubModule subModule; - union { - Page page; - undo::UndoZone *undoZone; - undo::TransactionSlot *slot; - } paramVal; -} UndoVerifyParams; +#define BEGIN_SAVE_VERIFY(tmp) \ +{ \ + temp = u_sess->attr.attr_storage.ustore_verify; \ + u_sess->attr.attr_storage.ustore_verify = true; \ +} -extern bool ConstructUstoreVerifyParam(uint32 module, VerifyLevel vLevl, char *paramSt, - Relation rel, Page page, BlockNumber blk, OffsetNumber offnum, TupleDesc tupDesc = NULL, - GPIScanDesc gpiScan = NULL, XLogRecPtr lastestRedo = InvalidXLogRecPtr, - undo::UndoZone *uZone = NULL, undo::TransactionSlot *slot = NULL, int process = 0); -extern bool ExecuteUstoreVerify(uint32 module, char* verifyParam); +#define END_SAVE_VERIFY(tmp) \ +{ \ + u_sess->attr.attr_storage.ustore_verify = tmp; \ +} #endif diff --git a/src/include/access/ustore/undo/knl_uundotxn.h b/src/include/access/ustore/undo/knl_uundotxn.h index 84ab7e79c4..c24b7ce42f 100644 --- a/src/include/access/ustore/undo/knl_uundotxn.h +++ b/src/include/access/ustore/undo/knl_uundotxn.h @@ -171,8 +171,12 @@ private: }; UndoSlotPtr GetNextSlotPtr(UndoSlotPtr slotPtr); -bool VerifyTransactionSlotValid(TransactionSlot *slot); -bool 
VerifyTransactionSlotBuffer(Page page); + +void UndoTranslotVerifyPtr(TransactionSlot *slot, UndoSlotPtr slotPtr); + +void UndoTranslotVerifyBuffer(UndoSlotPtr slotPtr); + +void UndoTranslotVerify(TransactionSlot *slot, UndoSlotPtr slotPtr); } // namespace undo #endif // __KNL_UUNDOTXN_H__ diff --git a/src/include/access/ustore/undo/knl_uundozone.h b/src/include/access/ustore/undo/knl_uundozone.h index 05c16159b1..82a9e8d4a1 100644 --- a/src/include/access/ustore/undo/knl_uundozone.h +++ b/src/include/access/ustore/undo/knl_uundozone.h @@ -380,8 +380,11 @@ public: void AllocateZonesBeforXid(); void InitZone(UndoZone *uzone, const int zoneId, UndoPersistence upersistence); void InitUndoSpace(UndoZone *uzone, UndoSpaceType type); -bool VerifyUndoZone(UndoZone *uzone); void exrto_recycle_residual_undo_file(char *FuncName); +void UndoZoneVerifyPtr(UndoZone *uzone); + +void UndoZoneVerify(UndoZone *uzone); + } // namespace undo #endif // __KNL_UUNDOZONE_H__ diff --git a/src/include/knl/knl_guc/knl_session_attr_storage.h b/src/include/knl/knl_guc/knl_session_attr_storage.h index f614154288..427942bf7f 100755 --- a/src/include/knl/knl_guc/knl_session_attr_storage.h +++ b/src/include/knl/knl_guc/knl_session_attr_storage.h @@ -241,6 +241,7 @@ typedef struct knl_session_attr_storage { int umax_search_length_for_prune; int ustore_verify_level; int ustore_verify_module; + bool ustore_verify; int index_trace_level; int archive_interval; bool enable_ustore_sync_rollback; -- Gitee From 694208a4e87acd334c865bc1883f6ead59c764a3 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Wed, 10 Jul 2024 21:02:00 +0800 Subject: [PATCH 028/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3timescaledb=E6=8F=92?= =?UTF-8?q?=E4=BB=B6=EF=BC=8C=E6=97=B6=E5=BA=8F=E8=A1=A8=E6=9F=A5=E7=9C=8B?= =?UTF-8?q?=E6=8C=87=E5=AE=9A=E7=9A=84SQL=E6=89=A7=E8=A1=8C=E8=AE=A1?= =?UTF-8?q?=E5=88=92=E6=97=B6=EF=BC=8C=E6=95=B0=E6=8D=AE=E5=BA=93=E5=A4=B1?= =?UTF-8?q?=E5=8E=BB=E8=BF=9E=E6=8E=A5?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/plan/planner.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/optimizer/plan/planner.cpp b/src/gausskernel/optimizer/plan/planner.cpp index 7bceb5c5bd..7e8d3bcc89 100755 --- a/src/gausskernel/optimizer/plan/planner.cpp +++ b/src/gausskernel/optimizer/plan/planner.cpp @@ -4889,8 +4889,11 @@ static Plan* internal_grouping_planner(PlannerInfo* root, double tuple_fraction) ExtensiblePlan* expPlan1 =(ExtensiblePlan*)tsdbPlan; ExtensiblePlan* expPlan2; ListCell* expL; + expPlan1->scan.plan.exec_nodes = result_plan->exec_nodes; + foreach(expL,expPlan1->extensible_plans) { Plan* planTemp = (Plan*) lfirst(expL); + planTemp->exec_nodes = result_plan->exec_nodes; if (IsA(planTemp, ExtensiblePlan)) { expPlan2 = (ExtensiblePlan*)planTemp; @@ -4899,7 +4902,7 @@ static Plan* internal_grouping_planner(PlannerInfo* root, double tuple_fraction) result_plan->targetlist = tlist; expPlan2->extensible_plans = lappend(expPlan2->extensible_plans, result_plan); } - + } return tsdbPlan; } -- Gitee From 59e22b540efab2aa941fdad191fd4cc33fbca9e3 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Tue, 9 Jul 2024 17:40:56 +0800 Subject: [PATCH 029/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=8F=91=E5=B8=83?= =?UTF-8?q?=E8=AE=A2=E9=98=85dump=E7=94=A8=E4=BE=8B=E5=A4=B1=E8=B4=A5?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/ruleutils.cpp | 14 +++++++------- src/test/subscription/env_utils.sh | 12 ++++++++++-- src/test/subscription/testcase/dump.sh | 4 ++-- src/test/subscription/testcase/types.sh | 20 ++++++++++---------- 4 files changed, 29 insertions(+), 21 deletions(-) diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index a8b0d9e91b..ef1dfdfb4a 100644 --- 
a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -1311,7 +1311,7 @@ static void AppendSubPartitionDetail(StringInfo buf, tableInfo tableinfo, Subpar "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' AND p.partstrategy = '%c' " - "ORDER BY p.boundaries[1]::%s ASC", + "ORDER BY p.boundaries[1]::%s ASC NULLS LAST", subpartinfo->subparentid, PART_OBJ_TYPE_TABLE_SUB_PARTITION, subpartinfo->subparttype, get_typename(subpartinfo->subpartkeytype)); @@ -1391,9 +1391,9 @@ static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, table tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_RANGE); for (int i = 1; i <= partkeynum; i++) { if (i == partkeynum) { - appendStringInfo(query, "p.boundaries[%d]::%s ASC", i, get_typename(iPartboundary[i - 1])); + appendStringInfo(query, "p.boundaries[%d]::%s ASC NULLS LAST", i, get_typename(iPartboundary[i - 1])); } else { - appendStringInfo(query, "p.boundaries[%d]::%s, ", i, get_typename(iPartboundary[i - 1])); + appendStringInfo(query, "p.boundaries[%d]::%s NULLS LAST, ", i, get_typename(iPartboundary[i - 1])); } } @@ -1470,7 +1470,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " - "AND p.partstrategy = '%c' ORDER BY p.boundaries[1]::%s ASC", + "AND p.partstrategy = '%c' ORDER BY p.boundaries[1]::%s ASC NULLS LAST", tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, get_typename(*iPartboundary)); } else { appendStringInfo(query, @@ -1478,7 +1478,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "p.bound_def AS partbound, " "p.oid AS partoid, " "t.spcname AS reltblspc FROM ( " - "SELECT oid, relname, reltablespace, pg_catalog.string_agg(bound,',' 
ORDER BY bound_id) AS bound_def FROM( " + "SELECT oid, relname, reltablespace, pg_catalog.string_agg(bound,',' ORDER BY bound_id NULLS LAST) AS bound_def FROM( " "SELECT oid, relname, reltablespace, bound_id, '('||" "pg_catalog.array_to_string(pg_catalog.array_agg(key_value ORDER BY key_id), ',', 'NULL')||')' AS bound " "FROM ( SELECT oid, relname, reltablespace, bound_id, key_id, "); @@ -1511,7 +1511,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_catalog.pg_partition " "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c' AND boundaries[1] IS NULL) p " "LEFT JOIN pg_catalog.pg_tablespace t ON p.reltablespace = t.oid " - "ORDER BY p.bound_def ASC", + "ORDER BY p.bound_def ASC NULLS LAST", tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST); } @@ -1579,7 +1579,7 @@ static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY ", tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_HASH); - appendStringInfo(query, "p.boundaries[1]::%s ASC", get_typename(*iPartboundary)); + appendStringInfo(query, "p.boundaries[1]::%s ASC NULLS LAST", get_typename(*iPartboundary)); (void)SPI_execute(query->data, true, INT_MAX); int proc = SPI_processed; diff --git a/src/test/subscription/env_utils.sh b/src/test/subscription/env_utils.sh index 071299b914..622b5740a0 100644 --- a/src/test/subscription/env_utils.sh +++ b/src/test/subscription/env_utils.sh @@ -29,18 +29,26 @@ gsctl_wait_time=3600 data_dir=$g_data_path function exec_sql(){ - result=$(gsql -d $1 -p $2 -Atq -c "$3") + execStr=$3 + if [[ $3 == "CREATE DATABASE"* ]] || [[ $3 == "DROP DATABASE"* ]]; then + execStr="set dolphin.b_compatibility_mode = off;"$3 + fi + result=$(gsql -d $1 -p $2 -Atq -c "$execStr") if [ "$result" != "" 
]; then echo "$result" fi } function exec_sql_with_user() { + execStr=$3 + if [[ $3 == "CREATE DATABASE"* ]] || [[ $3 == "DROP DATABASE"* ]]; then + execStr="set dolphin.b_compatibility_mode = off;"$3 + fi local sql_user=$username if [ -n "$test_username" ]; then sql_user=$test_username fi - result=$(gsql -U $sql_user -W $passwd -d $1 -p $2 -Atq -c "$3") + result=$(gsql -U $sql_user -W $passwd -d $1 -p $2 -Atq -c "$execStr") if [ "$result" != "" ]; then echo "$result" fi diff --git a/src/test/subscription/testcase/dump.sh b/src/test/subscription/testcase/dump.sh index 6a084b326f..02a5b36395 100644 --- a/src/test/subscription/testcase/dump.sh +++ b/src/test/subscription/testcase/dump.sh @@ -46,7 +46,7 @@ function test_1() { exec_dump_db $case_db $pub_node1_port "$dump_result_dir/dump_db_pub${pub_ddl}.pub" "all" sedcmd="sed -i -e s/gauss/${g_username}/g $dump_expected_dir/dump_db_pub${pub_ddl}.pub" $sedcmd - diff $dump_result_dir/dump_db_pub${pub_ddl}.pub $dump_expected_dir/dump_db_pub${pub_ddl}.pub > ${dump_result_dir}/dump_pub${pub_ddl}_pub.diff + diff -I "dolphin.sql_mode" $dump_result_dir/dump_db_pub${pub_ddl}.pub $dump_expected_dir/dump_db_pub${pub_ddl}.pub > ${dump_result_dir}/dump_pub${pub_ddl}_pub.diff if [ -s ${dump_result_dir}/dump_puball_pub.diff ]; then echo "$failed_keyword when dump publication" exit 1 @@ -57,7 +57,7 @@ function test_1() { exec_dump_db $case_db $sub_node1_port "$dump_result_dir/dump_db_pub${pub_ddl}.sub" "all" sedcmd="sed -i -e s/gauss/${g_username}/g $dump_expected_dir/dump_db_pub${pub_ddl}.sub" $sedcmd - diff $dump_result_dir/dump_db_pub${pub_ddl}.sub $dump_expected_dir/dump_db_pub${pub_ddl}.sub > ${dump_result_dir}/dump_pub${pub_ddl}_sub.diff --ignore-matching-lines='password=encryptOpt' + diff -I "dolphin.sql_mode" $dump_result_dir/dump_db_pub${pub_ddl}.sub $dump_expected_dir/dump_db_pub${pub_ddl}.sub > ${dump_result_dir}/dump_pub${pub_ddl}_sub.diff --ignore-matching-lines='password=encryptOpt' if [ -s 
${dump_result_dir}/dump_pub${pub_ddl}_sub.diff ]; then echo "$failed_keyword when dump subscription" exit 1 diff --git a/src/test/subscription/testcase/types.sh b/src/test/subscription/testcase/types.sh index e74ad2be16..653eb5fee9 100644 --- a/src/test/subscription/testcase/types.sh +++ b/src/test/subscription/testcase/types.sh @@ -147,7 +147,7 @@ function test_1() { (ROW(2.0, 'b', 2), ARRAY[ROW(2, 'b', 2)::tst_comp_basic_t]), (ROW(3.0, 'c', 3), ARRAY[ROW(3, 'c', 3)::tst_comp_basic_t]), (ROW(4.0, 'd', 4), ARRAY[ROW(4, 'd', 3)::tst_comp_basic_t]), - (ROW(5.0, 'e', NULL), ARRAY[NULL, ROW(5, NULL, 5)::tst_comp_basic_t]); + (ROW(5.0, 'e', 5), ARRAY[NULL, ROW(5, NULL, 5)::tst_comp_basic_t]); -- test_tbl_composite_with_enums INSERT INTO tst_comp_enum (a, b) VALUES @@ -179,7 +179,7 @@ function test_1() { (ROW(2.0, '{b, c, a}', 2), ARRAY[ROW(2, '{b, c, a}', 1)::tst_comp_enum_array_t]), (ROW(3.0, '{c, a, b}', 1), ARRAY[ROW(3, '{c, a, b}', 1)::tst_comp_enum_array_t]), (ROW(4.0, '{c, b, d}', 4), ARRAY[ROW(4, '{c, b, d}', 4)::tst_comp_enum_array_t]), - (ROW(5.0, '{c, NULL, b}', NULL), ARRAY[ROW(5, '{c, e, b}', 1)::tst_comp_enum_array_t]); + (ROW(5.0, '{c, NULL, b}', 5), ARRAY[ROW(5, '{c, e, b}', 1)::tst_comp_enum_array_t]); -- test_tbl_mixed_composites INSERT INTO tst_comp_mix_array (a, b) VALUES @@ -260,7 +260,7 @@ e|{d,NULL} (2,b,2)|{\"(2,b,2)\"} (3,c,3)|{\"(3,c,3)\"} (4,d,4)|{\"(4,d,3)\"} -(5,e,)|{NULL,\"(5,,5)\"} +(5,e,5)|{NULL,\"(5,,5)\"} 1|(1,a,1) 2|(2,b,2) 3|(3,c,3) @@ -280,7 +280,7 @@ e|{d,NULL} (2,\"{b,c,a}\",2)|{\"(2,\\\""{b,c,a}\\\"",1)\"} (3,\"{c,a,b}\",1)|{\"(3,\\\""{c,a,b}\\\"",1)\"} (4,\"{c,b,d}\",4)|{\"(4,\\\""{c,b,d}\\\"",4)\"} -(5,\"{c,NULL,b}\",)|{\"(5,\\\""{c,e,b}\\\"",1)\"} +(5,\"{c,NULL,b}\",5)|{\"(5,\\\""{c,e,b}\\\"",1)\"} (\"(1,a,1)\",\"{\"\"(1,a,1)\"\",\"\"(2,b,2)\"\"}\",a,\"{a,b,NULL,c}\")|{\"(\\\"(1,a,1)\\\",\\\"{\\\"\\\"(1,a,1)\\\"\\\",\\\"\\\"(2,b,2)\\\"\\\",NULL}\\\",a,\\\"{a,b,c}\\\")\"} 1|[1,11) 2|[2,21) @@ -379,7 +379,7 @@ e|{e,d} 
(2,b,2)|{\"(2,b,2)\"} (3,c,3)|{\"(3,c,3)\"} (4,d,4)|{NULL,\"(9,x,)\"} -(5,e,)|{NULL,\"(9,x,)\"} +(5,e,5)|{NULL,\"(9,x,)\"} 1|(1,,) 2|(2,b,2) 3|(3,c,3) @@ -399,7 +399,7 @@ e|{e,d} (2,\"{b,c,a}\",2)|{\"(2,\\\""{b,c,a}\\\"",1)\"} (3,\"{c,a,b}\",1)|{\"(3,\\\""{c,a,b}\\\"",1)\"} (4,\"{c,b,d}\",4)|{\"(5,\\\""{a,b,c}\\\"",5)\"} -(5,\"{c,NULL,b}\",)|{\"(5,\\\""{a,b,c}\\\"",5)\"} +(5,\"{c,NULL,b}\",5)|{\"(5,\\\""{a,b,c}\\\"",5)\"} (\"(1,a,1)\",\"{\"\"(1,a,1)\"\",\"\"(2,b,2)\"\"}\",a,\"{a,b,NULL,c}\")|{\"(\\\"(1,a,1)\\\",\\\"{\\\"\\\"(1,a,1)\\\"\\\",\\\"\\\"(2,b,2)\\\"\\\",NULL}\\\",a,\\\"{a,b,c}\\\")\",NULL} 1|[100,1001) 2|[2,21) @@ -460,9 +460,9 @@ e|{e,d} DELETE FROM tst_comp_enum_what WHERE (b[1]).b = '{c, a, b}'; DELETE FROM tst_comp_mix_array WHERE ((a).a).a = 1; DELETE FROM tst_range WHERE a = 1; - DELETE FROM tst_range WHERE '[10,20]' && b; + DELETE FROM tst_range WHERE range_overlaps('[10,20]', b); DELETE FROM tst_range_array WHERE a = 1; - DELETE FROM tst_range_array WHERE tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 05 00:00:00 2014 CEST'::timestamptz) && b; + DELETE FROM tst_range_array WHERE range_overlaps(tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 05 00:00:00 2014 CEST'::timestamptz), b); DELETE FROM tst_hstore WHERE a = 1;" wait_for_catchup $case_db $pub_node1_port "tap_sub_slot" @@ -485,7 +485,7 @@ e|{e,d} 5|(,x,-1) (2,b,2)|{\"(2,b,2)\"} (4,d,4)|{NULL,\"(9,x,)\"} -(5,e,)|{NULL,\"(9,x,)\"} +(5,e,5)|{NULL,\"(9,x,)\"} 3|(3,c,3) 4|(4,d,44) 5|(4,d,44) @@ -496,7 +496,7 @@ e|{e,d} 5|(4,\"{c,b,d}\",4) (2,\"{b,c,a}\",2)|{\"(2,\\\""{b,c,a}\\\"",1)\"} (4,\"{c,b,d}\",4)|{\"(5,\\\""{a,b,c}\\\"",5)\"} -(5,\"{c,NULL,b}\",)|{\"(5,\\\""{a,b,c}\\\"",5)\"} +(5,\"{c,NULL,b}\",5)|{\"(5,\\\""{a,b,c}\\\"",5)\"} 2|[\"2014-08-02 00:00:00+02\",\"2014-08-04 00:00:00+02\")|{\"[2,4)\",\"[20,31)\"} 3|[\"2014-08-01 00:00:00+02\",\"2014-08-04 00:00:00+02\")|{\"[3,5)\"} 2|\"updated\"=>\"value\" -- Gitee From 1241891e7d1234d113b34103639e7fd961f560aa 
Mon Sep 17 00:00:00 2001 From: JulianZhang Date: Thu, 11 Jul 2024 11:15:31 +0800 Subject: [PATCH 030/347] advance xlog files, remove test case --- .../regress/expected/advance_xlog_files.out | 21 ------------------- src/test/regress/parallel_schedule0 | 1 - src/test/regress/sql/advance_xlog_files.sql | 19 ----------------- 3 files changed, 41 deletions(-) delete mode 100644 src/test/regress/expected/advance_xlog_files.out delete mode 100644 src/test/regress/sql/advance_xlog_files.sql diff --git a/src/test/regress/expected/advance_xlog_files.out b/src/test/regress/expected/advance_xlog_files.out deleted file mode 100644 index 8f070cab71..0000000000 --- a/src/test/regress/expected/advance_xlog_files.out +++ /dev/null @@ -1,21 +0,0 @@ --- show wal_file_init_num, the default value should be 10 -show wal_file_init_num; - wal_file_init_num -------------------- - 10 -(1 row) - --- create test table with large row size -create table test_advance_xlog_files (c1 int, c2 varchar(500)); --- insert rows to make xlog files consumed, then trigger to init xlog files. 
-insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -drop table test_advance_xlog_files; diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 425d470759..95c8b87ee7 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -1127,4 +1127,3 @@ test: enable_expr_fusion_flatten test: on_update_session1 on_update_session2 test: ts_gb18030_utf8 -test: advance_xlog_files diff --git a/src/test/regress/sql/advance_xlog_files.sql b/src/test/regress/sql/advance_xlog_files.sql deleted file mode 100644 index 62615a0fa1..0000000000 --- a/src/test/regress/sql/advance_xlog_files.sql +++ /dev/null @@ -1,19 +0,0 @@ --- show wal_file_init_num, the default value should be 10 -show wal_file_init_num; - --- create test table with large row size -create table test_advance_xlog_files (c1 int, c2 varchar(500)); - --- insert rows to make xlog files consumed, then trigger to init xlog files. 
-insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); -insert into test_advance_xlog_files (c1, c2) select generate_series(1, 300000), generate_series(1, 300000); - -drop table test_advance_xlog_files; \ No newline at end of file -- Gitee From 211dba76b821337408d4d03c836f4577716149e8 Mon Sep 17 00:00:00 2001 From: "arcoalien@qq.com" Date: Wed, 10 Jul 2024 17:24:08 +0800 Subject: [PATCH 031/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=B5=84=E6=BA=90?= =?UTF-8?q?=E6=B1=A0=E5=8C=96=E4=B8=8B=E4=B8=BB=E6=9C=BAdrop=20db=EF=BC=8C?= =?UTF-8?q?=E5=A4=87=E6=9C=BAgsc=E7=BC=93=E5=AD=98=E6=9C=AA=E6=B8=85?= =?UTF-8?q?=E7=90=86=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../backend/utils/cache/knl_globalsysdbcache.cpp | 6 ++++++ src/gausskernel/ddes/adapter/ss_transaction.cpp | 8 +++++++- src/gausskernel/storage/ipc/sinval.cpp | 8 ++++++++ src/include/storage/sinval.h | 11 +++++++++++ src/test/regress/expected/ss_r/gsc_sync_drop_db.out | 6 ++++++ src/test/regress/expected/ss_wr/gsc_sync_drop_db.out | 
10 ++++++++++ src/test/regress/parallel_schedule_ss_read | 1 + src/test/regress/sql/ss_r/gsc_sync_drop_db.sql | 2 ++ src/test/regress/sql/ss_wr/gsc_sync_drop_db.sql | 5 +++++ src/test/ss/conf_start_dss_inst.sh | 4 ++-- 10 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 src/test/regress/expected/ss_r/gsc_sync_drop_db.out create mode 100644 src/test/regress/expected/ss_wr/gsc_sync_drop_db.out create mode 100644 src/test/regress/sql/ss_r/gsc_sync_drop_db.sql create mode 100644 src/test/regress/sql/ss_wr/gsc_sync_drop_db.sql diff --git a/src/common/backend/utils/cache/knl_globalsysdbcache.cpp b/src/common/backend/utils/cache/knl_globalsysdbcache.cpp index f740eebac4..8bd77754c3 100644 --- a/src/common/backend/utils/cache/knl_globalsysdbcache.cpp +++ b/src/common/backend/utils/cache/knl_globalsysdbcache.cpp @@ -47,6 +47,7 @@ #include "knl/knl_session.h" #include "utils/syscache.h" #include "storage/proc.h" +#include "storage/sinval.h" #include "funcapi.h" #include "commands/dbcommands.h" @@ -1414,5 +1415,10 @@ void NotifyGscDropDB(Oid db_id, bool need_clear) if (!EnableGlobalSysCache()) { return; } + + if (ENABLE_DMS && SS_PRIMARY_MODE && !RecoveryInProgress()) { + SSSendDropDbMsg(db_id, need_clear); + } + g_instance.global_sysdbcache.DropDB(db_id, need_clear); } diff --git a/src/gausskernel/ddes/adapter/ss_transaction.cpp b/src/gausskernel/ddes/adapter/ss_transaction.cpp index 789bf69bf4..52758ec82f 100644 --- a/src/gausskernel/ddes/adapter/ss_transaction.cpp +++ b/src/gausskernel/ddes/adapter/ss_transaction.cpp @@ -24,6 +24,7 @@ */ #include "utils/snapshot.h" #include "utils/postinit.h" +#include "utils/knl_globalsysdbcache.h" #include "storage/procarray.h" #include "storage/buf/bufmgr.h" #include "storage/smgr/segment_internal.h" @@ -624,7 +625,7 @@ void SSSendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n) Assert(ssmsg.tablespaceid != InvalidOid); } ssmsg.type = BCAST_SI; - if (msg->id >= SHAREDINVALFUNC_ID) { + if (msg->id >= 
SHAREDINVALDB_ID) { errno_t rc = memcpy_s(&(ssmsg.msg), sizeof(SharedInvalidationMessage), msg, sizeof(SharedInvalidationMessage)); securec_check_c(rc, "", ""); @@ -743,6 +744,11 @@ int SSProcessSharedInvalMsg(char *data, uint32 len) } SSBroadcastSI* ssmsg = (SSBroadcastSI *)data; + if (ssmsg->msg.id == SHAREDINVALDB_ID) { + NotifyGscDropDB(ssmsg->msg.db.dbId, ssmsg->msg.db.need_clear); + return DMS_SUCCESS; + } + /* process msg one by one */ if (EnableGlobalSysCache()) { SSStandbyGlobalInvalidSharedInvalidMessages(&(ssmsg->msg), ssmsg->tablespaceid); diff --git a/src/gausskernel/storage/ipc/sinval.cpp b/src/gausskernel/storage/ipc/sinval.cpp index 0f82dde70c..8cbc8859d2 100644 --- a/src/gausskernel/storage/ipc/sinval.cpp +++ b/src/gausskernel/storage/ipc/sinval.cpp @@ -269,3 +269,11 @@ void ProcessCatchupInterrupt(void) } } +void SSSendDropDbMsg(Oid dbid, bool need_clear) +{ + SharedInvalidationMessage msg; + msg.db.id = SHAREDINVALDB_ID; + msg.db.dbId = dbid; + msg.db.need_clear = need_clear; + SSSendSharedInvalidMessages(&msg, 1); +} diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h index 84fbbb4bdd..129f6f44bb 100644 --- a/src/include/storage/sinval.h +++ b/src/include/storage/sinval.h @@ -120,6 +120,15 @@ typedef struct { Oid objId; /* function ID or package ID */ } SharedInvalFuncMsg; +//Only used for drop db in shared storage cluster only +#define SHAREDINVALDB_ID (-8) + +typedef struct { + int8 id; /* type field --- must be first */ + Oid dbId; /* database ID, or 0 for shared catalogs */ + bool need_clear; /* whether need clear gsc */ +} SharedInvalDbMsg; + typedef union SharedInvalidationMessage { int8 id; /* type field --- must be first */ SharedInvalCatcacheMsg cc; @@ -130,6 +139,7 @@ typedef union SharedInvalidationMessage { SharedInvalHbktSmgrMsg hbksm; SharedInvalRelmapMsg rm; SharedInvalFuncMsg fm; + SharedInvalDbMsg db; } SharedInvalidationMessage; typedef struct _SharedInvalidationMessageEx { @@ -163,4 +173,5 @@ extern void 
LocalExecuteThreadAndSessionInvalidationMessage(SharedInvalidationMe extern void LocalExecuteThreadInvalidationMessage(SharedInvalidationMessage* msg); extern void LocalExecuteSessionInvalidationMessage(SharedInvalidationMessage* msg); extern void GlobalExecuteSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n); +extern void SSSendDropDbMsg(Oid dbid, bool need_clear); #endif /* SINVAL_H */ diff --git a/src/test/regress/expected/ss_r/gsc_sync_drop_db.out b/src/test/regress/expected/ss_r/gsc_sync_drop_db.out new file mode 100644 index 0000000000..c2e685828c --- /dev/null +++ b/src/test/regress/expected/ss_r/gsc_sync_drop_db.out @@ -0,0 +1,6 @@ +\c postgres +select database_name from gs_gsc_dbstat_info() where database_name like '%testdb1%'; + database_name +--------------- +(0 rows) + diff --git a/src/test/regress/expected/ss_wr/gsc_sync_drop_db.out b/src/test/regress/expected/ss_wr/gsc_sync_drop_db.out new file mode 100644 index 0000000000..e0dd783723 --- /dev/null +++ b/src/test/regress/expected/ss_wr/gsc_sync_drop_db.out @@ -0,0 +1,10 @@ +create database testdb1; +\c testdb1 +\c postgres +select database_name from gs_gsc_dbstat_info() where database_name like '%testdb1%'; + database_name +--------------- + testdb1 +(1 row) + +drop database testdb1; diff --git a/src/test/regress/parallel_schedule_ss_read b/src/test/regress/parallel_schedule_ss_read index fbba399552..5332e1f5c5 100644 --- a/src/test/regress/parallel_schedule_ss_read +++ b/src/test/regress/parallel_schedule_ss_read @@ -6,5 +6,6 @@ test: segment_subpartition_truncate #test: segment_subpartition_gpi #test: segment_subpartition_analyze_vacuum #test: segment_subpartition_select +test: gsc_sync_drop_db #ss_standby_support_write should be put as the last case test: ss_standby_support_write diff --git a/src/test/regress/sql/ss_r/gsc_sync_drop_db.sql b/src/test/regress/sql/ss_r/gsc_sync_drop_db.sql new file mode 100644 index 0000000000..54b6b1f724 --- /dev/null +++ 
b/src/test/regress/sql/ss_r/gsc_sync_drop_db.sql @@ -0,0 +1,2 @@ +\c postgres +select database_name from gs_gsc_dbstat_info() where database_name like '%testdb1%'; diff --git a/src/test/regress/sql/ss_wr/gsc_sync_drop_db.sql b/src/test/regress/sql/ss_wr/gsc_sync_drop_db.sql new file mode 100644 index 0000000000..79a66d5be8 --- /dev/null +++ b/src/test/regress/sql/ss_wr/gsc_sync_drop_db.sql @@ -0,0 +1,5 @@ +create database testdb1; +\c testdb1 +\c postgres +select database_name from gs_gsc_dbstat_info() where database_name like '%testdb1%'; +drop database testdb1; diff --git a/src/test/ss/conf_start_dss_inst.sh b/src/test/ss/conf_start_dss_inst.sh index 57bac8dc60..965257aee9 100644 --- a/src/test/ss/conf_start_dss_inst.sh +++ b/src/test/ss/conf_start_dss_inst.sh @@ -1,5 +1,5 @@ #!/bin/bash -INST_OFFSET=`expr $UID % 64` +INST_OFFSET=`expr $UID % 63` SIMULATE_SIZE=50000 # Unit: MB LOG_SIZE=30000 # Unit: MB declare inst_count='' @@ -155,4 +155,4 @@ function main() { start_dss ${pre_path}/dss_home ${inst_count} ${cluster_mode} } -main $@ \ No newline at end of file +main $@ -- Gitee From 11c15e382e79649ef615d29902e08b58bec63eb4 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Thu, 11 Jul 2024 16:08:53 +0800 Subject: [PATCH 032/347] =?UTF-8?q?=E5=9B=9E=E9=80=80=E6=89=A9=E9=A1=B5?= =?UTF-8?q?=E5=86=990?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/smgr/segment/data_file.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/gausskernel/storage/smgr/segment/data_file.cpp b/src/gausskernel/storage/smgr/segment/data_file.cpp index 16253c6399..593be40bb3 100644 --- a/src/gausskernel/storage/smgr/segment/data_file.cpp +++ b/src/gausskernel/storage/smgr/segment/data_file.cpp @@ -503,11 +503,7 @@ void df_extend_internal(SegLogicFile *sf) df_extend_file_vector(sf); } int new_fd; - if (ENABLE_DSS) { - new_fd = dv_open_file(filename, O_RDWR | O_CREAT | 
DSS_FT_NODE_FLAG_INNER_INITED, SEGMENT_FILE_MODE); - } else { - new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); - } + new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); if (new_fd < 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("[segpage] could not create file \"%s\": %m", filename))); } -- Gitee From 95747aeef44b6c76f007f10286c6f84feb8382cf Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 11 Jul 2024 20:18:09 +0800 Subject: [PATCH 033/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dunusable=E5=88=86?= =?UTF-8?q?=E5=8C=BA=E7=B4=A2=E5=BC=95=E5=90=8E=E7=9A=84core=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../runtime/executor/execUtils.cpp | 11 +++---- .../expected/gpi_set_index_unusable.out | 30 +++++++++++++++++++ .../regress/sql/gpi_set_index_unusable.sql | 30 +++++++++++++++++++ 3 files changed, 66 insertions(+), 5 deletions(-) diff --git a/src/gausskernel/runtime/executor/execUtils.cpp b/src/gausskernel/runtime/executor/execUtils.cpp index 309ad07ed1..dd33d0d2d3 100644 --- a/src/gausskernel/runtime/executor/execUtils.cpp +++ b/src/gausskernel/runtime/executor/execUtils.cpp @@ -2014,8 +2014,6 @@ List* ExecInsertIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* e unusedRelationDescs = resultRelInfo->ri_UnusableIndexRelationDescs; unusedindexInfoArray = resultRelInfo->ri_UnusableIndexRelationInfo; - totalIndices = numIndices + numUnusedIndices; - heapRelation = resultRelInfo->ri_RelationDesc; containGPI = resultRelInfo->ri_ContainGPI; @@ -2039,13 +2037,15 @@ List* ExecInsertIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* e return NIL; } /* If the global partition index is included, the index insertion process needs to continue */ - if (!p->pd_part->indisusable && !containGPI) { + if (!p->pd_part->indisusable && !containGPI && !UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD) { numIndices = 0; } } else { 
actualheap = heapRelation; } + totalIndices = numIndices + numUnusedIndices; + if (bucketId != InvalidBktId) { searchHBucketFakeRelation(estate->esfRelations, estate->es_query_cxt, actualheap, bucketId, actualheap); } @@ -2115,8 +2115,9 @@ List* ExecInsertIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* e actualindex, indexpartition, RowExclusiveLock); - // skip unusable index - if (!indexpartition->pd_part->indisusable) { + // skip unusable index except UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD is set and index is unique + if (!indexpartition->pd_part->indisusable && !(UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD && + IndexIsUnique(indexRelation->rd_index))) { continue; } } else { diff --git a/src/test/regress/expected/gpi_set_index_unusable.out b/src/test/regress/expected/gpi_set_index_unusable.out index c1634d3419..69e251b86b 100644 --- a/src/test/regress/expected/gpi_set_index_unusable.out +++ b/src/test/regress/expected/gpi_set_index_unusable.out @@ -728,4 +728,34 @@ drop index idx_cm_num_a; drop index idx_nv_num_a; drop index idx_uq_a; drop table web_returns_p_a; +-- test unusable local index +create table ALTER_TABLE_MERGE_TABLE_057 +( +c_smallint smallint, +c_varchar varchar(100) +) partition by range (c_varchar) +( +partition ALTER_TABLE_MERGE_TABLE_057_1 values less than ('a222') , +partition ALTER_TABLE_MERGE_TABLE_057_2 values less than ('a444') , +partition ALTER_TABLE_MERGE_TABLE_057_3 values less than ('a777') , +partition ALTER_TABLE_MERGE_TABLE_057_4 values less than ('a999') +); +create unique index INDEX_ALTER_TABLE_MERGE_TABLE_057_1 ON ALTER_TABLE_MERGE_TABLE_057 USING btree (c_smallint, c_varchar) local; +alter table ALTER_TABLE_MERGE_TABLE_057 modify partition ALTER_TABLE_MERGE_TABLE_057_3 unusable local indexes; +set behavior_compat_options = 'update_unusable_unique_index_on_iud'; +insert into ALTER_TABLE_MERGE_TABLE_057 values (1, 'a555'); +insert into ALTER_TABLE_MERGE_TABLE_057 values (1, 'a555'); --error +ERROR: duplicate key value 
violates unique constraint "index_alter_table_merge_table_057_1" +DETAIL: Key (c_smallint, c_varchar)=(1, a555) already exists. +insert into ALTER_TABLE_MERGE_TABLE_057 values (2, 'a111'); +insert into ALTER_TABLE_MERGE_TABLE_057 values (2, 'a111'); --error +ERROR: duplicate key value violates unique constraint "index_alter_table_merge_table_057_1" +DETAIL: Key (c_smallint, c_varchar)=(2, a111) already exists. +reset behavior_compat_options; +insert into ALTER_TABLE_MERGE_TABLE_057 values (1, 'a555'); +insert into ALTER_TABLE_MERGE_TABLE_057 values (3, 'a333'); +insert into ALTER_TABLE_MERGE_TABLE_057 values (3, 'a333'); --error +ERROR: duplicate key value violates unique constraint "index_alter_table_merge_table_057_1" +DETAIL: Key (c_smallint, c_varchar)=(3, a333) already exists. +drop table ALTER_TABLE_MERGE_TABLE_057; -- End. Clean u diff --git a/src/test/regress/sql/gpi_set_index_unusable.sql b/src/test/regress/sql/gpi_set_index_unusable.sql index 9a7aa9c7ad..daac844137 100644 --- a/src/test/regress/sql/gpi_set_index_unusable.sql +++ b/src/test/regress/sql/gpi_set_index_unusable.sql @@ -392,4 +392,34 @@ drop index idx_nv_num_a; drop index idx_uq_a; drop table web_returns_p_a; +-- test unusable local index +create table ALTER_TABLE_MERGE_TABLE_057 +( +c_smallint smallint, +c_varchar varchar(100) +) partition by range (c_varchar) +( +partition ALTER_TABLE_MERGE_TABLE_057_1 values less than ('a222') , +partition ALTER_TABLE_MERGE_TABLE_057_2 values less than ('a444') , +partition ALTER_TABLE_MERGE_TABLE_057_3 values less than ('a777') , +partition ALTER_TABLE_MERGE_TABLE_057_4 values less than ('a999') +); + +create unique index INDEX_ALTER_TABLE_MERGE_TABLE_057_1 ON ALTER_TABLE_MERGE_TABLE_057 USING btree (c_smallint, c_varchar) local; +alter table ALTER_TABLE_MERGE_TABLE_057 modify partition ALTER_TABLE_MERGE_TABLE_057_3 unusable local indexes; + +set behavior_compat_options = 'update_unusable_unique_index_on_iud'; +insert into ALTER_TABLE_MERGE_TABLE_057 
values (1, 'a555'); +insert into ALTER_TABLE_MERGE_TABLE_057 values (1, 'a555'); --error + +insert into ALTER_TABLE_MERGE_TABLE_057 values (2, 'a111'); +insert into ALTER_TABLE_MERGE_TABLE_057 values (2, 'a111'); --error + +reset behavior_compat_options; +insert into ALTER_TABLE_MERGE_TABLE_057 values (1, 'a555'); + +insert into ALTER_TABLE_MERGE_TABLE_057 values (3, 'a333'); +insert into ALTER_TABLE_MERGE_TABLE_057 values (3, 'a333'); --error + +drop table ALTER_TABLE_MERGE_TABLE_057; -- End. Clean u -- Gitee From 276b9636f92f3592c6a72d8214f0ac08578bbad2 Mon Sep 17 00:00:00 2001 From: zhang_xubo <2578876417@qq.com> Date: Thu, 11 Jul 2024 20:22:39 +0800 Subject: [PATCH 034/347] =?UTF-8?q?trc=5Fgen.py=E7=94=9F=E6=88=90=E8=84=9A?= =?UTF-8?q?=E6=9C=AC=E6=94=B9=E4=B8=BApython3=E6=89=A7=E8=A1=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/lib/gstrace/CMakeLists.txt | 2 +- src/lib/gstrace/Makefile | 2 +- src/lib/gstrace/script/trc_gen.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib/gstrace/CMakeLists.txt b/src/lib/gstrace/CMakeLists.txt index 4551e767cb..803f369e95 100755 --- a/src/lib/gstrace/CMakeLists.txt +++ b/src/lib/gstrace/CMakeLists.txt @@ -1,6 +1,6 @@ #This is the main CMAKE for build all components. 
execute_process( - COMMAND python ./script/trc_gen.py -s ./config/ -o ${CMAKE_CURRENT_SOURCE_DIR}/../../include/gstrace + COMMAND python3 ./script/trc_gen.py -s ./config/ -o ${CMAKE_CURRENT_SOURCE_DIR}/../../include/gstrace WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) diff --git a/src/lib/gstrace/Makefile b/src/lib/gstrace/Makefile index 048612ba23..959f4c4bee 100644 --- a/src/lib/gstrace/Makefile +++ b/src/lib/gstrace/Makefile @@ -45,7 +45,7 @@ all: $(HEADERFILE) install: all $(HEADERFILE): $(CONFIGFILE) - python ./script/trc_gen.py -s ./config/ -o ./../../include/gstrace + python3 ./script/trc_gen.py -s ./config/ -o ./../../include/gstrace clean: rm -f ./../../include/gstrace/*_gstrace.h diff --git a/src/lib/gstrace/script/trc_gen.py b/src/lib/gstrace/script/trc_gen.py index 7abebbdcce..572bdde618 100644 --- a/src/lib/gstrace/script/trc_gen.py +++ b/src/lib/gstrace/script/trc_gen.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # coding=utf-8 # # Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
-- Gitee From 6b2e0b87869ca53af9a4896b39c81766022b1367 Mon Sep 17 00:00:00 2001 From: hejiahuan11 Date: Tue, 9 Jul 2024 20:45:22 +0800 Subject: [PATCH 035/347] vgname-log --- src/bin/pg_probackup/util.cpp | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/bin/pg_probackup/util.cpp b/src/bin/pg_probackup/util.cpp index 2cd684c6d0..93a30d70cc 100644 --- a/src/bin/pg_probackup/util.cpp +++ b/src/bin/pg_probackup/util.cpp @@ -532,18 +532,20 @@ get_redo(const char *pgdata_path, RedoParams *redo) } -void -parse_vgname_args(const char* args) +void parse_vgname_args(const char* args) { char *vgname = xstrdup(args); if (strstr(vgname, "/") != NULL) elog(ERROR, "invalid token \"/\" in vgname"); + /* Check vgname args */ char *comma = strstr(vgname, ","); if (comma == NULL) { - instance_config.dss.vgdata = vgname; - instance_config.dss.vglog = const_cast(""); - return; + elog(ERROR, "invalid vgname args, there is at least one \",\" in vgname\n" + "insufficient vgname args, check the number of args, example: --vgname=\"+data,+log\""); + } + if (*(comma + 1) == '\0' ){ + elog(ERROR, "insufficient vgname args, check the number of args, example: --vgname=\"+data,+log\""); } instance_config.dss.vgdata = xstrdup(vgname); @@ -556,8 +558,7 @@ parse_vgname_args(const char* args) elog(ERROR, "invalid vgname args, should be two volume group names, example: \"+data,+log\""); } -bool -is_ss_xlog(const char *ss_dir) +bool is_ss_xlog(const char *ss_dir) { char ss_xlog[MAXPGPATH] = {0}; char ss_notify[MAXPGPATH] = {0}; -- Gitee From e628f4c1c587e5ffa8abf665d4c1cc561ca1e464 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Fri, 12 Jul 2024 14:27:46 +0800 Subject: [PATCH 036/347] =?UTF-8?q?MySql=E5=85=BC=E5=AE=B9=E6=80=A7?= =?UTF-8?q?=E5=A2=9E=E5=BC=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/pg_dump.cpp | 20 +++++++++++--- src/bin/psql/common.cpp | 30 
+++++++++++++++++++++ src/bin/psql/common.h | 3 +++ src/bin/psql/describe.cpp | 9 ++++++- src/common/backend/catalog/dependency.cpp | 4 +-- src/common/backend/nodes/copyfuncs.cpp | 2 ++ src/common/backend/nodes/equalfuncs.cpp | 2 ++ src/common/backend/nodes/outfuncs.cpp | 6 +++++ src/common/backend/nodes/readfuncs.cpp | 4 +++ src/common/backend/utils/init/globals.cpp | 3 ++- src/gausskernel/optimizer/path/joinrels.cpp | 18 ++++++++++--- src/include/knl/knl_session.h | 1 + src/include/miscadmin.h | 1 + src/include/nodes/nodes.h | 1 + src/include/nodes/parsenodes.h | 1 + src/include/nodes/primnodes.h | 1 + src/include/nodes/relation.h | 1 + 17 files changed, 97 insertions(+), 10 deletions(-) diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index a6d256110c..8a018f8ddc 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -9121,6 +9121,7 @@ EventInfo* getEvents(Archive *fout, int *numEvents) int i_nspname; char* database_name = PQdb(GetConnection(fout)); bool is_bcompatibility = findDBCompatibility(fout, PQdb(GetConnection(fout))); + const char *intervalStr = NULL; if (GetVersionNum(fout) < EVENT_VERSION) { return NULL; @@ -9129,12 +9130,18 @@ EventInfo* getEvents(Archive *fout, int *numEvents) selectSourceSchema(fout, "pg_catalog"); query = createPQExpBuffer(); if (is_bcompatibility) { + if (hasSpecificExtension(fout, "dolphin")) { + intervalStr = "`interval`"; + } else { + intervalStr = "interval"; + } appendPQExpBuffer( query, "SELECT pg_job.oid, job_id, log_user, job_name, pg_job.nspname, pg_namespace.oid, dbname, start_date, " - "end_date, interval, enable " + "end_date, %s, enable " "FROM pg_job LEFT join pg_namespace on pg_namespace.nspname = pg_job.nspname where dbname=\'%s\'", - database_name); + intervalStr, database_name); + res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -18511,12 +18518,19 @@ static PQExpBuffer createTablePartition(Archive* fout, TableInfo* tbinfo) int 
cnt; bool partkeyexprIsNull = PartkeyexprIsNull(fout, tbinfo, false); + const char *intervalStr = NULL; /* get partitioned table info */ + if (findDBCompatibility(fout, PQdb(GetConnection(fout))) && hasSpecificExtension(fout, "dolphin")) { + intervalStr = "`interval`"; + } else { + intervalStr = "interval"; + } appendPQExpBuffer(defq, - "SELECT partstrategy, interval[1], " + "SELECT partstrategy, %s[1], " "pg_catalog.array_length(partkey, 1) AS partkeynum, partkey, " "pg_catalog.array_length(intervaltablespace, 1) AS inttblspcnum, intervaltablespace " "FROM pg_partition WHERE parentid = '%u' AND parttype = '%c'", + intervalStr, tbinfo->dobj.catId.oid, PART_OBJ_TYPE_PARTED_TABLE); res = ExecuteSqlQueryForSingleRow(fout, defq->data); diff --git a/src/bin/psql/common.cpp b/src/bin/psql/common.cpp index 6936c85b16..5d87b31fae 100644 --- a/src/bin/psql/common.cpp +++ b/src/bin/psql/common.cpp @@ -2703,3 +2703,33 @@ char* GetEnvStr(const char* env) } return NULL; } + +bool CheckDBCompatibility(PGconn *connection, char *dbCompatibility) +{ + bool isCompatibility = false; + PGresult *res = PQexec(connection, "select setting from pg_catalog.pg_settings where name = 'sql_compatibility'"); + + isCompatibility = (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK && + PQntuples(res) == 1 && strcmp(PQgetvalue(res, 0, 0), dbCompatibility) == 0); + + PQclear(res); + return isCompatibility; +} + +bool CheckSpecificExtension(PGconn *connection, char *extension) +{ + bool isHasExtension = false; + char sql[100] = {0}; + int rc = 0; + + rc = sprintf_s(sql, sizeof(sql), "SELECT extname from pg_catalog.pg_extension where extname = '%s'", extension); + securec_check_ss_c(rc, "\0", "\0"); + + PGresult *res = PQexec(connection, sql); + + isHasExtension = (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK && + PQntuples(res) == 1); + + PQclear(res); + return isHasExtension; +} \ No newline at end of file diff --git a/src/bin/psql/common.h b/src/bin/psql/common.h index 
7a53dbd4cc..3d6d1efb60 100644 --- a/src/bin/psql/common.h +++ b/src/bin/psql/common.h @@ -108,4 +108,7 @@ extern void expand_tilde(char** filename); extern bool do_parallel_execution(int count, char** stmts); extern char* GetEnvStr(const char* env); +extern bool CheckDBCompatibility(PGconn* connection, char *dbCompatibility); +extern bool CheckSpecificExtension(PGconn* connection, char *extension); + #endif /* COMMON_H */ diff --git a/src/bin/psql/describe.cpp b/src/bin/psql/describe.cpp index 29f9af2fb1..db8bf68461 100644 --- a/src/bin/psql/describe.cpp +++ b/src/bin/psql/describe.cpp @@ -2855,8 +2855,15 @@ static bool describeOneTableDetails(const char* schemaname, const char* relation * Show information about partition table. * 1. Get the partition key postition and partition strategy from pg_partition. */ + const char *intervalStr = NULL; + if (CheckDBCompatibility(pset.db, "B") && CheckSpecificExtension(pset.db, "dolphin")) { + intervalStr = "`interval`"; + } else { + intervalStr = "interval"; + } printfPQExpBuffer(&buf, - "select partkey,partstrategy,interval[1] from pg_partition where parentid = %s and parttype = 'r'", oid); + "select partkey,partstrategy, %s[1] from pg_partition where parentid = %s and parttype = 'r'", + intervalStr, oid); PGresult *tmp_result = NULL; result = PSQLexec(buf.data, false); diff --git a/src/common/backend/catalog/dependency.cpp b/src/common/backend/catalog/dependency.cpp index 9ae1da8078..89cf82de1e 100644 --- a/src/common/backend/catalog/dependency.cpp +++ b/src/common/backend/catalog/dependency.cpp @@ -951,7 +951,7 @@ void reportDependentObjects( * In restrict mode, we check targetObjects, remove object entries related to views from targetObjects, * and ensure that no errors are reported due to deleting table fields that have view references. 
*/ - if (behavior == DROP_RESTRICT && origObject != NULL && origObject->objectSubId != 0) { + if (behavior == DROP_RESTRICT && origObject != NULL && (origObject->objectSubId != 0 || u_sess->attr.attr_sql.dolphin)) { ObjectAddresses* newTargetObjects = new_object_addresses(); const ObjectAddress* originalObj = NULL; const int typeOidOffset = 2; @@ -974,7 +974,7 @@ void reportDependentObjects( (originalObj->objectId + typeOidOffset) == obj->objectId)) { // delete pg_type entry add_exact_object_address_extra(obj, extra, newTargetObjects); - } else if (objClass != OCLASS_REWRITE) { // delete constraint and so on + } else if (objClass != OCLASS_REWRITE || (u_sess->attr.attr_sql.dolphin && extra->dependee.objectId == origObject->objectId)) { // delete constraint and so on add_exact_object_address_extra(obj, extra, newTargetObjects); } } diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 655d89b166..6087420274 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -3388,6 +3388,7 @@ static JoinExpr* _copyJoinExpr(const JoinExpr* from) COPY_NODE_FIELD(quals); COPY_NODE_FIELD(alias); COPY_SCALAR_FIELD(rtindex); + COPY_SCALAR_FIELD(is_straight_join); return newnode; } @@ -3657,6 +3658,7 @@ static SpecialJoinInfo* _copySpecialJoinInfo(const SpecialJoinInfo* from) COPY_SCALAR_FIELD(lhs_strict); COPY_SCALAR_FIELD(delay_upper_joins); COPY_NODE_FIELD(join_quals); + COPY_SCALAR_FIELD(is_straight_join); return newnode; } diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index c2b4440e8e..ea71be9114 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -730,6 +730,7 @@ static bool _equalJoinExpr(const JoinExpr* a, const JoinExpr* b) COMPARE_NODE_FIELD(quals); COMPARE_NODE_FIELD(alias); COMPARE_SCALAR_FIELD(rtindex); + COMPARE_SCALAR_FIELD(is_straight_join); return true; } @@ -838,6 +839,7 @@ static 
bool _equalSpecialJoinInfo(const SpecialJoinInfo* a, const SpecialJoinInf COMPARE_SCALAR_FIELD(lhs_strict); COMPARE_SCALAR_FIELD(delay_upper_joins); COMPARE_NODE_FIELD(join_quals); + COMPARE_SCALAR_FIELD(is_straight_join); return true; } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 5e4ef26ebd..0db506b908 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -3190,6 +3190,9 @@ static void _outJoinExpr(StringInfo str, JoinExpr* node) WRITE_NODE_FIELD(quals); WRITE_NODE_FIELD(alias); WRITE_INT_FIELD(rtindex); + if (t_thrd.proc->workingVersionNum >= STRAIGHT_JOIN_VERSION_NUMBER) { + WRITE_BOOL_FIELD(is_straight_join); + } } static void _outFromExpr(StringInfo str, FromExpr* node) @@ -3829,6 +3832,9 @@ static void _outSpecialJoinInfo(StringInfo str, SpecialJoinInfo* node) WRITE_BOOL_FIELD(lhs_strict); WRITE_BOOL_FIELD(delay_upper_joins); WRITE_NODE_FIELD(join_quals); + if (t_thrd.proc->workingVersionNum >= STRAIGHT_JOIN_VERSION_NUMBER) { + WRITE_BOOL_FIELD(is_straight_join); + } } static void diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index eb15a3ee1c..ad5e521b50 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -3109,6 +3109,9 @@ static JoinExpr* _readJoinExpr(void) READ_NODE_FIELD(quals); READ_NODE_FIELD(alias); READ_INT_FIELD(rtindex); + IF_EXIST(is_straight_join) { + READ_BOOL_FIELD(is_straight_join); + } READ_DONE(); } @@ -3253,6 +3256,7 @@ static RangeTblEntry* _readRangeTblEntry(void) (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("NULL relnamespace for RTE %u found", local_node->relid))); } + local_node->relnamespace = relnamespace; } /* * Same reason as above, get synOid for distribution plan. 
diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 3904cc8820..67931c80a2 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -75,12 +75,13 @@ bool will_shutdown = false; * NEXT | 92899 | ? | ? * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92938; +const uint32 GRAND_VERSION_NUM = 92939; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 STRAIGHT_JOIN_VERSION_NUMBER = 92939; const uint32 ROTATE_UNROTATE_VERSION_NUM = 92937; const uint32 PIPELINED_FUNCTION_VERSION_NUM = 92936; const uint32 DISABLE_CONSTRAINT_VERSION_NUM = 92931; diff --git a/src/gausskernel/optimizer/path/joinrels.cpp b/src/gausskernel/optimizer/path/joinrels.cpp index 228e96dfe3..568fa08747 100755 --- a/src/gausskernel/optimizer/path/joinrels.cpp +++ b/src/gausskernel/optimizer/path/joinrels.cpp @@ -303,10 +303,11 @@ static void make_rels_by_clauseless_joins(PlannerInfo* root, RelOptInfo* old_rel * match the SpecialJoinInfo node. 
*/ static bool join_is_legal(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2, Relids joinrelids, - SpecialJoinInfo** sjinfo_p, bool* reversed_p) + SpecialJoinInfo** sjinfo_p, bool* reversed_p, bool* straight_join_p) { SpecialJoinInfo* match_sjinfo = NULL; bool reversed = false; + bool straight_join = false; bool unique_exchange = false; bool must_be_leftjoin = false; bool lateral_fwd = false; @@ -319,6 +320,7 @@ static bool join_is_legal(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2, */ *sjinfo_p = NULL; *reversed_p = false; + *straight_join_p = false; /* * If we have any special joins, the proposed join might be illegal; and @@ -328,6 +330,9 @@ static bool join_is_legal(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2, foreach (l, root->join_info_list) { SpecialJoinInfo* sjinfo = (SpecialJoinInfo*)lfirst(l); + if (sjinfo->is_straight_join) { + straight_join = true; + } /* * This special join is not relevant unless its RHS overlaps the * proposed join. (Check this first as a fast path for dismissing @@ -531,6 +536,7 @@ static bool join_is_legal(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2, /* Otherwise, it's a valid join */ *sjinfo_p = match_sjinfo; *reversed_p = reversed; + *straight_join_p = straight_join; return true; } @@ -551,6 +557,7 @@ RelOptInfo* make_join_rel(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2) Relids joinrelids; SpecialJoinInfo* sjinfo = NULL; bool reversed = false; + bool straight_join = false; SpecialJoinInfo sjinfo_data; RelOptInfo* joinrel = NULL; List* restrictlist = NIL; @@ -562,7 +569,7 @@ RelOptInfo* make_join_rel(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2) joinrelids = bms_union(rel1->relids, rel2->relids); /* Check validity and determine join type. 
*/ - if (!join_is_legal(root, rel1, rel2, joinrelids, &sjinfo, &reversed)) { + if (!join_is_legal(root, rel1, rel2, joinrelids, &sjinfo, &reversed, &straight_join)) { /* invalid join path */ bms_free_ext(joinrelids); return NULL; @@ -591,6 +598,7 @@ RelOptInfo* make_join_rel(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2) sjinfo->jointype = JOIN_INNER; /* we don't bother trying to make the remaining fields valid */ sjinfo->lhs_strict = false; + sjinfo->is_straight_join = straight_join; sjinfo->delay_upper_joins = false; sjinfo->join_quals = NIL; } @@ -650,6 +658,9 @@ RelOptInfo* make_join_rel(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2) break; } add_paths_to_joinrel(root, joinrel, rel1, rel2, JOIN_INNER, sjinfo, restrictlist); + if (u_sess->attr.attr_sql.dolphin && sjinfo->is_straight_join) { + break; + } add_paths_to_joinrel(root, joinrel, rel2, rel1, JOIN_INNER, sjinfo, restrictlist); break; case JOIN_LEFT: @@ -956,10 +967,11 @@ static bool has_legal_joinclause(PlannerInfo* root, RelOptInfo* rel) Relids joinrelids; SpecialJoinInfo* sjinfo = NULL; bool reversed = false; + bool straight_join = false; /* join_is_legal needs relids of the union */ joinrelids = bms_union(rel->relids, rel2->relids); - if (join_is_legal(root, rel, rel2, joinrelids, &sjinfo, &reversed)) { + if (join_is_legal(root, rel, rel2, joinrelids, &sjinfo, &reversed, &straight_join)) { /* Yes, this will work */ bms_free_ext(joinrelids); return true; diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index ac8ef4f289..7de69ab9c1 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -458,6 +458,7 @@ typedef struct knl_u_parser_context { bool in_userset; bool has_set_uservar; bool has_equal_uservar; + bool is_straight_join; } knl_u_parser_context; typedef struct knl_u_trigger_context { diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index f6d569bad4..3fec065099 100644 --- a/src/include/miscadmin.h +++ 
b/src/include/miscadmin.h @@ -147,6 +147,7 @@ extern const uint32 PRIOR_EXPR_VERSION_NUM; extern const uint32 CURSOR_EXPRESSION_VERSION_NUMBER; extern const uint32 ROTATE_UNROTATE_VERSION_NUM; extern const uint32 FLOAT_VERSION_NUMBER; +extern const uint32 STRAIGHT_JOIN_VERSION_NUMBER; extern void register_backend_version(uint32 backend_version); extern bool contain_backend_version(uint32 version_number); diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index a29b9c0e85..141fe1baf0 100755 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -1130,6 +1130,7 @@ typedef enum JoinType { /* * We might need additional join types someday. */ + JOIN_STRAIGHT } JoinType; /* diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index 73ad036a8b..fd2099a58a 100755 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -332,6 +332,7 @@ typedef struct RangeTblEntry { Alias* eref; /* expanded reference names */ Alias* pname; /* partition name, if any */ List* plist; + char* relnamespace; bool lateral; /* subquery or function is marked LATERAL? */ bool inh; /* inheritance requested? */ bool inFromCl; /* present in FROM clause? 
*/ diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index 45e67f5d20..e3924471ad 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -1382,6 +1382,7 @@ typedef struct JoinExpr { Node* quals; /* qualifiers on join, if any */ Alias* alias; /* user-written alias clause, if any */ int rtindex; /* RT index assigned for join, or 0 */ + bool is_straight_join; /* set true if straight_join */ } JoinExpr; /* ---------- diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index c97b0664ff..53190e5288 100755 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -1948,6 +1948,7 @@ typedef struct SpecialJoinInfo { bool delay_upper_joins; /* can't commute with upper RHS */ List* join_quals; /* join quals, in implicit-AND list format */ bool varratio_cached; /* decide chach selec or not. */ + bool is_straight_join; /* set true if is straight_join*/ } SpecialJoinInfo; /* -- Gitee From 9c4a57af944c66cdae6d10fde019f7e8c6d66367 Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Wed, 10 Jul 2024 18:43:57 +0800 Subject: [PATCH 037/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dcopy=E8=AF=AD?= =?UTF-8?q?=E5=8F=A5WHEN=E8=A2=ABwhen=5Fexpr=E5=AD=97=E7=AC=A6=E4=B8=B2?= =?UTF-8?q?=E6=B3=A8=E5=85=A5=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/gram.y | 3 +++ src/test/regress/expected/copyselect.out | 7 +++++++ src/test/regress/sql/copyselect.sql | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index bf6dde2f94..9cb9518da9 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -6151,6 +6151,9 @@ copy_generic_opt_list: copy_generic_opt_elem: ColLabel copy_generic_opt_arg { + /* Character "when_expr" may be injected as "COPY ... WHEN ... 
"*/ + if (pg_strcasecmp($1, "when_expr") == 0) + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("option \"%s\" not recognized", $1))); $$ = makeDefElem($1, $2); } ; diff --git a/src/test/regress/expected/copyselect.out b/src/test/regress/expected/copyselect.out index 1aaa75897f..1fdfa3b56f 100644 --- a/src/test/regress/expected/copyselect.out +++ b/src/test/regress/expected/copyselect.out @@ -60,6 +60,13 @@ ERROR: syntax error at or near "(" LINE 1: copy (select * from test1) (t,id) to stdout; ^ -- +-- Test when_expr +-- +copy (select * from test1) to stdout with (when_expr); -- error +ERROR: option "when_expr" not recognized +copy (select * from test1) to stdout with (when_expr''); -- error +ERROR: option "when_expr" not recognized +-- -- Test JOIN -- copy (select * from test1 join test2 using (id) order by 1) to stdout; diff --git a/src/test/regress/sql/copyselect.sql b/src/test/regress/sql/copyselect.sql index 92d64c5c6e..34cfb8265b 100644 --- a/src/test/regress/sql/copyselect.sql +++ b/src/test/regress/sql/copyselect.sql @@ -47,6 +47,11 @@ copy (select * from test1) from stdin; -- copy (select * from test1) (t,id) to stdout; -- +-- Test when_expr +-- +copy (select * from test1) to stdout with (when_expr); -- error +copy (select * from test1) to stdout with (when_expr''); -- error +-- -- Test JOIN -- copy (select * from test1 join test2 using (id) order by 1) to stdout; -- Gitee From bb837af9289f2c58c9cf6602ff4324aad231762f Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Fri, 12 Jul 2024 15:47:07 +0800 Subject: [PATCH 038/347] =?UTF-8?q?bugfix=EF=BC=9Atuplestore=E5=86=99?= =?UTF-8?q?=E5=85=A5file=E6=97=B6=EF=BC=8Cmemcount=E4=B8=8D=E4=BC=9A?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E3=80=82=E5=AF=BC=E8=87=B4=E8=8E=B7=E5=8F=96?= =?UTF-8?q?memcount=E5=B0=8F=E4=BA=8E=E5=85=83=E7=BB=84=E6=95=B0=E9=87=8F?= =?UTF-8?q?=EF=BC=8C=E5=87=BA=E7=8E=B0=E8=B8=A9=E5=86=85=E5=AD=98=E9=94=99?= =?UTF-8?q?=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_clause.cpp | 2 +- src/common/pl/plpgsql/src/pl_exec.cpp | 27 ++++++--- src/gausskernel/runtime/executor/execQual.cpp | 5 +- src/gausskernel/storage/buffer/bufmgr.cpp | 1 - .../storage/replication/syncrep.cpp | 59 ------------------- .../expected/plpgsql/plpgsql_normal.out | 20 ++++++- .../regress/sql/plpgsql/plpgsql_normal.sql | 4 ++ 7 files changed, 48 insertions(+), 70 deletions(-) diff --git a/src/common/backend/parser/parse_clause.cpp b/src/common/backend/parser/parse_clause.cpp index 800402a2f9..31fb09798c 100644 --- a/src/common/backend/parser/parse_clause.cpp +++ b/src/common/backend/parser/parse_clause.cpp @@ -624,7 +624,7 @@ static RangeTblEntry* transformRangeSubselect(ParseState* pstate, RangeSubselect SelectStmt *subQueryStmt; ColumnRef *cref; ResTarget *resT; - List *exprlist, *filterlist; + List *filterlist; subQueryStmt = (SelectStmt *)r->subquery; prev = NULL; if (1 == list_length(subQueryStmt->targetList)) { diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index c94d150eb3..7a032d7574 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -2095,9 +2095,8 @@ void AutonomPipelinedFuncRewriteResult(PLpgSQL_execstate *estate) } Tuplestorestate *tuple_store = estate->tuple_store; - int size = tuplestore_get_memtupcount(tuple_store); - Datum *values = (Datum *)palloc(sizeof(Datum) * size); - bool *isNulls = (bool *)palloc(sizeof(bool) * size); + List* null_list = NIL; + List* datum_list = NIL; int32 elemTypeMod; Oid elemTypeId = searchsubtypebytypeId(estate->fn_rettype, &elemTypeMod); @@ -2111,14 +2110,16 @@ void AutonomPipelinedFuncRewriteResult(PLpgSQL_execstate *estate) int index = 0; while (tuplestore_gettupleslot(tuple_store, true, false, slot)) { Datum datum; + bool is_null = false; if (estate->pipelined_resistuple) { - isNulls[index] = false; datum = ExecFetchSlotTupleDatum(slot); } else { - datum 
= heap_slot_getattr(slot, 1, &isNulls[index]); + datum = heap_slot_getattr(slot, 1, &is_null); } - values[index] = datumCopy(datum, elemByVal, elemLen); + null_list = lappend_int(null_list, (int)is_null); + datum_list = lappend(datum_list, (void*)datumCopy(datum, elemByVal, elemLen)); index++; + ExecClearTuple(slot); } tuplestore_end(tuple_store); @@ -2128,8 +2129,20 @@ void AutonomPipelinedFuncRewriteResult(PLpgSQL_execstate *estate) int dims[1]; int lbs[1]; - dims[0] = size; + dims[0] = index; lbs[0] = 1; + Datum *values = (Datum *)palloc(sizeof(Datum) * index); + bool *isNulls = (bool *)palloc(sizeof(bool) * index); + ListCell* null_cell = NULL; + ListCell* datum_cell = NULL; + int loop_index = 0; + forboth(null_cell, null_list, datum_cell, datum_list) { + isNulls[loop_index] = (bool)lfirst_int(null_cell); + values[loop_index] = (Datum)lfirst(datum_cell); + loop_index++; + } + list_free(null_list); + list_free(datum_list); estate->retval = PointerGetDatum(construct_md_array(values, isNulls, 1, dims, lbs, elemTypeId, elemLen, elemByVal, elemAlign)); } else { diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index ba8a50db3a..2b6027e0f8 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -3071,6 +3071,7 @@ Tuplestorestate* ExecMakeTableFunctionResult( int* var_dno = NULL; bool has_refcursor = false; bool has_out_param = false; + bool is_pipelined = false; FuncExpr *fexpr = NULL; bool savedIsSTP = u_sess->SPI_cxt.is_stp; @@ -3120,6 +3121,7 @@ Tuplestorestate* ExecMakeTableFunctionResult( Datum datum = SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_prokind, &isNull); proIsProcedure = PROC_IS_PRO(CharGetDatum(datum)); + is_pipelined = PROC_IS_PIPELINED(CharGetDatum(datum)); if (proIsProcedure) { (reinterpret_cast(funcexpr))->prokind = 'p'; } else { @@ -3135,6 +3137,7 @@ Tuplestorestate* ExecMakeTableFunctionResult( ReleaseSysCache(tp); } else { 
proIsProcedure = PROC_IS_PRO(prokind); + is_pipelined = PROC_IS_PIPELINED(prokind); u_sess->SPI_cxt.is_stp = savedIsSTP; } } @@ -3467,7 +3470,7 @@ no_function_result: MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, u_sess->attr.attr_memory.work_mem); rsinfo.setResult = tupstore; - if (!returnsSet) { + if (!returnsSet && !is_pipelined) { int natts = expectedDesc->natts; Datum* nulldatums = NULL; bool* nullflags = NULL; diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index fadb38ceb0..2eed33cdaa 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -6384,7 +6384,6 @@ bool TryLockBuffer(Buffer buffer, int mode, bool must_wait) bool ConditionalLockBuffer(Buffer buffer) { volatile BufferDesc *buf = NULL; - int dms_retry_times = 0; Assert(BufferIsValid(buffer)); if (BufferIsLocal(buffer)) { diff --git a/src/gausskernel/storage/replication/syncrep.cpp b/src/gausskernel/storage/replication/syncrep.cpp index 2f34aff40f..ff01ed30bb 100755 --- a/src/gausskernel/storage/replication/syncrep.cpp +++ b/src/gausskernel/storage/replication/syncrep.cpp @@ -85,8 +85,6 @@ static SyncStandbyNumState check_sync_standbys_num(const SyncRepStandbyData* syn static bool judge_sync_standbys_num(const SyncRepStandbyData* sync_standbys, int num_standbys, SyncStandbyNumState* state); -static void SyncRepQueueInsert(int mode); -static bool SyncRepCancelWait(void); static void SyncRepWaitCompletionQueue(); static void SyncRepNotifyComplete(); @@ -497,59 +495,6 @@ SyncWaitRet SyncRepWaitForLSN(XLogRecPtr XactCommitLSN, bool enableHandleCancel) return waitStopRes; } -/* - * Insert t_thrd.proc into the specified SyncRepQueue, maintaining sorted invariant. - * - * Usually we will go at tail of queue, though it's possible that we arrive - * here out of order, so start at tail and work back to insertion point. 
- */ -static void SyncRepQueueInsert(int mode) -{ - PGPROC *proc = NULL; - - Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE); - proc = (PGPROC *)SHMQueuePrev(&(t_thrd.walsender_cxt.WalSndCtl->SyncRepQueue[mode]), - &(t_thrd.walsender_cxt.WalSndCtl->SyncRepQueue[mode]), - offsetof(PGPROC, syncRepLinks)); - - while (proc != NULL) { - /* - * Stop at the queue element that we should after to ensure the queue - * is ordered by LSN. The same lsn is allowed in sync queue. - */ - if (XLByteLE(proc->waitLSN, t_thrd.proc->waitLSN)) - break; - - proc = (PGPROC *)SHMQueuePrev(&(t_thrd.walsender_cxt.WalSndCtl->SyncRepQueue[mode]), &(proc->syncRepLinks), - offsetof(PGPROC, syncRepLinks)); - } - - if (proc != NULL) - SHMQueueInsertAfter(&(proc->syncRepLinks), &(t_thrd.proc->syncRepLinks)); - else - SHMQueueInsertAfter(&(t_thrd.walsender_cxt.WalSndCtl->SyncRepQueue[mode]), &(t_thrd.proc->syncRepLinks)); -} - -/* - * Acquire SyncRepLock and cancel any wait currently not in completion queue. - */ -static bool SyncRepCancelWait(void) -{ - bool success = false; - - LWLockAcquire(SyncRepLock, LW_EXCLUSIVE); - if (!t_thrd.proc->syncRepInCompleteQueue) { - if (!SHMQueueIsDetached(&(t_thrd.proc->syncRepLinks))) { - SHMQueueDelete(&(t_thrd.proc->syncRepLinks)); - } - t_thrd.proc->syncRepState = SYNC_REP_NOT_WAITING; - success = true; - } - LWLockRelease(SyncRepLock); - - return success; -} - void SyncRepCleanupAtProcExit(void) { if (t_thrd.proc->syncRepLinks.prev || t_thrd.proc->syncRepLinks.next || @@ -629,10 +574,6 @@ void SyncRepReleaseWaiters(void) XLogRecPtr writePtr; XLogRecPtr flushPtr; XLogRecPtr replayPtr; - int numreceive = 0; - int numwrite = 0; - int numflush = 0; - int numapply = 0; bool got_recptr = false; bool am_sync = false; diff --git a/src/test/regress/expected/plpgsql/plpgsql_normal.out b/src/test/regress/expected/plpgsql/plpgsql_normal.out index 888e15d6e1..8201c9929a 100644 --- a/src/test/regress/expected/plpgsql/plpgsql_normal.out +++ 
b/src/test/regress/expected/plpgsql/plpgsql_normal.out @@ -313,6 +313,7 @@ for i in 1 .. count loop result.c1 = 123; result.c2 = 32; pipe row(result); +pipe row(null); end loop; return; end; @@ -320,7 +321,24 @@ $BODY$; select count(*) from func_pipelined_022(10000); count ------- - 10000 + 20000 +(1 row) + +select func_pipelined_022(10); + func_pipelined_022 +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"(123,32)","(,)","(123,32)","(,)","(123,32)","(,)","(123,32)","(,)","(123,32)","(,)","(123,32)","(,)","(123,32)","(,)","(123,32)","(,)","(123,32)","(,)","(123,32)","(,)"} +(1 row) + +select * from func_pipelined_022(0); + c1 | c2 +----+---- +(0 rows) + +select func_pipelined_022(0); + func_pipelined_022 +-------------------- + (1 row) reset work_mem; diff --git a/src/test/regress/sql/plpgsql/plpgsql_normal.sql b/src/test/regress/sql/plpgsql/plpgsql_normal.sql index 807b63312b..bb9df6ccb0 100644 --- a/src/test/regress/sql/plpgsql/plpgsql_normal.sql +++ b/src/test/regress/sql/plpgsql/plpgsql_normal.sql @@ -162,11 +162,15 @@ for i in 1 .. 
count loop result.c1 = 123; result.c2 = 32; pipe row(result); +pipe row(null); end loop; return; end; $BODY$; select count(*) from func_pipelined_022(10000); +select func_pipelined_022(10); +select * from func_pipelined_022(0); +select func_pipelined_022(0); reset work_mem; -- nest function call for ereport -- Gitee From 6f25f4b4a766ef7512d882e9756fc59f6c2a70a5 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Fri, 12 Jul 2024 17:38:48 +0800 Subject: [PATCH 039/347] =?UTF-8?q?cursor=E4=B8=8D=E5=B8=A6sql=E8=AF=AD?= =?UTF-8?q?=E5=8F=A5=E6=97=B6=E5=9C=A8=E6=89=93=E5=BC=80cursor=E6=97=B6?= =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96rowtype?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/gram.y | 1 + src/common/pl/plpgsql/src/pl_comp.cpp | 33 ++-- src/common/pl/plpgsql/src/pl_exec.cpp | 38 ++++- .../sqladvisor/sqladvisor_online.cpp | 3 +- src/include/utils/plpgsql.h | 3 + .../expected/plpgsql_cursor_rowtype.out | 148 ++++++++++++++++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 53 +++++++ 7 files changed, 256 insertions(+), 23 deletions(-) diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index b2ab0c606f..79db439d83 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -9783,6 +9783,7 @@ read_datatype(int tok) { PLpgSQL_type *newp = plpgsql_build_datatype(UNKNOWNOID, -1, InvalidOid); newp->cursorExpr = var->cursor_explicit_expr; + newp->cursorDno = var->dno; return newp; } } else if (ns && ns->itemtype == PLPGSQL_NSTYPE_ROW) diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp index fd71cf3c7d..ed940f7d5e 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -3876,6 +3876,7 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ rec->refname = pstrdup(refname); rec->lineno = lineno; rec->expr = dtype->cursorExpr; + rec->cursorDno = 
dtype->cursorDno; rec->addNamespace = add2namespace; if (ALLOW_PROCEDURE_COMPILE_CHECK) { MemoryContext temp = NULL; @@ -3883,25 +3884,27 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ bool *nulls = NULL; errno_t rc = EOK; - if(u_sess->plsql_cxt.curr_compile_context != NULL && - u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt != NULL) { - target_cxt = u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt; - temp = MemoryContextSwitchTo(target_cxt); - } + if (rec->expr) { + if (u_sess->plsql_cxt.curr_compile_context != NULL && + u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt != NULL) { + target_cxt = u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt; + temp = MemoryContextSwitchTo(target_cxt); + } - rec->tupdesc = getCursorTupleDesc(rec->expr, false, false); + rec->tupdesc = getCursorTupleDesc(rec->expr, false, false); - nulls = (bool*)palloc(rec->tupdesc->natts * sizeof(bool)); - rc = memset_s(nulls, rec->tupdesc->natts * sizeof(bool), true, rec->tupdesc->natts * sizeof(bool)); - securec_check(rc, "\0", "\0"); + nulls = (bool *)palloc(rec->tupdesc->natts * sizeof(bool)); + rc = memset_s(nulls, rec->tupdesc->natts * sizeof(bool), true, rec->tupdesc->natts * sizeof(bool)); + securec_check(rc, "\0", "\0"); - rec->tup = (HeapTuple)tableam_tops_form_tuple(rec->tupdesc, NULL, nulls); - rec->freetupdesc = (rec->tupdesc != NULL) ? true : false; - rec->freetup = (rec->tup != NULL) ? true : false; - pfree_ext(nulls); + rec->tup = (HeapTuple)tableam_tops_form_tuple(rec->tupdesc, NULL, nulls); + rec->freetupdesc = (rec->tupdesc != NULL) ? true : false; + rec->freetup = (rec->tup != NULL) ? 
true : false; + pfree_ext(nulls); - if (target_cxt) { - temp = MemoryContextSwitchTo(temp); + if (target_cxt) { + temp = MemoryContextSwitchTo(temp); + } } } else { rec->tup = NULL; diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index c94d150eb3..4d2c2169c6 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1173,6 +1173,8 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d MemoryContext temp = NULL; MemoryContext target_cxt = NULL; + Assert(rec->expr != NULL); + target_cxt = rec->ispkg ? rec->pkg->pkg_cxt : CurrentMemoryContext; temp = MemoryContextSwitchTo(target_cxt); @@ -1301,7 +1303,9 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, } if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { - exec_cursor_rowtype_init(&estate, estate.datums[i], func); + PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; + if (rec->expr) + exec_cursor_rowtype_init(&estate, estate.datums[i], func); } } @@ -1641,7 +1645,9 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, } if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { - exec_cursor_rowtype_init(&estate, estate.datums[i], func); + PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; + if (rec->expr) + exec_cursor_rowtype_init(&estate, estate.datums[i], func); } } @@ -2251,9 +2257,12 @@ HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata) estate.datums[i] = copy_plpgsql_datum(func->datums[i]); } else { estate.datums[i] = func->datums[i]; - } - if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { - exec_cursor_rowtype_init(&estate, estate.datums[i], func); + } + + if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { + PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; + if (rec->expr) + exec_cursor_rowtype_init(&estate, estate.datums[i], func); } } @@ -8721,6 +8730,21 @@ static int exec_stmt_open(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt) * 
---------- */ query = stmt->query; + for (int i = 0; i < estate->ndatums; i++) { + if (estate->datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { + PLpgSQL_rec *rec = (PLpgSQL_rec*)estate->datums[i]; + PLpgSQL_var *cursor = (PLpgSQL_var*)estate->datums[rec->cursorDno]; + if (rec->cursorDno == curvar->dno) { + MemoryContext temp = NULL; + MemoryContext target_cxt = NULL; + target_cxt = rec->ispkg ? rec->pkg->pkg_cxt : CurrentMemoryContext; + temp = MemoryContextSwitchTo(target_cxt); + rec->expr = copyPLpgsqlExpr(query); + exec_cursor_rowtype_init(estate, estate->datums[i], estate->func); + temp = MemoryContextSwitchTo(temp); + } + } + } gsplsql_set_query(query); if (query->plan == NULL) { exec_prepare_plan(estate, query, stmt->cursor_options); @@ -14291,7 +14315,9 @@ plpgsql_exec_event_trigger(PLpgSQL_function *func, EventTriggerData *trigdata) for (i = 0; i < estate.ndatums; i++) { estate.datums[i] = copy_plpgsql_datum(func->datums[i]); if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { - exec_cursor_rowtype_init(&estate, estate.datums[i], func); + PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; + if (rec->expr) + exec_cursor_rowtype_init(&estate, estate.datums[i], func); } } diff --git a/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp b/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp index 5f1ab195ae..634ef6b447 100644 --- a/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp +++ b/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp @@ -44,7 +44,6 @@ static bool checkGlobalAdvMemSize(); static void copyAdviseSearchPathFromSess(AdviseSearchPath* sp); static PLpgSQL_execstate* copyPLpgEstate(PLpgSQL_execstate* srcEstate); -static PLpgSQL_expr* copyPLpgsqlExpr(PLpgSQL_expr* srcExpr); static PLpgSQL_nsitem* copyPLpgNsitem(PLpgSQL_nsitem* srcNs); static PLpgSQL_function* copyPLpgsqlFunc(PLpgSQL_function* srcFunc); static bool equalParam(ParamListInfo bpA, ParamListInfo bpB); @@ -274,7 +273,7 @@ static bool 
equalPLpgsqlExpr(PLpgSQL_expr* exprA, PLpgSQL_expr* exprB) return false; } -static PLpgSQL_expr* copyPLpgsqlExpr(PLpgSQL_expr* srcExpr) +PLpgSQL_expr* copyPLpgsqlExpr(PLpgSQL_expr* srcExpr) { if (srcExpr == NULL) { return NULL; diff --git a/src/include/utils/plpgsql.h b/src/include/utils/plpgsql.h index d4de942e3e..e115610325 100644 --- a/src/include/utils/plpgsql.h +++ b/src/include/utils/plpgsql.h @@ -474,6 +474,7 @@ typedef struct { /* openGauss data type */ Oid tableofOid; TypeDependExtend* dependExtend; PLpgSQL_expr* cursorExpr; + int cursorDno; } PLpgSQL_type; typedef struct { @@ -598,6 +599,7 @@ typedef struct { /* Record variable (non-fixed structure) */ PLpgSQL_package* pkg = NULL; PLpgSQL_expr* default_val = NULL; PLpgSQL_expr* expr = NULL; + int cursorDno; } PLpgSQL_rec; typedef struct { /* Field in record */ @@ -2122,6 +2124,7 @@ extern bool CheckElementParsetreeTag(Node* parsetree); extern Datum transVaratt1BTo4B(Datum value); extern PLpgSQL_datum* deepCopyPlpgsqlDatum(PLpgSQL_datum* datum); extern PLpgSQL_var* copyPlpgsqlVar(PLpgSQL_var* src); +extern PLpgSQL_expr* copyPLpgsqlExpr(PLpgSQL_expr* srcExpr); extern void assign_text_var(PLpgSQL_var* var, const char* str); extern MemoryContext GetAvailableHoldContext(List* PortalContextList); diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 518f4afc9e..5f8a26bbe8 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -296,6 +296,154 @@ INFO: (ppp1)var1: 66 (1 row) +CREATE TABLE STORAGE_LARGE_TABLE_STORAGE_TABLE_000 (c_id int, +c_d_id int NOT NULL, +c_w_id int NOT NULL, +c_first varchar(16) NOT NULL, +c_middle char(2), +c_last varchar(16) NOT NULL, +c_street_1 varchar(20) NOT NULL, +c_street_2 varchar(20), +c_city varchar(20) NOT NULL, +c_state char(2) NOT NULL, +c_zip char(9) NOT NULL, +c_phone char(16) NOT NULL, +c_since timestamp, +c_credit char(2) NOT 
NULL, +c_credit_lim numeric(12,2), +c_discount numeric(4,4), +c_balance numeric(12,2), +c_ytd_payment numeric(12,2) NOT NULL, +c_payment_cnt int NOT NULL, +c_delivery_cnt int NOT NULL, +c_data varchar(500) NOT NULL); +CREATE TABLE STORAGE_LARGE_CURSOR_TABLE_216 AS SELECT * FROM STORAGE_LARGE_TABLE_STORAGE_TABLE_000 WHERE C_ID=0; +declare +temp integer := 0; +id_temp integer; +name_temp text; +begin +for temp in 0..99 loop +INSERT INTO STORAGE_LARGE_CURSOR_TABLE_216 VALUES (temp,temp,temp,'iscmvlstpn','OE','BARBARBAR','bkilipzfcxcle','pmbwodmpvhvpafbj','dyfaoptppzjcgjrvyqa','uq',480211111,9400872216162535,null,'GC',50000.0,0.4361328,-10.0,10.0,1,0,'QVLDETANRBRBURBMZQUJSHOQNGGSMNTECCIPRIIRDHIRWIYNPFZCSYKXXYSCDSF'); +end loop; +end; +/ +Declare +Type MyRefCur IS Ref Cursor RETURN STORAGE_LARGE_CURSOR_TABLE_216%ROWTYPE; +c1 MyRefCur; +temp c1%RowType; +Begin +Open c1 For Select * from STORAGE_LARGE_CURSOR_TABLE_216 ORDER BY C_ID; +LOOP +FETCH C1 INTO temp; +EXIT WHEN C1%NOTFOUND; +raise info 'str1 is %', temp.C_id; +END LOOP; +Close c1; +End; +/ +INFO: str1 is 0 +INFO: str1 is 1 +INFO: str1 is 2 +INFO: str1 is 3 +INFO: str1 is 4 +INFO: str1 is 5 +INFO: str1 is 6 +INFO: str1 is 7 +INFO: str1 is 8 +INFO: str1 is 9 +INFO: str1 is 10 +INFO: str1 is 11 +INFO: str1 is 12 +INFO: str1 is 13 +INFO: str1 is 14 +INFO: str1 is 15 +INFO: str1 is 16 +INFO: str1 is 17 +INFO: str1 is 18 +INFO: str1 is 19 +INFO: str1 is 20 +INFO: str1 is 21 +INFO: str1 is 22 +INFO: str1 is 23 +INFO: str1 is 24 +INFO: str1 is 25 +INFO: str1 is 26 +INFO: str1 is 27 +INFO: str1 is 28 +INFO: str1 is 29 +INFO: str1 is 30 +INFO: str1 is 31 +INFO: str1 is 32 +INFO: str1 is 33 +INFO: str1 is 34 +INFO: str1 is 35 +INFO: str1 is 36 +INFO: str1 is 37 +INFO: str1 is 38 +INFO: str1 is 39 +INFO: str1 is 40 +INFO: str1 is 41 +INFO: str1 is 42 +INFO: str1 is 43 +INFO: str1 is 44 +INFO: str1 is 45 +INFO: str1 is 46 +INFO: str1 is 47 +INFO: str1 is 48 +INFO: str1 is 49 +INFO: str1 is 50 +INFO: str1 is 51 +INFO: str1 is 
52 +INFO: str1 is 53 +INFO: str1 is 54 +INFO: str1 is 55 +INFO: str1 is 56 +INFO: str1 is 57 +INFO: str1 is 58 +INFO: str1 is 59 +INFO: str1 is 60 +INFO: str1 is 61 +INFO: str1 is 62 +INFO: str1 is 63 +INFO: str1 is 64 +INFO: str1 is 65 +INFO: str1 is 66 +INFO: str1 is 67 +INFO: str1 is 68 +INFO: str1 is 69 +INFO: str1 is 70 +INFO: str1 is 71 +INFO: str1 is 72 +INFO: str1 is 73 +INFO: str1 is 74 +INFO: str1 is 75 +INFO: str1 is 76 +INFO: str1 is 77 +INFO: str1 is 78 +INFO: str1 is 79 +INFO: str1 is 80 +INFO: str1 is 81 +INFO: str1 is 82 +INFO: str1 is 83 +INFO: str1 is 84 +INFO: str1 is 85 +INFO: str1 is 86 +INFO: str1 is 87 +INFO: str1 is 88 +INFO: str1 is 89 +INFO: str1 is 90 +INFO: str1 is 91 +INFO: str1 is 92 +INFO: str1 is 93 +INFO: str1 is 94 +INFO: str1 is 95 +INFO: str1 is 96 +INFO: str1 is 97 +INFO: str1 is 98 +INFO: str1 is 99 +drop table STORAGE_LARGE_TABLE_STORAGE_TABLE_000; +drop table STORAGE_LARGE_CURSOR_TABLE_216; --test: drop column create table int_4_2(a NUMBER, d NUMBER, b VARCHAR2(5)); insert into int_4_2(a, d, b) values(3, 6,'johan'); diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index 94c76c2eec..091f8e2ce0 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -211,6 +211,59 @@ call pck3_1.ppp1(); call pck3_1.ppp1(); call pck3_1.ppp1(); +CREATE TABLE STORAGE_LARGE_TABLE_STORAGE_TABLE_000 (c_id int, +c_d_id int NOT NULL, +c_w_id int NOT NULL, +c_first varchar(16) NOT NULL, +c_middle char(2), +c_last varchar(16) NOT NULL, +c_street_1 varchar(20) NOT NULL, +c_street_2 varchar(20), +c_city varchar(20) NOT NULL, +c_state char(2) NOT NULL, +c_zip char(9) NOT NULL, +c_phone char(16) NOT NULL, +c_since timestamp, +c_credit char(2) NOT NULL, +c_credit_lim numeric(12,2), +c_discount numeric(4,4), +c_balance numeric(12,2), +c_ytd_payment numeric(12,2) NOT NULL, +c_payment_cnt int NOT NULL, +c_delivery_cnt int NOT NULL, +c_data 
varchar(500) NOT NULL); + +CREATE TABLE STORAGE_LARGE_CURSOR_TABLE_216 AS SELECT * FROM STORAGE_LARGE_TABLE_STORAGE_TABLE_000 WHERE C_ID=0; + +declare +temp integer := 0; +id_temp integer; +name_temp text; +begin +for temp in 0..99 loop +INSERT INTO STORAGE_LARGE_CURSOR_TABLE_216 VALUES (temp,temp,temp,'iscmvlstpn','OE','BARBARBAR','bkilipzfcxcle','pmbwodmpvhvpafbj','dyfaoptppzjcgjrvyqa','uq',480211111,9400872216162535,null,'GC',50000.0,0.4361328,-10.0,10.0,1,0,'QVLDETANRBRBURBMZQUJSHOQNGGSMNTECCIPRIIRDHIRWIYNPFZCSYKXXYSCDSF'); +end loop; +end; +/ + +Declare +Type MyRefCur IS Ref Cursor RETURN STORAGE_LARGE_CURSOR_TABLE_216%ROWTYPE; +c1 MyRefCur; +temp c1%RowType; +Begin +Open c1 For Select * from STORAGE_LARGE_CURSOR_TABLE_216 ORDER BY C_ID; +LOOP +FETCH C1 INTO temp; +EXIT WHEN C1%NOTFOUND; +raise info 'str1 is %', temp.C_id; +END LOOP; +Close c1; +End; +/ + +drop table STORAGE_LARGE_TABLE_STORAGE_TABLE_000; +drop table STORAGE_LARGE_CURSOR_TABLE_216; + --test: drop column create table int_4_2(a NUMBER, d NUMBER, b VARCHAR2(5)); insert into int_4_2(a, d, b) values(3, 6,'johan'); -- Gitee From 1e4505e3d4a2303ee00c3bb57c7fc63cf252150d Mon Sep 17 00:00:00 2001 From: blig Date: Fri, 12 Jul 2024 10:06:09 +0800 Subject: [PATCH 040/347] =?UTF-8?q?=E9=80=BB=E8=BE=91=E5=A4=8D=E5=88=B6?= =?UTF-8?q?=E5=B9=B6=E8=A1=8C=E8=A7=A3=E7=A0=81=E7=9A=84begin=E4=B8=AD?= =?UTF-8?q?=E6=B7=BB=E5=8A=A0xid?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_basebackup/pg_recvlogical.cpp | 10 ++++++++++ .../replication/logical/parallel_reorderbuffer.cpp | 12 ++++++++++++ 2 files changed, 22 insertions(+) diff --git a/src/bin/pg_basebackup/pg_recvlogical.cpp b/src/bin/pg_basebackup/pg_recvlogical.cpp index a8269e2aa7..97f6d7dbab 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.cpp +++ b/src/bin/pg_basebackup/pg_recvlogical.cpp @@ -390,6 +390,16 @@ static void BeginToText(const char* stream, uint32 *curPos, PQExpBuffer res) *curPos += 
sizeof(LSNlower); appendPQExpBuffer(res, "first_lsn: %X/%X", LSNupper, LSNlower); + if (stream[*curPos] == 'X') { + *curPos += 1; + uint32 xidupper = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(xidupper); + uint32 xidlower = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(xidlower); + uint64 xid = ((uint64)(xidupper) << upperPart) + xidlower; + appendPQExpBuffer(res, " xid: %lu", xid); + } + if (stream[*curPos] == 'T') { *curPos += 1; uint32 timeLen = ntohl(*(uint32 *)(&stream[*curPos])); diff --git a/src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp b/src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp index 6366a09380..0c4095b421 100644 --- a/src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp +++ b/src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp @@ -1506,6 +1506,13 @@ static void ParallelOutputBegin(StringInfo out, logicalLog *change, ParallelDeco appendStringInfoChar(out, 'B'); pq_sendint64(out, change->csn); pq_sendint64(out, txn->first_lsn); + + if (pdata->pOptions.include_xids) { + appendStringInfoChar(out, 'X'); + pq_sendint64(out, txn->xid); + beginLen += 1 + sizeof(uint64); + } + if (pdata->pOptions.include_timestamp) { appendStringInfoChar(out, 'T'); const char *timeStamp = timestamptz_to_str(txn->commit_time); @@ -1532,6 +1539,11 @@ static void ParallelOutputBegin(StringInfo out, logicalLog *change, ParallelDeco const uint32 upperPart = 32; appendStringInfo(out, "BEGIN CSN: %lu first_lsn: %X/%X", change->csn, (uint32)(txn->first_lsn >> upperPart), (uint32)(txn->first_lsn)); + + if (pdata->pOptions.include_xids) { + appendStringInfo(out, " xid: %lu", txn->xid); + } + if (pdata->pOptions.include_timestamp) { const char *timeStamp = timestamptz_to_str(txn->commit_time); appendStringInfo(out, " commit_time: %s", timeStamp); -- Gitee From bcf34f0cdec943b6f6433ca2b246ef8f8cb1df11 Mon Sep 17 00:00:00 2001 From: chen-chao666 <1790599142@qq.com> Date: Mon, 
15 Jul 2024 16:30:24 +0800 Subject: [PATCH 041/347] =?UTF-8?q?dms=E6=8E=A8=E7=82=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 8391af312a..e0e6760991 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=c6fe7641f4f13fb961ceebe1f8725865a2afe3ee +dms_commit_id=1dcd3d829655f517e24ba753c770f1b45cd5edf6 dss_commit_id=04b09e0414525a7bb78b8baaabfedcc675f91102 cbb_commit_id=d22f1e92dc9fc75efa8f0f027321faf0fd1c770b -- Gitee From 5896a05da4ae7805c638d4e94e62cc7d40bccc35 Mon Sep 17 00:00:00 2001 From: yuu Date: Thu, 18 Jan 2024 06:14:52 +0000 Subject: [PATCH 042/347] use heap_multi_insert for the insert into xxx select yyy query. fix comment from committer add test --- src/bin/gs_guc/cluster_guc.conf | 1 + src/common/backend/nodes/copyfuncs.cpp | 2 ++ src/common/backend/nodes/equalfuncs.cpp | 2 ++ src/common/backend/nodes/outfuncs.cpp | 6 +++++ src/common/backend/nodes/readfuncs.cpp | 4 ++++ src/common/backend/parser/analyze.cpp | 18 +++++++++++++++ src/common/backend/parser/gram.y | 5 +++++ src/common/backend/utils/init/globals.cpp | 1 + src/common/backend/utils/misc/guc.cpp | 14 ++++++++++++ src/gausskernel/optimizer/plan/createplan.cpp | 1 + .../runtime/executor/nodeModifyTable.cpp | 22 ++++++++++++++----- src/include/executor/node/nodeModifyTable.h | 3 +++ .../knl/knl_guc/knl_session_attr_storage.h | 2 +- src/include/miscadmin.h | 1 + src/include/nodes/parsenodes_common.h | 2 ++ .../expected/iud_heap_multi_insert.out | 20 +++++++++++++++++ .../regress/output/recovery_2pc_tools.source | 1 + src/test/regress/parallel_schedule0 | 2 ++ .../regress/sql/iud_heap_multi_insert.sql | 13 +++++++++++ 19 files changed, 113 insertions(+), 7 deletions(-) create mode 100644 
src/test/regress/expected/iud_heap_multi_insert.out create mode 100644 src/test/regress/sql/iud_heap_multi_insert.sql diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index e800cc4995..2d0ac8415b 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -219,6 +219,7 @@ enable_global_syscache|bool|0,0|NULL|NULL| gpc_clean_timeout|int|300,86400|NULL|NULL| enable_hashagg|bool|0,0|NULL|NULL| enable_hashjoin|bool|0,0|NULL|NULL| +enable_heap_multi_insert_for_insert_select|bool|0,0|NULL|NULL| enable_sortgroup_agg|bool|0,0|NULL|NULL| enable_hdfs_predicate_pushdown|bool|0,0|NULL|NULL| enable_hypo_index|bool|0,0|NULL|NULL| diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 47ce926f5e..885eef2b0a 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -4938,6 +4938,7 @@ static Query* _copyQuery(const Query* from) COPY_STRING_FIELD(unique_sql_text); #endif COPY_SCALAR_FIELD(can_push); + COPY_SCALAR_FIELD(is_dist_insertselect); COPY_SCALAR_FIELD(unique_check); COPY_NODE_FIELD(resultRelations); COPY_NODE_FIELD(withCheckOptions); @@ -5008,6 +5009,7 @@ static InsertStmt* _copyInsertStmt(const InsertStmt* from) } COPY_SCALAR_FIELD(isRewritten); COPY_SCALAR_FIELD(hasIgnore); + COPY_SCALAR_FIELD(is_dist_insertselect); return newnode; } diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index 8e48359b3a..be45e68739 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -979,6 +979,7 @@ static bool _equalQuery(const Query* a, const Query* b) COMPARE_SCALAR_FIELD(use_star_targets); COMPARE_SCALAR_FIELD(is_from_full_join_rewrite); COMPARE_SCALAR_FIELD(can_push); + COMPARE_SCALAR_FIELD(is_dist_insertselect); COMPARE_SCALAR_FIELD(unique_check); COMPARE_NODE_FIELD(resultRelations); COMPARE_NODE_FIELD(withCheckOptions); @@ -1011,6 +1012,7 @@ static bool 
_equalInsertStmt(const InsertStmt* a, const InsertStmt* b) } COMPARE_NODE_FIELD(upsertClause); COMPARE_SCALAR_FIELD(hasIgnore); + COMPARE_SCALAR_FIELD(is_dist_insertselect); return true; } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 7c4096165c..5c968c58a0 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -4174,6 +4174,9 @@ static void _outInsertStmt(StringInfo str, InsertStmt* node) if (t_thrd.proc->workingVersionNum >= KEYWORD_IGNORE_COMPART_VERSION_NUM) { WRITE_BOOL_FIELD(hasIgnore); } + if (t_thrd.proc->workingVersionNum >= INSERT_INTO_SELECT_VERSION_NUM) { + WRITE_BOOL_FIELD(is_dist_insertselect); + } } static void _outUpdateStmt(StringInfo str, UpdateStmt* node) @@ -4956,6 +4959,9 @@ static void _outQuery(StringInfo str, Query* node) if (t_thrd.proc->workingVersionNum >= PARTIALPUSH_VERSION_NUM) { WRITE_BOOL_FIELD(can_push); } + if (t_thrd.proc->workingVersionNum >= INSERT_INTO_SELECT_VERSION_NUM) { + WRITE_BOOL_FIELD(is_dist_insertselect); + } if (t_thrd.proc->workingVersionNum >= SUBLINKPULLUP_VERSION_NUM) { WRITE_BOOL_FIELD(unique_check); } diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 815eb8d4a8..f9f97a17c8 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -1629,6 +1629,10 @@ static Query* _readQuery(void) { READ_BOOL_FIELD(can_push); } + IF_EXIST(is_dist_insertselect) + { + READ_BOOL_FIELD(is_dist_insertselect); + } IF_EXIST(unique_check) { diff --git a/src/common/backend/parser/analyze.cpp b/src/common/backend/parser/analyze.cpp index 658823c2fb..ead1a76f4a 100644 --- a/src/common/backend/parser/analyze.cpp +++ b/src/common/backend/parser/analyze.cpp @@ -1885,10 +1885,15 @@ static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt) pstate->p_relnamespace = NIL; sub_varnamespace = pstate->p_varnamespace; pstate->p_varnamespace = NIL; + if 
(stmt->returningList || stmt->upsertClause) + qry->is_dist_insertselect = false; + else + qry->is_dist_insertselect = stmt->is_dist_insertselect; } else { sub_rtable = NIL; /* not used, but keep compiler quiet */ sub_relnamespace = NIL; sub_varnamespace = NIL; + qry->is_dist_insertselect = false; } /* @@ -1912,6 +1917,19 @@ static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt) (errcode(ERRCODE_INVALID_OPERATION), errmsg("Not allowed to insert into relation pg_auth_history."))); } + if (qry->is_dist_insertselect) { + Oid relid = RelationGetRelid(targetrel); + HeapTuple classtup = SearchSysCache1(RELOID, relid); + Form_pg_class class_struct = (Form_pg_class)GETSTRUCT(classtup); + if (class_struct->parttype == PARTTYPE_PARTITIONED_RELATION || + class_struct->parttype == PARTTYPE_SUBPARTITIONED_RELATION || + class_struct->parttype == PARTTYPE_VALUE_PARTITIONED_RELATION ) { + qry->is_dist_insertselect = false; + } + ReleaseSysCache(classtup); + } + + if (targetrel != NULL && ((unsigned int)RelationGetInternalMask(targetrel) & INTERNAL_MASK_DINSERT)) { ereport(ERROR, diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 7b3d52ea34..77c8687fd2 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -23028,6 +23028,8 @@ insert_rest: $$->cols = NIL; $$->selectStmt = $1; $$->isRewritten = false; + if (((SelectStmt*)$1)->valuesLists == NULL) + $$->is_dist_insertselect = true; } | '(' insert_column_list ')' SelectStmt { @@ -23035,6 +23037,8 @@ insert_rest: $$->cols = $2; $$->selectStmt = $4; $$->isRewritten = false; + if (((SelectStmt*)$4)->valuesLists == NULL) + $$->is_dist_insertselect = true; } | DEFAULT VALUES { @@ -23042,6 +23046,7 @@ insert_rest: $$->cols = NIL; $$->selectStmt = NULL; $$->isRewritten = false; + $$->is_dist_insertselect = false; } ; diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 3904cc8820..48e2e01c21 100644 --- 
a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -81,6 +81,7 @@ const uint32 GRAND_VERSION_NUM = 92938; * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 INSERT_INTO_SELECT_VERSION_NUM = 92938; const uint32 ROTATE_UNROTATE_VERSION_NUM = 92937; const uint32 PIPELINED_FUNCTION_VERSION_NUM = 92936; const uint32 DISABLE_CONSTRAINT_VERSION_NUM = 92931; diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index f805cb740b..12ababad01 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -1956,6 +1956,20 @@ static void InitConfigureNamesBool() NULL, NULL }, + {{ + "enable_heap_multi_insert_for_insert_select", + PGC_USERSET, + NODE_SINGLENODE, + QUERY_TUNING, + gettext_noop("Enable enable heap_multi_insert for query like 'insert into xxx select yyy', as fast as the copy command"), + NULL + }, + &u_sess->attr.attr_storage.enable_heap_multi_insert_for_insert_select, + true, + NULL, + NULL, + NULL + }, {{"enable_default_ustore_table", PGC_USERSET, NODE_SINGLENODE, diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp index 8df755db7c..1e5dd03e16 100755 --- a/src/gausskernel/optimizer/plan/createplan.cpp +++ b/src/gausskernel/optimizer/plan/createplan.cpp @@ -9005,6 +9005,7 @@ ModifyTable* make_modifytable(CmdType operation, bool canSetTag, List* resultRel { ModifyTable* node = makeNode(ModifyTable); Plan* plan = &node->plan; + node->is_dist_insertselect = root->parse->is_dist_insertselect; double total_size; ListCell* subnode = NULL; double local_rows = 0; diff --git a/src/gausskernel/runtime/executor/nodeModifyTable.cpp b/src/gausskernel/runtime/executor/nodeModifyTable.cpp index 0073239a30..43522e620c 100644 --- a/src/gausskernel/runtime/executor/nodeModifyTable.cpp +++ 
b/src/gausskernel/runtime/executor/nodeModifyTable.cpp @@ -3544,13 +3544,23 @@ static TupleTableSlot* ExecModifyTable(PlanState* state) #endif if (operation == CMD_INSERT) { - if (node->ps.type == T_ModifyTableState || node->mt_upsert->us_action != UPSERT_NONE || node->isReplace || - (result_rel_info->ri_TrigDesc != NULL && (result_rel_info->ri_TrigDesc->trig_insert_before_row || - result_rel_info->ri_TrigDesc->trig_insert_instead_row))) + if (node->ps.type == T_ModifyTableState || + node->mt_upsert->us_action != UPSERT_NONE || + node->isReplace || + (result_rel_info->ri_TrigDesc != NULL && (result_rel_info->ri_TrigDesc->trig_insert_before_row || + result_rel_info->ri_TrigDesc->trig_insert_instead_row)) || + !ENABLE_HEAP_MULTI_INSERT_FOR_INSERT_SELECT || + (result_rel_info->ri_RelationDesc->rd_att->constr != NULL && + result_rel_info->ri_RelationDesc->rd_att->constr->cons_autoinc != NULL) || + RelationIsPartition(result_rel_info->ri_RelationDesc)) { ExecInsert = ExecInsertT; - else { - use_heap_multi_insert = true; - ExecInsert = ExecInsertT; + } else { + if (RelationIsAstoreFormat(result_rel_info->ri_RelationDesc)) { + use_heap_multi_insert = true; + ExecInsert = ExecInsertT; + } else { + ExecInsert = ExecInsertT; + } } if (use_heap_multi_insert) { diff --git a/src/include/executor/node/nodeModifyTable.h b/src/include/executor/node/nodeModifyTable.h index 27b99ec4f9..bca90151db 100644 --- a/src/include/executor/node/nodeModifyTable.h +++ b/src/include/executor/node/nodeModifyTable.h @@ -69,4 +69,7 @@ extern void ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, EState *est extern bool ExecComputeStoredUpdateExpr(ResultRelInfo *resultRelInfo, EState *estate, TupleTableSlot *slot, Tuple tuple, CmdType cmdtype, ItemPointer otid, Oid oldPartitionOid, int2 bucketid); +extern bool isSingleMode; +extern bool isRestoreMode; +#define ENABLE_HEAP_MULTI_INSERT_FOR_INSERT_SELECT ((!isSingleMode) && (!isRestoreMode) && 
(u_sess->attr.attr_storage.enable_heap_multi_insert_for_insert_select)) #endif /* NODEMODIFYTABLE_H */ diff --git a/src/include/knl/knl_guc/knl_session_attr_storage.h b/src/include/knl/knl_guc/knl_session_attr_storage.h index 427942bf7f..ed796f3af2 100755 --- a/src/include/knl/knl_guc/knl_session_attr_storage.h +++ b/src/include/knl/knl_guc/knl_session_attr_storage.h @@ -310,7 +310,7 @@ typedef struct knl_session_attr_storage { /* pre-read parms */ int heap_bulk_read_size; int vacuum_bulk_read_size; - + bool enable_heap_multi_insert_for_insert_select; } knl_session_attr_storage; #endif /* SRC_INCLUDE_KNL_KNL_SESSION_ATTR_STORAGE */ diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index f6d569bad4..1a3366c1cd 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -48,6 +48,7 @@ extern const uint32 NBTREE_DEDUPLICATION_VERSION_NUM; extern const uint32 MULTI_CHARSET_VERSION_NUM; extern const uint32 SRF_FUSION_VERSION_NUM; extern const uint32 INNER_UNIQUE_VERSION_NUM; +extern const uint32 INSERT_INTO_SELECT_VERSION_NUM; extern const uint32 PARTITION_ENHANCE_VERSION_NUM; extern const uint32 SELECT_INTO_FILE_VERSION_NUM; extern const uint32 CHARACTER_SET_VERSION_NUM; diff --git a/src/include/nodes/parsenodes_common.h b/src/include/nodes/parsenodes_common.h index 13236a1559..6ddb03ab9b 100644 --- a/src/include/nodes/parsenodes_common.h +++ b/src/include/nodes/parsenodes_common.h @@ -482,6 +482,7 @@ typedef struct InsertStmt { List *targetList; bool isRewritten; /* is this Stmt created by rewritter or end user? */ bool hasIgnore; /* is this Stmt containing ignore keyword? 
*/ + bool is_dist_insertselect; } InsertStmt; /* ---------------------- @@ -2156,6 +2157,7 @@ typedef struct Query { char* unique_sql_text; /* used by unique sql plain text */ #endif bool can_push; + bool is_dist_insertselect; bool unique_check; /* true if the subquery is generated by general * sublink pullup, and scalar output is needed */ Oid* fixed_paramTypes; /* For plpy CTAS query. CTAS is a recursive call.CREATE query is the first rewrited. diff --git a/src/test/regress/expected/iud_heap_multi_insert.out b/src/test/regress/expected/iud_heap_multi_insert.out new file mode 100644 index 0000000000..c28be3d0d2 --- /dev/null +++ b/src/test/regress/expected/iud_heap_multi_insert.out @@ -0,0 +1,20 @@ +NOTICE: table "prep202407_hmi" does not exist, skipping +DROP TABLE +NOTICE: table "dest202407_hmi" does not exist, skipping +DROP TABLE +CREATE TABLE +CREATE TABLE + enable_heap_multi_insert_for_insert_select +-------------------------------------------- + on +(1 row) + +INSERT 0 10000 +INSERT 0 10000 + count +------- + 10000 +(1 row) + +DROP TABLE +DROP TABLE diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source index d43f31df06..a0897fa15e 100644 --- a/src/test/regress/output/recovery_2pc_tools.source +++ b/src/test/regress/output/recovery_2pc_tools.source @@ -282,6 +282,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c enable_hashagg | bool | | | enable_hashjoin | bool | | | enable_hdfs_predicate_pushdown | bool | | | + enable_heap_multi_insert_for_insert_select | bool | | | enable_huge_pages | bool | | | enable_hypo_index | bool | | | enable_ignore_case_in_dquotes | bool | | | diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 95c8b87ee7..7684bd9149 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -1121,6 +1121,8 @@ test: slow_sql # test user@host test: user_host_test +# test for 
batchcmd insert for IUD +test: iud_heap_multi_insert # test for new_expr_by_flatten test: enable_expr_fusion_flatten # test for on update timestamp and generated column diff --git a/src/test/regress/sql/iud_heap_multi_insert.sql b/src/test/regress/sql/iud_heap_multi_insert.sql new file mode 100644 index 0000000000..684da55cf4 --- /dev/null +++ b/src/test/regress/sql/iud_heap_multi_insert.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS prep202407_hmi; +DROP TABLE IF EXISTS dest202407_hmi; + +CREATE TABLE prep202407_hmi (i int); +CREATE TABLE dest202407_hmi (i int); + +SHOW enable_heap_multi_insert_for_insert_select; +INSERT INTO prep202407_hmi SELECT generate_series(1,10000); +INSERT INTO dest202407_hmi SELECT * FROM prep202407_hmi; +SELECT count(*) FROM dest202407_hmi; + +DROP TABLE IF EXISTS prep202407_hmi; +DROP TABLE IF EXISTS dest202407_hmi; -- Gitee From 6f391b0d1c665950d79f1d4b599a6eeee0f01a41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E5=AD=90=E7=BB=B4?= Date: Mon, 15 Jul 2024 17:05:58 +0800 Subject: [PATCH 043/347] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=E3=80=90bugfix=E3=80=91=E6=8C=87=E9=92=88?= =?UTF-8?q?=E5=8F=98=E9=87=8F=E6=9C=AA=E5=8F=96=E5=86=85=E5=AE=B9=EF=BC=8C?= =?UTF-8?q?=E5=AF=BC=E8=87=B4=E6=89=93=E5=8D=B0=E4=BF=A1=E6=81=AF=E9=94=99?= =?UTF-8?q?=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms_recovery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp index d33fa41eef..1ff9902324 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp @@ -455,7 +455,7 @@ bool SSPageReplayNeedSkip(RedoBufferInfo *bufferinfo, XLogRecPtr xlogLsn, XLogRe "xlogLsn:%lu, pageLsn:%lu", blockinfo->rnode.spcNode, blockinfo->rnode.dbNode, 
blockinfo->rnode.relNode, blockinfo->rnode.bucketNode, blockinfo->forknum, blockinfo->blkno, - xlogLsn, pageLsn))); + xlogLsn, *pageLsn))); } if (XLByteLE(xlogLsn, *pageLsn)) { @@ -468,7 +468,7 @@ bool SSPageReplayNeedSkip(RedoBufferInfo *bufferinfo, XLogRecPtr xlogLsn, XLogRe "xlogLsn:%lu, pageLsn:%lu", blockinfo->rnode.spcNode, blockinfo->rnode.dbNode, blockinfo->rnode.relNode, blockinfo->rnode.bucketNode, blockinfo->forknum, blockinfo->blkno, - xlogLsn, pageLsn))); + xlogLsn, *pageLsn))); #endif // do not release content_lock return true; -- Gitee From 661f29ce9d3cc7519585c939d3d40275b8bffe3c Mon Sep 17 00:00:00 2001 From: luqichao Date: Mon, 15 Jul 2024 17:25:46 +0800 Subject: [PATCH 044/347] optimize truncate --- src/bin/gs_guc/cluster_guc.conf | 1 + .../backend/utils/misc/guc/guc_storage.cpp | 13 +++++++++++++ .../storage/replication/walrcvwriter.cpp | 19 +++++++++++++++---- .../storage/replication/walreceiver.cpp | 1 + .../knl/knl_guc/knl_instance_attr_storage.h | 1 + src/include/replication/walreceiver.h | 1 + 6 files changed, 32 insertions(+), 4 deletions(-) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index e800cc4995..787b41be60 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -815,6 +815,7 @@ uwal_log_path|string|0,0|NULL|NULL| uwal_rpc_compression_switch|bool|0,0|NULL|NULL| uwal_rpc_flowcontrol_switch|bool|0,0|NULL|NULL| uwal_rpc_flowcontrol_value|int|8,2048|NULL|NULL| +uwal_truncate_interval|int|0,7200|NULL|NULL| uwal_async_append_switch|bool|0,0|NULL|NULL| enable_gazelle_performance_mode|bool|0,0|NULL|NULL| [cmserver] diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index 7742319b41..9a68e7563f 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -4067,6 +4067,19 @@ static void InitStorageConfigureNamesInt() NULL, NULL, NULL}, + 
{{"uwal_truncate_interval", + PGC_POSTMASTER, + NODE_SINGLENODE, + UWAL, + gettext_noop("Interval(s) for moving xlog files"), + NULL}, + &g_instance.attr.attr_storage.uwal_truncate_interval, + 60, + 0, + 7200, + NULL, + NULL, + NULL}, /* End-of-list marker */ {{NULL, (GucContext)0, diff --git a/src/gausskernel/storage/replication/walrcvwriter.cpp b/src/gausskernel/storage/replication/walrcvwriter.cpp index 8d8a63742d..01e6e3ba97 100755 --- a/src/gausskernel/storage/replication/walrcvwriter.cpp +++ b/src/gausskernel/storage/replication/walrcvwriter.cpp @@ -1021,14 +1021,15 @@ int walRcvWriteUwal(WalRcvCtlBlock *walrcb, UwalrcvWriterState *uwalrcv, UwalInf nbytes = (int)Min((int64)MaxReadUwalBytes, Min(uwalFreeOffset, readLen)); if (startPtr / XLogSegSize != t_thrd.xlog_cxt.uwalInfo.info.startWriteOffset / XLogSegSize) { if (startPtr / XLogSegSize < writePtr / XLogSegSize) { - startPtr = (startPtr / XLogSegSize + 1) * XLogSegSize; + uint64_t segNum = (writePtr / XLogSegSize) - (startPtr / XLogSegSize); + startPtr = (writePtr / XLogSegSize) * XLogSegSize; SpinLockAcquire(&uwalrcv->mutex); uwalrcv->startPtr = startPtr; uwalrcv->flushPtr = startPtr; uwalrcv->writeNoWait = false; SpinLockRelease(&uwalrcv->mutex); END_CRIT_SECTION(); - return XLogSegSize; + return segNum * XLogSegSize; } END_CRIT_SECTION(); return 0; @@ -1170,6 +1171,9 @@ int uwalRcvStateInit(UwalrcvWriterState *uwalrcv, UwalInfo info) uwalrcv->needQuery = false; uwalrcv->needXlogCatchup = true; SpinLockRelease(&uwalrcv->mutex); + struct timeval tv; + gettimeofday(&tv, NULL); + uwalrcv->truncateTimeStamp = tv.tv_sec; SpinLockAcquire(&uwalrcv->writeMutex); uwalrcv->writePtr = info.info.writeOffset; @@ -1386,7 +1390,7 @@ static int WalRcvUwalTruncate(WalRcvCtlBlock *walrcb, UwalrcvWriterState *uwalrc } int loop = 0; - while (needQuery && loop++ < 20) { + while (needQuery && loop++ < 2) { ret = GsUwalQuery(&info->id, &(info->info)); if (ret != 0) { ereport(WARNING, (errmsg("GsUwalQuery return failed"))); @@ 
-1408,7 +1412,12 @@ static int WalRcvUwalTruncate(WalRcvCtlBlock *walrcb, UwalrcvWriterState *uwalrc pg_usleep(100000); } - if (needQuery || (startPtr / XLogSegSize == truncatePtr / XLogSegSize)) { + struct timeval tv; + gettimeofday(&tv, NULL); + uint64_t elapsed = tv.tv_sev - uwalrcv->truncateTimeStamp; + + if (needQuery || (startPtr / XLogSegSize == truncatePtr / XLogSegSize) + || elapsed < g_instance.attr.attr_storage.uwal_truncate_interval) { return 0; } @@ -1421,6 +1430,8 @@ static int WalRcvUwalTruncate(WalRcvCtlBlock *walrcb, UwalrcvWriterState *uwalrc } START_CRIT_SECTION(); ret = GsUwalTruncate(&(info->id), startPtr); + gettimeofday(&tv, NULL); + uwalrcv->truncateTimeStamp = tv.tv_sec; if (0 != ret) { ereport(LOG, (errmsg("WalRcvUwalTruncate failed retCode: %d", ret))); END_CRIT_SECTION(); diff --git a/src/gausskernel/storage/replication/walreceiver.cpp b/src/gausskernel/storage/replication/walreceiver.cpp index 08712c2065..82ce0a062d 100755 --- a/src/gausskernel/storage/replication/walreceiver.cpp +++ b/src/gausskernel/storage/replication/walreceiver.cpp @@ -178,6 +178,7 @@ const char *g_reserve_param[] = { "uwal_rpc_compression_switch", "uwal_rpc_flowcontrol_switch", "uwal_rpc_flowcontrol_value", + "uwal_truncate_interval", "uwal_async_append_switch" }; diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h index b7e0d80d14..e0c582915f 100755 --- a/src/include/knl/knl_guc/knl_instance_attr_storage.h +++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h @@ -239,6 +239,7 @@ typedef struct knl_instance_attr_storage { bool uwal_rpc_compression_switch; bool uwal_rpc_flowcontrol_switch; int uwal_rpc_flowcontrol_value; + int uwal_truncate_interval; bool uwal_async_append_switch; } knl_instance_attr_storage; diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h index 9e18e08d52..370e2e34a2 100755 --- a/src/include/replication/walreceiver.h +++ 
b/src/include/replication/walreceiver.h @@ -125,6 +125,7 @@ typedef struct UwalrcvWriterState { bool needQuery; bool needXlogCatchup = true; bool fullSync = false; + uint64_t truncateTimeStamp; char uwalReceiverBuffer[FLEXIBLE_ARRAY_MEMBER]; } UwalrcvWriterState; -- Gitee From 0ab0395743aaa1aa125425969b7ee32ef4b31fca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E5=AD=90=E7=BB=B4?= Date: Mon, 15 Jul 2024 17:35:32 +0800 Subject: [PATCH 045/347] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=E3=80=90bugfix=E3=80=91=E4=BF=AE=E6=AD=A3?= =?UTF-8?q?=E6=B7=B7=E6=B7=86=E7=9A=84=E6=97=A5=E5=BF=97=E6=8A=A5=E9=94=99?= =?UTF-8?q?=E4=BF=A1=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc/guc_storage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index 7742319b41..83d5cab1d6 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -7180,7 +7180,7 @@ static bool check_ss_fi_cpu_latency_entries(char** newval, void** extra, GucSour } if (dms_fi_set_entries(DMS_FI_TYPE_CPU_LATENCY, entry_list, count) != DMS_SUCCESS) { - ereport(ERROR, (errmsg("set parameter ss_fi_net_latency_entries fail"))); + ereport(ERROR, (errmsg("set parameter ss_fi_cpu_latency_entries fail"))); return false; } return true; -- Gitee From 5852422880bc38265de68df71c8c4f868fe9154c Mon Sep 17 00:00:00 2001 From: luqichao Date: Mon, 15 Jul 2024 19:30:00 +0800 Subject: [PATCH 046/347] fix typo --- src/gausskernel/storage/replication/walrcvwriter.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/storage/replication/walrcvwriter.cpp b/src/gausskernel/storage/replication/walrcvwriter.cpp index 01e6e3ba97..d24cbd53a0 100755 --- 
a/src/gausskernel/storage/replication/walrcvwriter.cpp +++ b/src/gausskernel/storage/replication/walrcvwriter.cpp @@ -1414,7 +1414,7 @@ static int WalRcvUwalTruncate(WalRcvCtlBlock *walrcb, UwalrcvWriterState *uwalrc struct timeval tv; gettimeofday(&tv, NULL); - uint64_t elapsed = tv.tv_sev - uwalrcv->truncateTimeStamp; + uint64_t elapsed = tv.tv_sec - uwalrcv->truncateTimeStamp; if (needQuery || (startPtr / XLogSegSize == truncatePtr / XLogSegSize) || elapsed < g_instance.attr.attr_storage.uwal_truncate_interval) { -- Gitee From 891c47ff72fdaa25782eee74fcc6b1fa85ade430 Mon Sep 17 00:00:00 2001 From: yaoxin Date: Tue, 16 Jul 2024 09:43:48 +0800 Subject: [PATCH 047/347] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=97=A0=E7=94=A8ass?= =?UTF-8?q?ert?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/ustore/knl_uvisibility.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp index 0fe94fd285..738841c31b 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp @@ -243,7 +243,6 @@ bool UHeapTupleSatisfiesVisibility(UHeapTuple uhtup, Snapshot snapshot, Buffer b if (utuple != NULL && TransactionIdIsNormal(fxid) && IsMVCCSnapshot(snapshot) && SINGLE_LOCKER_XID_IS_EXCL_LOCKED(utuple->disk_tuple->flag)) { Assert(UHEAP_XID_IS_EXCL_LOCKED(utuple->disk_tuple->flag)); - Assert(!UHEAP_XID_IS_TRANS(utuple->disk_tuple->flag)); lockerXid = UHeapTupleGetRawXid(utuple); tupleIsExclusivelyLocked = true; } @@ -1262,7 +1261,6 @@ TM_Result UHeapTupleSatisfiesUpdate(Relation rel, Snapshot snapshot, ItemPointer UHeapTupleStatus tupleStatus = UHeapTupleGetStatus(utuple); /* tuple is no longer locked by a single locker */ if (tupleStatus != UHEAPTUPLESTATUS_LOCKED && SINGLE_LOCKER_XID_IS_EXCL_LOCKED(tupleData->flag)) { - 
Assert(!UHEAP_XID_IS_TRANS(utuple->disk_tuple->flag)); UHeapTupleHeaderClearSingleLocker(tupleData); } -- Gitee From 0b1a2bd4d3fcb616e03274f966399328640b3d0f Mon Sep 17 00:00:00 2001 From: yaoxin Date: Tue, 16 Jul 2024 09:48:10 +0800 Subject: [PATCH 048/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8DSPI=E5=9C=BA=E6=99=AF?= =?UTF-8?q?=E6=89=A7=E8=A1=8C=E5=AD=90=E4=BA=8B=E5=8A=A1=E6=89=A7=E8=A1=8C?= =?UTF-8?q?release=20savepoint=E6=97=B6=E5=80=99core=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_exec.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index c94d150eb3..0550ad1e40 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -15735,7 +15735,11 @@ static int exec_stmt_savepoint(PLpgSQL_execstate *estate, PLpgSQL_stmt* stmt) * Revert to outer eval_econtext. (The inner one was * automatically cleaned up during subxact exit.) 
*/ - estate->eval_econtext = u_sess->plsql_cxt.simple_econtext_stack->stack_econtext; + if (u_sess->plsql_cxt.simple_econtext_stack == NULL) { + estate->eval_econtext = NULL; + } else { + estate->eval_econtext = u_sess->plsql_cxt.simple_econtext_stack->stack_econtext; + } break; default: ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), -- Gitee From 147691ea9c76c4e177f3a49c36ecf1579fb51fc4 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Tue, 16 Jul 2024 10:11:26 +0800 Subject: [PATCH 049/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=8F=8C=E9=9B=86?= =?UTF-8?q?=E7=BE=A4switchover?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms_callback.cpp | 3 --- src/gausskernel/process/postmaster/postmaster.cpp | 5 +++++ src/gausskernel/storage/access/transam/xlog.cpp | 10 ++++++++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 55cf1857df..596064c17a 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -486,9 +486,6 @@ static void CBSwitchoverResult(void *db_handle, int result) } else { /* abort and restore state */ g_instance.dms_cxt.SSClusterState = NODESTATE_NORMAL; - if (SS_DISASTER_STANDBY_CLUSTER) { - g_instance.dms_cxt.SSReformInfo.in_reform = false; - } ereport(WARNING, (errmodule(MOD_DMS), errmsg("[SS reform][SS switchover] Switchover failed," "errno: %d.", result))); } diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index 16ae58749a..8a081a8967 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -8856,6 +8856,11 @@ static void PostmasterStateMachine(void) */ if (g_instance.demotion > NoDemote && pmState == PM_NO_CHILDREN) { 
ereport(LOG, (errmsg("all server processes terminated; reinitializing"))); + + if (SS_DISASTER_STANDBY_CLUSTER && g_instance.dms_cxt.SSClusterState == NODESTATE_PROMOTE_APPROVE) { + pmState = PM_RUN; + return; + } /* cause gpc scheduler use lwlock, so before reset shared memory(still has lwlock), get gpc_reset_lock and reset gpc */ if (ENABLE_GPC) { diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 2c2be96c3d..c7fbfba700 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -3489,6 +3489,12 @@ void XLogWaitFlush(XLogRecPtr recptr) return; } + /* SS standby cluster sometime is not in recovery, we also dont need wait here */ + if (SS_DISASTER_STANDBY_CLUSTER && !g_instance.dms_cxt.SSRecoveryInfo.disaster_cluster_promoting) { + ereport(DEBUG1, (errmsg("[SS_DISASTER_CLUSTER] SS standby cluster needless wait xlog flush"))); + return; + } + volatile XLogRecPtr flushTo = gs_compare_and_swap_u64(&g_instance.wal_cxt.flushResult, 0, 0); while (XLByteLT(flushTo, recptr)) { @@ -13054,8 +13060,8 @@ bool CreateRestartPoint(int flags) * Check that we're still in recovery mode. It's ok if we exit recovery * mode after this check, the restart point is valid anyway. 
*/ - if (!recoveryInProgress) { - ereport(DEBUG2, (errmsg("skipping restartpoint, recovery has already ended"))); + if (!recoveryInProgress && !SS_DISASTER_STANDBY_CLUSTER) { + ereport(LOG, (errmsg("skipping restartpoint, recovery has already ended"))); LWLockRelease(CheckpointLock); gstrace_exit(GS_TRC_ID_CreateRestartPoint); return false; -- Gitee From d1d7815379a6c9a522d2668ab48d1cc3e132787e Mon Sep 17 00:00:00 2001 From: wangpingyun <2418191738@qq.com> Date: Tue, 16 Jul 2024 11:35:36 +0800 Subject: [PATCH 050/347] =?UTF-8?q?=E6=94=AF=E6=8C=81=E4=BB=A5f,d=E4=BD=9C?= =?UTF-8?q?=E4=B8=BA=E6=B5=AE=E7=82=B9=E6=95=B0=EF=BC=8C=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E6=B5=AE=E7=82=B9=E6=95=B0=E7=89=B9=E6=AE=8A=E5=80=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/cluster_guc.conf | 1 + src/common/backend/parser/gram.y | 20 +- src/common/backend/parser/parse_expr.cpp | 47 + src/common/backend/parser/parser.cpp | 4 +- src/common/backend/parser/scan.l | 80 ++ src/common/backend/utils/misc/guc/guc_sql.cpp | 11 + .../interfaces/libpq/frontend_parser/gram.y | 14 +- src/common/pl/plpgsql/src/gram.y | 2 +- .../knl/knl_guc/knl_session_attr_sql.h | 1 + src/include/parser/kwlist.h | 2 + .../regress/expected/test_binary_suffix.out | 917 ++++++++++++++++++ src/test/regress/parallel_schedule0A | 3 + src/test/regress/sql/test_binary_suffix.sql | 356 +++++++ 13 files changed, 1452 insertions(+), 6 deletions(-) create mode 100644 src/test/regress/expected/test_binary_suffix.out create mode 100644 src/test/regress/sql/test_binary_suffix.sql diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 787b41be60..25fe528917 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -283,6 +283,7 @@ force_bitmapand|bool|0,0|NULL|NULL| from_collapse_limit|int|1,2147483647|NULL|NULL| fsync|bool|0,0|NULL|Using the fsync() system function can guarantee that when the operating system 
exception or hardware crash occurs, you can restore data to a consistent state. When fsync set to off, unable to restore the original data when the system crashes, it will cause the database unusable.| full_page_writes|bool|0,0|NULL|When full_page_writes set to off, unable to restore the original data when the system crashes, it will cause the database unusable.| +float_suffix_acceptance|bool|0,0|NULL|NULL| geqo|bool|0,0|NULL|Usually geqo do not set to off in the implementation process, geqo_threshold variable provides a more sophisticated method of control GEQO.| geqo_effort|int|1,10|NULL|NULL| geqo_generations|int|0,2147483647|NULL|NULL| diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 7b3d52ea34..08060d8037 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -868,7 +868,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); * DOT_DOT is unused in the core SQL grammar, and so will always provoke * parse errors. It is needed by PL/pgsql. 
*/ -%token IDENT FCONST SCONST BCONST VCONST XCONST Op CmpOp CmpNullOp COMMENTSTRING SET_USER_IDENT SET_IDENT UNDERSCORE_CHARSET +%token IDENT FCONST SCONST BCONST VCONST XCONST Op CmpOp CmpNullOp COMMENTSTRING SET_USER_IDENT SET_IDENT UNDERSCORE_CHARSET FCONST_F FCONST_D %token ICONST PARAM %token TYPECAST ORA_JOINOP DOT_DOT COLON_EQUALS PARA_EQUALS SET_IDENT_SESSION SET_IDENT_GLOBAL @@ -885,7 +885,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); AGGREGATE ALGORITHM ALL ALSO ALTER ALWAYS ANALYSE ANALYZE AND ANY APP APPEND ARCHIVE ARRAY AS ASC ASSERTION ASSIGNMENT ASYMMETRIC AT ATTRIBUTE AUDIT AUTHID AUTHORIZATION AUTOEXTEND AUTOMAPPED AUTO_INCREMENT - BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_INTEGER BIT BLANKS + BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_DOUBLE_INF BINARY_DOUBLE_NAN BINARY_INTEGER BIT BLANKS BLOB_P BLOCKCHAIN BODY_P BOGUS BOOLEAN_P BOTH BUCKETCNT BUCKETS BY BYTEAWITHOUTORDER BYTEAWITHOUTORDERWITHEQUAL CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CATALOG_NAME CHAIN CHANGE CHAR_P @@ -12304,6 +12304,8 @@ TriggerFuncArg: $$ = makeString(pstrdup(buf)); } | FCONST { $$ = makeString($1); } + | FCONST_F { $$ = makeString($1); } + | FCONST_D { $$ = makeString($1); } | Sconst { $$ = makeString($1); } | ColLabel { $$ = makeString($1); } ; @@ -27070,6 +27072,16 @@ a_expr: c_expr { $$ = $1; } n->model_args_location = @6; $$ = (Node*) n; } + | FCONST_F + { + Node *num = makeFloatConst($1, @1); + $$ = makeTypeCast(num, SystemTypeName("float4"), @1); + } + | FCONST_D + { + Node *num = makeFloatConst($1, @1); + $$ = makeTypeCast(num, SystemTypeName("float8"), @1); + } ; /* @@ -30291,6 +30303,8 @@ col_name_keyword: BETWEEN | BIGINT | BINARY_DOUBLE + | BINARY_DOUBLE_INF + | BINARY_DOUBLE_NAN | BINARY_INTEGER | BIT | BOOLEAN_P @@ -30362,6 +30376,8 @@ col_name_keyword_nonambiguous: BETWEEN | BIGINT | BINARY_DOUBLE + 
| BINARY_DOUBLE_INF + | BINARY_DOUBLE_NAN | BINARY_INTEGER | BIT | BOOLEAN_P diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 7a55479be9..de6a7f6560 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -56,6 +56,12 @@ #include "utils/varbit.h" #include "tcop/tcopprot.h" +typedef Node* (*DriverTransFunction)(ParseState* pstate, ColumnRef *cref, char *colname); +typedef struct { + char *keyword; + DriverTransFunction function; +} ColnameTransition; + extern Node* build_column_default(Relation rel, int attrno, bool isInsertCmd = false, bool needOnUpdate = false); extern Node* makeAConst(Value* v, int location); extern Value* makeStringValue(char* str); @@ -104,6 +110,15 @@ static Node* tryTransformFunc(ParseState* pstate, List* fields, int location); static void SubCheckOutParam(List* exprtargs, Oid funcid); static Node* transformPrefixKey(ParseState* pstate, PrefixKey* pkey); static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cursor_expression); +static Node* transformStringCast(ParseState* pstate, char *str, int location, TypeName *typname); +static Node* transformBinaryDoubleInf(ParseState* pstate, ColumnRef *cref, char *colname); +static Node* transformBinaryDoubleNan(ParseState* pstate, ColumnRef *cref, char *colname); + +ColnameTransition predicateTable[] = { + { "binary_double_infinity", transformBinaryDoubleInf }, + { "binary_double_nan", transformBinaryDoubleNan } +}; +#define PREDICATE_COUNT (int)(sizeof(predicateTable) / sizeof(predicateTable[0])) #define OrientedIsCOLorPAX(rte) ((rte)->orientation == REL_COL_ORIENTED || (rte)->orientation == REL_PAX_ORIENTED) #define INDEX_KEY_MAX_PREFIX_LENGTH (int)2676 @@ -866,6 +881,15 @@ Node* transformColumnRef(ParseState* pstate, ColumnRef* cref) AssertEreport(IsA(field1, String), MOD_OPT, ""); colname = strVal(field1); + for (int i = 0; i < PREDICATE_COUNT; i++) { + if(strcmp(colname, 
predicateTable[i].keyword) == 0){ + node = predicateTable[i].function(pstate, cref, colname); + } + } + if (node != NULL) { + break; + } + if (pstate->p_hasStartWith || pstate->p_split_where_for_swcb) { Node *expr = NULL; if (pstate->p_hasStartWith) { @@ -3854,6 +3878,29 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur return (Node*)newm; } +static Node* transformStringCast(ParseState* pstate, char *str, int location, TypeName *typname) +{ + A_Const *n = makeNode(A_Const); + n->val.type = T_String; + n->val.val.str = str; + n->location = location; + + TypeCast *tc = makeNode(TypeCast); + tc->arg = (Node *)n; + tc->typname = typname; + tc->location = location; + + return transformTypeCast(pstate, tc); +} + +static Node* transformBinaryDoubleInf(ParseState* pstate, ColumnRef *cref, char *colname) { + return transformStringCast(pstate, "infinity", cref->location, SystemTypeName("float8")); +} + +static Node* transformBinaryDoubleNan(ParseState* pstate, ColumnRef *cref, char *colname) { + return transformStringCast(pstate, "nan", cref->location, SystemTypeName("float8")); +} + /* * Produce a string identifying an expression by kind. 
diff --git a/src/common/backend/parser/parser.cpp b/src/common/backend/parser/parser.cpp index e4aea7d0a4..e4fe525536 100644 --- a/src/common/backend/parser/parser.cpp +++ b/src/common/backend/parser/parser.cpp @@ -877,7 +877,9 @@ char** get_next_snippet( } const struct ignore_keyword_opt_data ignore_keywords[] = { - {"interval", INTERVAL} + {"interval", INTERVAL}, + {"binary_double_infinity", BINARY_DOUBLE_INF}, + {"binary_double_nan", BINARY_DOUBLE_NAN} }; #define INGORE_KEYWORDS_LEN (sizeof(ignore_keywords) / sizeof(struct ignore_keyword_opt_data)) diff --git a/src/common/backend/parser/scan.l b/src/common/backend/parser/scan.l index 69b378c110..8fa0db4d43 100755 --- a/src/common/backend/parser/scan.l +++ b/src/common/backend/parser/scan.l @@ -112,6 +112,8 @@ static bool is_utf16_surrogate_second(pg_wchar c); static pg_wchar surrogate_pair_to_codepoint(pg_wchar first, pg_wchar second); static void addunicode(pg_wchar c, yyscan_t yyscanner); static void set_is_delimiter_name(char* text, core_yyscan_t yyscanner ); +static int process_decimal_float(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval); +static int process_decimal_double(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval); #define yyerror(msg) scanner_yyerror(msg, yyscanner) @@ -375,9 +377,13 @@ operator {op_chars}+ integer {digit}+ decimal (({digit}*\.{digit}+)|({digit}+\.{digit}*)) decimalfail {digit}+\.\. +decimalf ({integer}|{decimal})[fF] +decimald ({integer}|{decimal})[dD] real ({integer}|{decimal})[Ee][-+]?{digit}+ realfail1 ({integer}|{decimal})[Ee] realfail2 ({integer}|{decimal})[Ee][-+] +realf ({real})[fF] +reald ({real})[dD] snapvers ({digit}+(\.{digit}+)*) param \${integer} @@ -1045,6 +1051,62 @@ other . 
yyextra->is_hint_str = false; return process_integer_literal(yytext, yylval); } +{decimalf} { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && + u_sess->attr.attr_sql.float_suffix_acceptance) { + SET_YYLLOC(); + yyextra->is_hint_str = false; + return process_decimal_float(yytext, yyleng, yyscanner, yylval); + } else { + yyless(yyleng-1); + SET_YYLLOC(); + yylval->str = pstrdup(yytext); + yyextra->is_hint_str = false; + return FCONST; + } + } +{realf} { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && + u_sess->attr.attr_sql.float_suffix_acceptance) { + SET_YYLLOC(); + yyextra->is_hint_str = false; + return process_decimal_float(yytext, yyleng, yyscanner, yylval); + } else { + yyless(yyleng-1); + SET_YYLLOC(); + yylval->str = pstrdup(yytext); + yyextra->is_hint_str = false; + return FCONST; + } + } +{decimald} { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && + u_sess->attr.attr_sql.float_suffix_acceptance) { + SET_YYLLOC(); + yyextra->is_hint_str = false; + return process_decimal_double(yytext, yyleng, yyscanner, yylval); + } else { + yyless(yyleng-1); + SET_YYLLOC(); + yylval->str = pstrdup(yytext); + yyextra->is_hint_str = false; + return FCONST; + } + } +{reald} { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && + u_sess->attr.attr_sql.float_suffix_acceptance) { + SET_YYLLOC(); + yyextra->is_hint_str = false; + return process_decimal_double(yytext, yyleng, yyscanner, yylval); + } else { + yyless(yyleng-1); + SET_YYLLOC(); + yylval->str = pstrdup(yytext); + yyextra->is_hint_str = false; + return FCONST; + } + } {real} { SET_YYLLOC(); yylval->str = pstrdup(yytext); @@ -1533,6 +1595,24 @@ process_integer_literal(const char *token, YYSTYPE *lval) return ICONST; } +static int +process_decimal_float(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval) +{ + startlit(); + addlit(token, len-1, yyscanner); + lval->str = litbufdup(yyscanner); + return FCONST_F; +} + +static int +process_decimal_double(char *token, 
int len, core_yyscan_t yyscanner, YYSTYPE *lval) +{ + startlit(); + addlit(token, len-1, yyscanner); + lval->str = litbufdup(yyscanner); + return FCONST_D; +} + static unsigned int hexval(unsigned char c) { diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index fbf0596785..8a989d8201 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -1257,6 +1257,17 @@ static void InitSqlConfigureNamesBool() true, NULL, NULL}, + {{"float_suffix_acceptance", + PGC_USERSET, + NODE_ALL, + QUERY_TUNING_COST, + gettext_noop("Allows the user to specify the value of a double-precision floating-point parameter."), + NULL}, + &u_sess->attr.attr_sql.float_suffix_acceptance, + false, + NULL, + NULL, + NULL}, {{"enable_slot_log", PGC_USERSET, NODE_ALL, diff --git a/src/common/interfaces/libpq/frontend_parser/gram.y b/src/common/interfaces/libpq/frontend_parser/gram.y index 523899fe07..001b2dd4c1 100755 --- a/src/common/interfaces/libpq/frontend_parser/gram.y +++ b/src/common/interfaces/libpq/frontend_parser/gram.y @@ -495,7 +495,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; * DOT_DOT is unused in the core SQL grammar, and so will always provoke * parse errors. It is needed by PL/pgsql. 
*/ -%token IDENT FCONST SCONST BCONST XCONST Op CmpOp COMMENTSTRING SET_USER_IDENT SET_IDENT UNDERSCORE_CHARSET +%token IDENT FCONST SCONST BCONST XCONST Op CmpOp COMMENTSTRING SET_USER_IDENT SET_IDENT UNDERSCORE_CHARSET FCONST_F FCONST_D %token ICONST PARAM %token TYPECAST ORA_JOINOP DOT_DOT COLON_EQUALS PARA_EQUALS SET_IDENT_SESSION SET_IDENT_GLOBAL @@ -512,7 +512,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; AGGREGATE ALGORITHM ALL ALSO ALTER ALWAYS ANALYSE ANALYZE AND ANY APP APPEND ARCHIVE ARRAY AS ASC ASSERTION ASSIGNMENT ASYMMETRIC AT ATTRIBUTE AUDIT AUDIT_POLICY AUTHID AUTHORIZATION AUTOEXTEND AUTOMAPPED AUTO_INCREMENT - BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_INTEGER BIT BLANKS BLOB_P BLOCKCHAIN BODY_P BOGUS + BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_DOUBLE_INF BINARY_DOUBLE_NAN BINARY_INTEGER BIT BLANKS BLOB_P BLOCKCHAIN BODY_P BOGUS BOOLEAN_P BOTH BUCKETCNT BUCKETS BY BYTEAWITHOUTORDER BYTEAWITHOUTORDERWITHEQUAL CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CATALOG_NAME CHAIN CHANGE CHAR_P @@ -9527,6 +9527,16 @@ a_expr: c_expr { $$ = $1; } list_make1($1), @2), @2); } + | FCONST_F + { + Node *num = makeFloatConst($1, @1); + $$ = makeTypeCast(num, SystemTypeName("float4"), @1); + } + | FCONST_D + { + Node *num = makeFloatConst($1, @1); + $$ = makeTypeCast(num, SystemTypeName("float8"), @1); + } ; /* diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index b2ab0c606f..53550cd02b 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -428,7 +428,7 @@ static void processFunctionRecordOutParam(int varno, Oid funcoid, int* outparam) * Some of these are not directly referenced in this file, but they must be * here anyway. 
*/ -%token IDENT FCONST SCONST BCONST VCONST XCONST Op CmpOp CmpNullOp COMMENTSTRING SET_USER_IDENT SET_IDENT UNDERSCORE_CHARSET +%token IDENT FCONST SCONST BCONST VCONST XCONST Op CmpOp CmpNullOp COMMENTSTRING SET_USER_IDENT SET_IDENT UNDERSCORE_CHARSET FCONST_F FCONST_D %token ICONST PARAM %token TYPECAST ORA_JOINOP DOT_DOT COLON_EQUALS PARA_EQUALS SET_IDENT_SESSION SET_IDENT_GLOBAL diff --git a/src/include/knl/knl_guc/knl_session_attr_sql.h b/src/include/knl/knl_guc/knl_session_attr_sql.h index 35a5b44b1a..e93f14bbd1 100644 --- a/src/include/knl/knl_guc/knl_session_attr_sql.h +++ b/src/include/knl/knl_guc/knl_session_attr_sql.h @@ -130,6 +130,7 @@ typedef struct knl_session_attr_sql { bool lo_compat_privileges; bool quote_all_identifiers; bool enforce_a_behavior; + bool float_suffix_acceptance; bool enable_slot_log; bool convert_string_to_digit; bool agg_redistribute_enhancement; diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h index 23082f704e..76e29f5080 100644 --- a/src/include/parser/kwlist.h +++ b/src/include/parser/kwlist.h @@ -73,6 +73,8 @@ PG_KEYWORD("between", BETWEEN, COL_NAME_KEYWORD) PG_KEYWORD("bigint", BIGINT, COL_NAME_KEYWORD) PG_KEYWORD("binary", BINARY, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("binary_double", BINARY_DOUBLE, COL_NAME_KEYWORD) +PG_KEYWORD("binary_double_infinity", BINARY_DOUBLE_INF, COL_NAME_KEYWORD) +PG_KEYWORD("binary_double_nan", BINARY_DOUBLE_NAN, COL_NAME_KEYWORD) PG_KEYWORD("binary_integer", BINARY_INTEGER, COL_NAME_KEYWORD) PG_KEYWORD("bit", BIT, COL_NAME_KEYWORD) PG_KEYWORD("blanks", BLANKS, UNRESERVED_KEYWORD) diff --git a/src/test/regress/expected/test_binary_suffix.out b/src/test/regress/expected/test_binary_suffix.out new file mode 100644 index 0000000000..3de471e99b --- /dev/null +++ b/src/test/regress/expected/test_binary_suffix.out @@ -0,0 +1,917 @@ +-- create new schema +drop schema if exists test_binary; +NOTICE: schema "test_binary" does not exist, skipping +create schema test_binary; +set 
search_path=test_binary; +set float_suffix_acceptance to on; +SELECT 3.14f; + float4 +-------- + 3.14 +(1 row) + +SELECT 10.0d; + float8 +-------- + 10 +(1 row) + +SELECT -2.5f; + ?column? +---------- + -2.5 +(1 row) + +SELECT -10.0d; + ?column? +---------- + -10 +(1 row) + +SELECT 1f; + float4 +-------- + 1 +(1 row) + +SELECT 2d; + float8 +-------- + 2 +(1 row) + +SELECT -3F; + ?column? +---------- + -3 +(1 row) + +SELECT -4D; + ?column? +---------- + -4 +(1 row) + +SELECT 123e3d; + float8 +-------- + 123000 +(1 row) + +SELECT 5.5df; + f +----- + 5.5 +(1 row) + +SELECT 3.6D; + float8 +-------- + 3.6 +(1 row) + +SELECT 8.6fabc; + abc +----- + 8.6 +(1 row) + +CREATE TABLE test_table ( + id INT, + float_value FLOAT, + double_value BINARY_DOUBLE +); +INSERT INTO test_table (id, float_value, double_value) VALUES (1, 3.14f, 2.7182d); +SELECT float_value * 2d, double_value + 1.5f FROM test_table; + ?column? | ?column? +------------------+---------- + 6.28000020980835 | 4.2182 +(1 row) + +DROP TABLE test_table; +SELECT round(3.14159f, 2); + round +------- + 3.14 +(1 row) + +SELECT trunc(3.14159f, 2); + trunc +------- + 3.14 +(1 row) + +SELECT sqrt(4.0f); + sqrt +------ + 2 +(1 row) + +SELECT power(2.0f, 3.0f); + power +------- + 8 +(1 row) + +SELECT sin(0.5f); + sin +------------------ + .479425538604203 +(1 row) + +SELECT cos(0.5f); + cos +------------------ + .877582561890373 +(1 row) + +SELECT log(10.0f); + log +----- + 1 +(1 row) + +SELECT exp(2.0f); + exp +------------------ + 7.38905609893065 +(1 row) + +CREATE FUNCTION test_function(input_val float) RETURNS float AS $$ +BEGIN + RETURN input_val * 2.0f; +END; +$$ LANGUAGE plpgsql; +SELECT test_function(10.5f); + test_function +--------------- + 21 +(1 row) + +drop function test_function; +CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT); +CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT); +CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS +$$ +DECLARE + input_value FLOAT; +BEGIN 
+ input_value := TG_ARGV[0]::FLOAT; + INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3, input_value); + RETURN NEW; +END +$$ LANGUAGE PLPGSQL; +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5f); +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5d); +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +drop function tri_insert_func; +drop table test_trigger_src_tbl; +drop table test_trigger_des_tbl; +SELECT 1.5f = 1.5; -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT 1.5f <> 2.0; -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT 1.5f > 1.0; -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT 1.5f < 2.0; -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT (1.5f > 1.0) AND (2.5f < 3.0); -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT (1.5f > 1.0) OR (2.5f > 3.0); -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT 1.5f + 2.5; -- 返回 4.0 + ?column? +---------- + 4 +(1 row) + +SELECT 3.5f - 1.5; -- 返回 2.0 + ?column? +---------- + 2 +(1 row) + +SELECT 2.0f * 3.0; -- 返回 6.0 + ?column? +---------- + 6 +(1 row) + +SELECT 4.0f / 2.0; -- 返回 2.0 + ?column? 
+---------- + 2 +(1 row) + +set float_suffix_acceptance to off; +SELECT 3.14f; + f +------ + 3.14 +(1 row) + +SELECT 10.0d; + d +------ + 10.0 +(1 row) + +SELECT -2.5f; + f +------ + -2.5 +(1 row) + +SELECT -10.0d; + d +------- + -10.0 +(1 row) + +SELECT 1f; + f +--- + 1 +(1 row) + +SELECT 2d; + d +--- + 2 +(1 row) + +SELECT -3F; + f +---- + -3 +(1 row) + +SELECT -4D; + d +---- + -4 +(1 row) + +SELECT 123e3d; + d +-------- + 123000 +(1 row) + +SELECT 5.5df; + df +----- + 5.5 +(1 row) + +SELECT 3.6D; + d +----- + 3.6 +(1 row) + +SELECT 8.6fabc; + fabc +------ + 8.6 +(1 row) + +INSERT INTO test_table (id, float_value, double_value) VALUES (1, 3.14f, 2.7182d); +ERROR: syntax error at or near "f" +LINE 1: ...le (id, float_value, double_value) VALUES (1, 3.14f, 2.7182d... + ^ +SELECT float_value * 2d, double_value + 1.5f FROM test_table; +ERROR: relation "test_table" does not exist on datanode1 +LINE 1: SELECT float_value * 2d, double_value + 1.5f FROM test_table... + ^ +DROP TABLE test_table; +ERROR: table "test_table" does not exist +SELECT round(3.14159f, 2); +ERROR: syntax error at or near "f" +LINE 1: SELECT round(3.14159f, 2); + ^ +SELECT trunc(3.14159f, 2); +ERROR: syntax error at or near "f" +LINE 1: SELECT trunc(3.14159f, 2); + ^ +SELECT sqrt(4.0f); +ERROR: syntax error at or near "f" +LINE 1: SELECT sqrt(4.0f); + ^ +SELECT power(2.0f, 3.0f); +ERROR: syntax error at or near "f" +LINE 1: SELECT power(2.0f, 3.0f); + ^ +SELECT sin(0.5f); +ERROR: syntax error at or near "f" +LINE 1: SELECT sin(0.5f); + ^ +SELECT cos(0.5f); +ERROR: syntax error at or near "f" +LINE 1: SELECT cos(0.5f); + ^ +SELECT log(10.0f); +ERROR: syntax error at or near "f" +LINE 1: SELECT log(10.0f); + ^ +SELECT exp(2.0f); +ERROR: syntax error at or near "f" +LINE 1: SELECT exp(2.0f); + ^ +CREATE FUNCTION test_function(input_val float) RETURNS float AS $$ +BEGIN + RETURN input_val * 2.0f; +END; +$$ LANGUAGE plpgsql; +SELECT test_function(10.5f); +ERROR: syntax error at or near "f" +LINE 1: 
SELECT test_function(10.5f); + ^ +DROP FUNCTION test_function; +CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT); +CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT); +CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS +$$ +DECLARE + input_value FLOAT; +BEGIN + input_value := TG_ARGV[0]::FLOAT; + INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3, input_value); + RETURN NEW; +END +$$ LANGUAGE PLPGSQL; +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5f); +ERROR: syntax error at or near "f" +LINE 4: EXECUTE PROCEDURE tri_insert_func(2.5f); + ^ +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +ERROR: trigger "insert_trigger" for table "test_trigger_src_tbl" does not exist +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5d); +ERROR: syntax error at or near "d" +LINE 4: EXECUTE PROCEDURE tri_insert_func(2.5d); + ^ +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +ERROR: trigger "insert_trigger" for table "test_trigger_src_tbl" does not exist +drop function tri_insert_func; +drop table test_trigger_src_tbl; +drop table test_trigger_des_tbl; +SELECT 1.5f = 1.5; +ERROR: syntax error at or near "=" +LINE 1: SELECT 1.5f = 1.5; + ^ +SELECT 1.5f <> 2.0; +ERROR: syntax error at or near "<>" +LINE 1: SELECT 1.5f <> 2.0; + ^ +SELECT 1.5f > 1.0; +ERROR: syntax error at or near ">" +LINE 1: SELECT 1.5f > 1.0; + ^ +SELECT 1.5f < 2.0; +ERROR: syntax error at or near "<" +LINE 1: SELECT 1.5f < 2.0; + ^ +SELECT (1.5f > 1.0) AND (2.5f < 3.0); +ERROR: syntax error at or near "f" +LINE 1: SELECT (1.5f > 1.0) AND (2.5f < 3.0); + ^ +SELECT (1.5f > 1.0) OR (2.5f > 3.0); +ERROR: syntax error at or near "f" +LINE 1: SELECT (1.5f > 1.0) OR (2.5f > 3.0); + ^ +SELECT 1.5f + 2.5; +ERROR: syntax error at or near "+" +LINE 1: SELECT 1.5f + 2.5; + ^ +SELECT 3.5f - 1.5; +ERROR: syntax error at 
or near "-" +LINE 1: SELECT 3.5f - 1.5; + ^ +SELECT 2.0f * 3.0; +ERROR: syntax error at or near "*" +LINE 1: SELECT 2.0f * 3.0; + ^ +SELECT 4.0f / 2.0; +ERROR: syntax error at or near "/" +LINE 1: SELECT 4.0f / 2.0; + ^ +set float_suffix_acceptance to on; +SELECT BINARY_DOUBLE_INFINITY; + binary_double_infinity +------------------------ + Infinity +(1 row) + +SELECT BINARY_DOUBLE_NAN; + binary_double_nan +------------------- + NaN +(1 row) + +CREATE TABLE test_table ( + id INT, + float_value binary_double +); +INSERT INTO test_table (id, float_value) +VALUES (1, BINARY_DOUBLE_NAN); +SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_NAN; + id | float_value +----+------------- + 1 | NaN +(1 row) + +INSERT INTO test_table (id, float_value) +VALUES (2, BINARY_DOUBLE_INFINITY); +SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_INFINITY; + id | float_value +----+------------- + 2 | Infinity +(1 row) + +SELECT float_value + 1 FROM test_table WHERE id = 1; + ?column? +---------- + NaN +(1 row) + +SELECT float_value * 2 FROM test_table WHERE id = 2; + ?column? 
+---------- + Infinity +(1 row) + +SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_NAN OR float_value = BINARY_DOUBLE_INFINITY; + id | float_value +----+------------- + 1 | NaN + 2 | Infinity +(2 rows) + +DROP TABLE test_table; +SELECT ABS(binary_double_infinity); + abs +---------- + Infinity +(1 row) + +SELECT CEIL(binary_double_infinity); + ceil +---------- + Infinity +(1 row) + +SELECT SQRT(binary_double_infinity); + sqrt +---------- + Infinity +(1 row) + +SELECT FLOOR(binary_double_infinity); + floor +---------- + Infinity +(1 row) + +SELECT SQRT(binary_double_infinity); + sqrt +---------- + Infinity +(1 row) + +SELECT EXP(binary_double_infinity); + exp +---------- + Infinity +(1 row) + +SELECT LOG(binary_double_infinity); + log +---------- + Infinity +(1 row) + +SELECT POWER(binary_double_infinity, 2); + power +---------- + Infinity +(1 row) + +SELECT SIN(binary_double_infinity); +ERROR: input is out of range +CONTEXT: referenced column: sin +SELECT ABS(binary_double_nan); + abs +----- + NaN +(1 row) + +SELECT CEIL(binary_double_nan); + ceil +------ + NaN +(1 row) + +SELECT SQRT(binary_double_nan); + sqrt +------ + NaN +(1 row) + +SELECT FLOOR(binary_double_nan); + floor +------- + NaN +(1 row) + +SELECT SQRT(binary_double_nan); + sqrt +------ + NaN +(1 row) + +SELECT EXP(binary_double_nan); + exp +----- + NaN +(1 row) + +SELECT LOG(binary_double_nan); + log +----- + NaN +(1 row) + +SELECT POWER(binary_double_nan, 2); + power +------- + NaN +(1 row) + +SELECT SIN(binary_double_nan); + sin +----- + NaN +(1 row) + +-- 创建自定义函数 +CREATE FUNCTION test_function(value FLOAT) + RETURNS FLOAT + LANGUAGE plpgsql +AS $$ +BEGIN + IF value = binary_double_infinity THEN + RETURN 1.0; + ELSE + RETURN 0.0; + END IF; +END; +$$; +SELECT test_function(binary_double_nan); + test_function +--------------- + 0 +(1 row) + +DROP FUNCTION test_function; +--表达式 +SELECT binary_double_infinity = binary_double_infinity; -- 返回 true + ?column? 
+---------- + t +(1 row) + +SELECT binary_double_nan = binary_double_nan; -- 返回 false + ?column? +---------- + t +(1 row) + +SELECT binary_double_nan = binary_double_infinity; -- 返回 false + ?column? +---------- + f +(1 row) + +SELECT binary_double_infinity <> 0.0; -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT binary_double_nan <> binary_double_nan; -- 返回 true + ?column? +---------- + f +(1 row) + +SELECT binary_double_infinity > 0.0; -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT binary_double_nan < binary_double_infinity; -- 返回 false + ?column? +---------- + f +(1 row) + +SELECT (binary_double_infinity > 0.0) AND (binary_double_nan < binary_double_infinity); -- 返回 false + ?column? +---------- + f +(1 row) + +SELECT (binary_double_infinity > 0.0) OR (binary_double_nan < binary_double_infinity); -- 返回 true + ?column? +---------- + t +(1 row) + +SELECT binary_double_infinity + 1.0; -- 返回正无穷大 + ?column? +---------- + Infinity +(1 row) + +SELECT binary_double_nan + 1.0; -- 返回 NaN + ?column? +---------- + NaN +(1 row) + +SELECT binary_double_infinity - binary_double_infinity; -- 返回 NaN + ?column? +---------- + NaN +(1 row) + +SELECT binary_double_nan - 1.0; -- 返回 NaN + ?column? +---------- + NaN +(1 row) + +SELECT binary_double_infinity * binary_double_infinity; -- 返回正无穷大 + ?column? +---------- + Infinity +(1 row) + +SELECT binary_double_nan * 1.0; -- 返回 NaN + ?column? +---------- + NaN +(1 row) + +SELECT binary_double_infinity / binary_double_infinity; -- 返回 NaN + ?column? +---------- + NaN +(1 row) + +SELECT binary_double_nan / 1.0; -- 返回 NaN + ?column? 
+---------- + NaN +(1 row) + +CREATE TABLE T1(binary_double_nan INT); +INSERT INTO T1 VALUES(1),(2),(3); +SELECT binary_double_nan; + binary_double_nan +------------------- + NaN +(1 row) + +SELECT binary_double_nan FROM T1; + binary_double_nan +------------------- + NaN + NaN + NaN +(3 rows) + +SELECT T1.binary_double_nan FROM T1; + binary_double_nan +------------------- + 1 + 2 + 3 +(3 rows) + +DROP TABLE T1; +--CHECK约束 +create table t1( + num binary_double + CONSTRAINT check_num CHECK (num > 100) +); +insert into t1 values (binary_double_infinity);--插入成功 +insert into t1 values (-binary_double_infinity);--插入失败 +ERROR: new row for relation "t1" violates check constraint "check_num" +DETAIL: N/A +insert into t1 values (binary_double_nan);--插入成功 +drop table t1; +--隐式转换 +create table t1 (id int); +insert into t1 values(binary_double_infinity);--插入失败 +ERROR: integer out of range +CONTEXT: referenced column: id +insert into t1 values(-binary_double_infinity);--插入失败 +ERROR: integer out of range +CONTEXT: referenced column: id +insert into t1 values(binary_double_nan);--插入失败 +ERROR: integer out of range +CONTEXT: referenced column: id +drop table t1; +--聚集函数 +create table t1(id binary_double); +insert into t1 values(99); +insert into t1 values(100); +insert into t1 values(binary_double_infinity); +insert into t1 values(binary_double_nan); +select avg(id) from t1; + avg +----- + NaN +(1 row) + +select sum(id) from t1; + sum +----- + NaN +(1 row) + +select max(id) from t1; + max +----- + NaN +(1 row) + +select min(id) from t1; + min +----- + 99 +(1 row) + +drop table t1; +--关键字测试 +create table binary_double_infinity(col1 float4); +drop table binary_double_infinity; +create table t1(binary_double_nan float4); +insert into t1 values(3.14),(10),(15); +select binary_double_nan from t1; + binary_double_nan +------------------- + NaN + NaN + NaN +(3 rows) + +select t1.binary_double_nan from t1; + binary_double_nan +------------------- + 3.14 + 10 + 15 +(3 rows) + +drop table t1; 
+create table t1(binary_double_infinity float4 DEFAULT binary_double_infinity, id int); +insert into t1 (id) values(10); +insert into t1 values (100,9); +select * from t1; + binary_double_infinity | id +------------------------+---- + Infinity | 10 + 100 | 9 +(2 rows) + +select * from t1 where binary_double_infinity = 100; + binary_double_infinity | id +------------------------+---- +(0 rows) + +select * from t1 where binary_double_infinity = binary_double_infinity; + binary_double_infinity | id +------------------------+---- + Infinity | 10 + 100 | 9 +(2 rows) + +select id from t1 group by id having binary_double_infinity > 100; + id +---- + 9 + 10 +(2 rows) + +drop table t1; +select 3.14binary_double_nan; +ERROR: syntax error at or near "binary_double_nan" +LINE 1: select 3.14binary_double_nan; + ^ +select 3.14binary_double_infinity; +ERROR: syntax error at or near "binary_double_infinity" +LINE 1: select 3.14binary_double_infinity; + ^ +create user binary_double_infinity with password'gauss@123'; +drop user binary_double_infinity; +create schema binary_double_infinity; +drop schema binary_double_infinity; +CREATE FUNCTION binary_double_infinity(a INTEGER, b INTEGER) + RETURNS INTEGER + LANGUAGE plpgsql +AS +$$ +BEGIN + RETURN a + b; +END; +$$; +ERROR: syntax error at or near "(" +LINE 1: CREATE FUNCTION binary_double_infinity(a INTEGER, b INTEGER) + ^ +drop function binary_double_infinity; +ERROR: syntax error at or near ";" +LINE 1: drop function binary_double_infinity; + ^ +create type binary_double_infinity AS ( + id INT, + name VARCHAR(50), + age INT +); +drop type binary_double_infinity; +ERROR: syntax error at or near "binary_double_infinity" +LINE 1: drop type binary_double_infinity; + ^ +set disable_keyword_options = 'binary_double_nan'; +select 3.14binary_double_nan; + binary_double_nan +------------------- + 3.14 +(1 row) + +select 3.14binary_double_infinity; +ERROR: syntax error at or near "binary_double_infinity" +LINE 1: select 
3.14binary_double_infinity; + ^ +create user binary_double_nan with password'gauss@123'; +drop user binary_double_nan; +create schema binary_double_nan; +drop schema binary_double_nan; +CREATE FUNCTION binary_double_nan(a INTEGER, b INTEGER) + RETURNS INTEGER + LANGUAGE plpgsql +AS +$$ +BEGIN + RETURN a + b; +END; +$$; +drop function binary_double_nan; +create type binary_double_nan AS ( + id INT, + name VARCHAR(50), + age INT +); +drop type binary_double_nan; +set disable_keyword_options = ''; +drop schema if exists test_binary cascade; +NOTICE: drop cascades to type "binary_double_infinity" diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 13a91f0bcb..de4669f680 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -482,3 +482,6 @@ test: accept_empty_str not_accept_empty_str pg_empty_str accept_empty_copy not_a #test: gin/cgin test: cgin_select ignore_keyword_list test: gin_select + +# test for binary_double suffix and Constant values +test: test_binary_suffix diff --git a/src/test/regress/sql/test_binary_suffix.sql b/src/test/regress/sql/test_binary_suffix.sql new file mode 100644 index 0000000000..213d834055 --- /dev/null +++ b/src/test/regress/sql/test_binary_suffix.sql @@ -0,0 +1,356 @@ +-- create new schema +drop schema if exists test_binary; +create schema test_binary; +set search_path=test_binary; + +set float_suffix_acceptance to on; +SELECT 3.14f; +SELECT 10.0d; +SELECT -2.5f; +SELECT -10.0d; +SELECT 1f; +SELECT 2d; +SELECT -3F; +SELECT -4D; +SELECT 123e3d; +SELECT 5.5df; +SELECT 3.6D; +SELECT 8.6fabc; + +CREATE TABLE test_table ( + id INT, + float_value FLOAT, + double_value BINARY_DOUBLE +); + +INSERT INTO test_table (id, float_value, double_value) VALUES (1, 3.14f, 2.7182d); +SELECT float_value * 2d, double_value + 1.5f FROM test_table; +DROP TABLE test_table; + +SELECT round(3.14159f, 2); +SELECT trunc(3.14159f, 2); +SELECT sqrt(4.0f); +SELECT power(2.0f, 3.0f); 
+SELECT sin(0.5f); +SELECT cos(0.5f); +SELECT log(10.0f); +SELECT exp(2.0f); + +CREATE FUNCTION test_function(input_val float) RETURNS float AS $$ +BEGIN + RETURN input_val * 2.0f; +END; +$$ LANGUAGE plpgsql; + +SELECT test_function(10.5f); +drop function test_function; + +CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT); +CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT); +CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS +$$ +DECLARE + input_value FLOAT; +BEGIN + input_value := TG_ARGV[0]::FLOAT; + INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3, input_value); + RETURN NEW; +END +$$ LANGUAGE PLPGSQL; +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5f); +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5d); +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +drop function tri_insert_func; +drop table test_trigger_src_tbl; +drop table test_trigger_des_tbl; + +SELECT 1.5f = 1.5; -- 返回 true +SELECT 1.5f <> 2.0; -- 返回 true +SELECT 1.5f > 1.0; -- 返回 true +SELECT 1.5f < 2.0; -- 返回 true +SELECT (1.5f > 1.0) AND (2.5f < 3.0); -- 返回 true +SELECT (1.5f > 1.0) OR (2.5f > 3.0); -- 返回 true +SELECT 1.5f + 2.5; -- 返回 4.0 +SELECT 3.5f - 1.5; -- 返回 2.0 +SELECT 2.0f * 3.0; -- 返回 6.0 +SELECT 4.0f / 2.0; -- 返回 2.0 + +set float_suffix_acceptance to off; +SELECT 3.14f; +SELECT 10.0d; +SELECT -2.5f; +SELECT -10.0d; +SELECT 1f; +SELECT 2d; +SELECT -3F; +SELECT -4D; +SELECT 123e3d; +SELECT 5.5df; +SELECT 3.6D; +SELECT 8.6fabc; + +INSERT INTO test_table (id, float_value, double_value) VALUES (1, 3.14f, 2.7182d); +SELECT float_value * 2d, double_value + 1.5f FROM test_table; +DROP TABLE test_table; + +SELECT round(3.14159f, 2); +SELECT trunc(3.14159f, 2); +SELECT sqrt(4.0f); +SELECT power(2.0f, 3.0f); +SELECT sin(0.5f); +SELECT 
cos(0.5f); +SELECT log(10.0f); +SELECT exp(2.0f); + +CREATE FUNCTION test_function(input_val float) RETURNS float AS $$ +BEGIN + RETURN input_val * 2.0f; +END; +$$ LANGUAGE plpgsql; + +SELECT test_function(10.5f); +DROP FUNCTION test_function; + +CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT); +CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT); +CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS +$$ +DECLARE + input_value FLOAT; +BEGIN + input_value := TG_ARGV[0]::FLOAT; + INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3, input_value); + RETURN NEW; +END +$$ LANGUAGE PLPGSQL; +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5f); +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(2.5d); +DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +drop function tri_insert_func; +drop table test_trigger_src_tbl; +drop table test_trigger_des_tbl; + +SELECT 1.5f = 1.5; +SELECT 1.5f <> 2.0; +SELECT 1.5f > 1.0; +SELECT 1.5f < 2.0; +SELECT (1.5f > 1.0) AND (2.5f < 3.0); +SELECT (1.5f > 1.0) OR (2.5f > 3.0); +SELECT 1.5f + 2.5; +SELECT 3.5f - 1.5; +SELECT 2.0f * 3.0; +SELECT 4.0f / 2.0; + +set float_suffix_acceptance to on; + +SELECT BINARY_DOUBLE_INFINITY; +SELECT BINARY_DOUBLE_NAN; + +CREATE TABLE test_table ( + id INT, + float_value binary_double +); +INSERT INTO test_table (id, float_value) +VALUES (1, BINARY_DOUBLE_NAN); +SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_NAN; + +INSERT INTO test_table (id, float_value) +VALUES (2, BINARY_DOUBLE_INFINITY); + +SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_INFINITY; +SELECT float_value + 1 FROM test_table WHERE id = 1; +SELECT float_value * 2 FROM test_table WHERE id = 2; +SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_NAN OR float_value = 
BINARY_DOUBLE_INFINITY; +DROP TABLE test_table; + +SELECT ABS(binary_double_infinity); +SELECT CEIL(binary_double_infinity); +SELECT SQRT(binary_double_infinity); +SELECT FLOOR(binary_double_infinity); +SELECT SQRT(binary_double_infinity); +SELECT EXP(binary_double_infinity); +SELECT LOG(binary_double_infinity); +SELECT POWER(binary_double_infinity, 2); +SELECT SIN(binary_double_infinity); + +SELECT ABS(binary_double_nan); +SELECT CEIL(binary_double_nan); +SELECT SQRT(binary_double_nan); +SELECT FLOOR(binary_double_nan); +SELECT SQRT(binary_double_nan); +SELECT EXP(binary_double_nan); +SELECT LOG(binary_double_nan); +SELECT POWER(binary_double_nan, 2); +SELECT SIN(binary_double_nan); + +-- 创建自定义函数 +CREATE FUNCTION test_function(value FLOAT) + RETURNS FLOAT + LANGUAGE plpgsql +AS $$ +BEGIN + IF value = binary_double_infinity THEN + RETURN 1.0; + ELSE + RETURN 0.0; + END IF; +END; +$$; + +SELECT test_function(binary_double_nan); +DROP FUNCTION test_function; +--表达式 +SELECT binary_double_infinity = binary_double_infinity; -- 返回 true +SELECT binary_double_nan = binary_double_nan; -- 返回 false +SELECT binary_double_nan = binary_double_infinity; -- 返回 false + +SELECT binary_double_infinity <> 0.0; -- 返回 true +SELECT binary_double_nan <> binary_double_nan; -- 返回 true + +SELECT binary_double_infinity > 0.0; -- 返回 true +SELECT binary_double_nan < binary_double_infinity; -- 返回 false + +SELECT (binary_double_infinity > 0.0) AND (binary_double_nan < binary_double_infinity); -- 返回 false +SELECT (binary_double_infinity > 0.0) OR (binary_double_nan < binary_double_infinity); -- 返回 true + +SELECT binary_double_infinity + 1.0; -- 返回正无穷大 +SELECT binary_double_nan + 1.0; -- 返回 NaN + +SELECT binary_double_infinity - binary_double_infinity; -- 返回 NaN +SELECT binary_double_nan - 1.0; -- 返回 NaN + +SELECT binary_double_infinity * binary_double_infinity; -- 返回正无穷大 +SELECT binary_double_nan * 1.0; -- 返回 NaN + +SELECT binary_double_infinity / binary_double_infinity; -- 返回 NaN +SELECT 
binary_double_nan / 1.0; -- 返回 NaN + +CREATE TABLE T1(binary_double_nan INT); +INSERT INTO T1 VALUES(1),(2),(3); +SELECT binary_double_nan; +SELECT binary_double_nan FROM T1; +SELECT T1.binary_double_nan FROM T1; +DROP TABLE T1; + +--CHECK约束 +create table t1( + num binary_double + CONSTRAINT check_num CHECK (num > 100) +); +insert into t1 values (binary_double_infinity);--插入成功 +insert into t1 values (-binary_double_infinity);--插入失败 +insert into t1 values (binary_double_nan);--插入成功 +drop table t1; + +--隐式转换 +create table t1 (id int); +insert into t1 values(binary_double_infinity);--插入失败 +insert into t1 values(-binary_double_infinity);--插入失败 +insert into t1 values(binary_double_nan);--插入失败 +drop table t1; + +--聚集函数 +create table t1(id binary_double); +insert into t1 values(99); +insert into t1 values(100); +insert into t1 values(binary_double_infinity); +insert into t1 values(binary_double_nan); +select avg(id) from t1; +select sum(id) from t1; +select max(id) from t1; +select min(id) from t1; +drop table t1; + +--关键字测试 +create table binary_double_infinity(col1 float4); +drop table binary_double_infinity; + +create table t1(binary_double_nan float4); +insert into t1 values(3.14),(10),(15); +select binary_double_nan from t1; +select t1.binary_double_nan from t1; +drop table t1; + +create table t1(binary_double_infinity float4 DEFAULT binary_double_infinity, id int); +insert into t1 (id) values(10); +insert into t1 values (100,9); + +select * from t1; +select * from t1 where binary_double_infinity = 100; +select * from t1 where binary_double_infinity = binary_double_infinity; +select id from t1 group by id having binary_double_infinity > 100; + +drop table t1; + +select 3.14binary_double_nan; +select 3.14binary_double_infinity; + +create user binary_double_infinity with password'gauss@123'; +drop user binary_double_infinity; + +create schema binary_double_infinity; +drop schema binary_double_infinity; + +CREATE FUNCTION binary_double_infinity(a INTEGER, b INTEGER) + 
RETURNS INTEGER + LANGUAGE plpgsql +AS +$$ +BEGIN + RETURN a + b; +END; +$$; +drop function binary_double_infinity; + +create type binary_double_infinity AS ( + id INT, + name VARCHAR(50), + age INT +); +drop type binary_double_infinity; + +set disable_keyword_options = 'binary_double_nan'; + +select 3.14binary_double_nan; +select 3.14binary_double_infinity; + +create user binary_double_nan with password'gauss@123'; +drop user binary_double_nan; + +create schema binary_double_nan; +drop schema binary_double_nan; + +CREATE FUNCTION binary_double_nan(a INTEGER, b INTEGER) + RETURNS INTEGER + LANGUAGE plpgsql +AS +$$ +BEGIN + RETURN a + b; +END; +$$; +drop function binary_double_nan; + +create type binary_double_nan AS ( + id INT, + name VARCHAR(50), + age INT +); +drop type binary_double_nan; +set disable_keyword_options = ''; + +drop schema if exists test_binary cascade; -- Gitee From 2315f699fe208af8f85c66c57b2b4b3931b6c026 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Wed, 12 Jun 2024 13:40:12 +0800 Subject: [PATCH 051/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=B5=8B=E7=A9=BA?= =?UTF-8?q?=E5=80=BC=E6=8C=82=E5=BA=93=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_comp.cpp | 2 +- src/common/pl/plpgsql/src/pl_exec.cpp | 36 ++++++++--- .../expected/plpgsql_cursor_rowtype.out | 59 +++++++++++++++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 64 +++++++++++++++++++ 4 files changed, 151 insertions(+), 10 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp index fd71cf3c7d..e27b2cd29b 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -2212,7 +2212,7 @@ static Node* make_datum_param(PLpgSQL_expr* expr, int dno, int location) cnst->consttypmod = TupleDescAttr(tupdesc, i)->atttypmod; cnst->constcollid = attr->attcollation; cnst->constlen = attr->attlen; - cnst->constvalue = 
SPI_getbinval(rec->tup, tupdesc, i + 1, &isnull); + cnst->constvalue = rec->tup!=NULL ? SPI_getbinval(rec->tup, tupdesc, i + 1, &isnull) : NULL; cnst->constisnull = isnull; cnst->constbyval = attr->attbyval; cnst->location = -1; /* "unknown" */ diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index 0550ad1e40..f21f13ef3f 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -9643,17 +9643,35 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v /* Source must be of RECORD or composite type */ if (!type_is_rowtype(valtype)) { - Datum values[1] = {value}; - bool nulls[1] = {false}; - HeapTuple tuple = NULL; + if (value != 0) { + Datum values[1] = {value}; + bool nulls[1] = {false}; + HeapTuple tuple = NULL; - tupdesc = CreateTemplateTupleDesc(1, false); - TupleDescInitEntry(tupdesc, (AttrNumber)1, "datum", valtype, -1, 0); - tuple = (HeapTuple)tableam_tops_form_tuple(tupdesc, values, nulls); + tupdesc = CreateTemplateTupleDesc(1, false); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "datum", valtype, -1, 0); + tuple = (HeapTuple)tableam_tops_form_tuple(tupdesc, values, nulls); - exec_move_row_from_fields(estate, target, tuple, tupdesc); - ReleaseTupleDesc(tupdesc); - heap_freetuple_ext(tuple); + exec_move_row_from_fields(estate, target, tuple, tupdesc); + ReleaseTupleDesc(tupdesc); + heap_freetuple_ext(tuple); + } else { + HeapTuple newtup = NULL; + bool *newnulls; + TupleDesc tupdesc = rec->tupdesc; + int td_natts = tupdesc ? 
tupdesc->natts : 0; + int rc = 0; + newnulls = (bool *)palloc(td_natts * sizeof(bool)); + rc = memset_s(newnulls, td_natts * sizeof(bool), true, td_natts * sizeof(bool)); + securec_check(rc, "\0", "\0"); + newtup = (HeapTuple)tableam_tops_form_tuple(tupdesc, NULL, newnulls); + if (rec->freetup) { + heap_freetuple_ext(rec->tup); + } + rec->tup = newtup; + rec->freetup = true; + pfree_ext(newnulls); + } } else { HeapTupleHeader td; Oid tupType; diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 518f4afc9e..63a074adc1 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -5,6 +5,65 @@ drop schema if exists plpgsql_cursor_rowtype; NOTICE: schema "plpgsql_cursor_rowtype" does not exist, skipping create schema plpgsql_cursor_rowtype; set current_schema = plpgsql_cursor_rowtype; +CREATE TABLE test_2 ( + id NUMBER PRIMARY KEY, + first_name VARCHAR2(50), + last_name VARCHAR2(50), + email VARCHAR2(25) NOT NULL, + phone_number VARCHAR2(20), + hire_date DATE NOT NULL +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_2_pkey" for table "test_2" +INSERT INTO test_2 VALUES (100, 'John', 'Doe','john.doe@example.com', '123-4567-8901', TO_DATE('2000-01-01', 'YYYY-MM-DD')); +INSERT INTO test_2 VALUES (101, 'Jane', 'Smith','jane.smith@example.com', '456-8324-4579', TO_DATE('1999-02-08', 'YYYY-MM-DD')); +DECLARE + CURSOR curtest_2(p_id INT , p_first_name VARCHAR2, p_last_name VARCHAR2 DEFAULT 'Doe') IS + SELECT id, first_name, last_name, email, phone_number, hire_date + FROM test_2 + WHERE (first_name = p_first_name) + AND (last_name = p_last_name) + AND (id = p_id); + v_result curtest_2%ROWTYPE; +BEGIN + v_result:=NULL; + raise notice 'Result: %', v_result; +END; +/ +NOTICE: Result: (,,,,,) +DECLARE + CURSOR curtest_2(p_id INT , p_first_name VARCHAR2, p_last_name VARCHAR2 DEFAULT 'Doe') IS + SELECT id, first_name, 
last_name, email, phone_number, hire_date + FROM test_2 + WHERE (first_name = p_first_name) + AND (last_name = p_last_name) + AND (id = p_id); + v_result curtest_2%ROWTYPE; +BEGIN + OPEN curtest_2(100,'John'); + FETCH curtest_2 INTO v_result; + raise notice 'Result: %', v_result; + CLOSE curtest_2; + + v_result:=NULL; + raise notice 'Result: %', v_result; +END; +/ +NOTICE: Result: (100,John,Doe,john.doe@example.com,123-4567-8901,"Sat Jan 01 00:00:00 2000") +NOTICE: Result: (,,,,,) +DECLARE + CURSOR curtest_2(p_id INT , p_first_name VARCHAR2, p_last_name VARCHAR2 DEFAULT 'Doe') IS + SELECT id, first_name, last_name, email, phone_number, hire_date + FROM test_2 + WHERE (first_name = p_first_name) + AND (last_name = p_last_name) + AND (id = p_id); + v_result curtest_2%ROWTYPE; +BEGIN + raise notice 'Result: %', v_result; +END; +/ +NOTICE: Result: (,,,,,) +drop table test_2 cascade; set behavior_compat_options='allow_procedure_compile_check'; create table emp (empno int, ename varchar(10), job varchar(10)); insert into emp values (1, 'zhangsan', 'job1'); diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index 94c76c2eec..ccc54dd63b 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -4,6 +4,70 @@ drop schema if exists plpgsql_cursor_rowtype; create schema plpgsql_cursor_rowtype; set current_schema = plpgsql_cursor_rowtype; + +CREATE TABLE test_2 ( + id NUMBER PRIMARY KEY, + first_name VARCHAR2(50), + last_name VARCHAR2(50), + email VARCHAR2(25) NOT NULL, + phone_number VARCHAR2(20), + hire_date DATE NOT NULL +); + +INSERT INTO test_2 VALUES (100, 'John', 'Doe','john.doe@example.com', '123-4567-8901', TO_DATE('2000-01-01', 'YYYY-MM-DD')); +INSERT INTO test_2 VALUES (101, 'Jane', 'Smith','jane.smith@example.com', '456-8324-4579', TO_DATE('1999-02-08', 'YYYY-MM-DD')); + +DECLARE + CURSOR curtest_2(p_id INT , p_first_name VARCHAR2, p_last_name 
VARCHAR2 DEFAULT 'Doe') IS + SELECT id, first_name, last_name, email, phone_number, hire_date + FROM test_2 + WHERE (first_name = p_first_name) + AND (last_name = p_last_name) + AND (id = p_id); + + v_result curtest_2%ROWTYPE; +BEGIN + v_result:=NULL; + raise notice 'Result: %', v_result; +END; +/ + +DECLARE + CURSOR curtest_2(p_id INT , p_first_name VARCHAR2, p_last_name VARCHAR2 DEFAULT 'Doe') IS + SELECT id, first_name, last_name, email, phone_number, hire_date + FROM test_2 + WHERE (first_name = p_first_name) + AND (last_name = p_last_name) + AND (id = p_id); + + v_result curtest_2%ROWTYPE; +BEGIN + OPEN curtest_2(100,'John'); + FETCH curtest_2 INTO v_result; + raise notice 'Result: %', v_result; + CLOSE curtest_2; + + v_result:=NULL; + raise notice 'Result: %', v_result; +END; +/ + +DECLARE + CURSOR curtest_2(p_id INT , p_first_name VARCHAR2, p_last_name VARCHAR2 DEFAULT 'Doe') IS + SELECT id, first_name, last_name, email, phone_number, hire_date + FROM test_2 + WHERE (first_name = p_first_name) + AND (last_name = p_last_name) + AND (id = p_id); + + v_result curtest_2%ROWTYPE; +BEGIN + raise notice 'Result: %', v_result; +END; +/ + +drop table test_2 cascade; + set behavior_compat_options='allow_procedure_compile_check'; create table emp (empno int, ename varchar(10), job varchar(10)); -- Gitee From c59d0861dc681f11723bf30b57d9f630a1f898ed Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Wed, 3 Jul 2024 10:43:28 +0800 Subject: [PATCH 052/347] =?UTF-8?q?TO=5FBINARY=5FFLOAT=20=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E5=AE=9E=E7=8E=B0=E3=80=81IS=20[NOT]=20NAN=E4=B8=8EIS?= =?UTF-8?q?=20[NOT]=20INFINITE=E8=A1=A8=E8=BE=BE=E5=BC=8F=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/builtin_funcs.ini | 9 + src/common/backend/nodes/copyfuncs.cpp | 34 + src/common/backend/nodes/equalfuncs.cpp | 22 + src/common/backend/nodes/nodeFuncs.cpp | 38 + 
src/common/backend/nodes/nodes.cpp | 4 + src/common/backend/nodes/outfuncs.cpp | 22 + src/common/backend/nodes/readfuncs.cpp | 30 + src/common/backend/parser/gram.y | 63 +- src/common/backend/parser/parse_expr.cpp | 16 + src/common/backend/utils/adt/float.cpp | 201 ++ src/common/backend/utils/adt/ruleutils.cpp | 50 + src/common/backend/utils/adt/selfuncs.cpp | 135 ++ src/common/backend/utils/init/globals.cpp | 3 +- .../interfaces/libpq/frontend_parser/gram.y | 8 +- .../cbb/instruments/utils/unique_query.cpp | 14 + src/gausskernel/optimizer/path/clausesel.cpp | 8 + src/gausskernel/optimizer/path/costsize.cpp | 13 + src/gausskernel/optimizer/prep/prepqual.cpp | 16 + src/gausskernel/runtime/executor/execExpr.cpp | 60 + .../runtime/executor/execExprInterp.cpp | 131 ++ src/gausskernel/runtime/executor/execQual.cpp | 214 +++ .../rollback-post_catalog_maindb_92_940.sql | 11 + .../rollback-post_catalog_otherdb_92_940.sql | 11 + .../upgrade-post_catalog_maindb_92_940.sql | 67 + .../upgrade-post_catalog_otherdb_92_940.sql | 67 + src/include/executor/executor.h | 1 + src/include/nodes/execExpr.h | 20 + src/include/nodes/execnodes.h | 18 + src/include/nodes/nodes.h | 5 + src/include/nodes/primnodes.h | 30 + src/include/parser/kwlist.h | 3 + src/include/utils/builtins.h | 3 + src/include/utils/selfuncs.h | 7 +- .../expected/expr_nantest_infinitetest.out | 1684 +++++++++++++++++ .../regress/expected/func_to_binary_float.out | 609 ++++++ src/test/regress/parallel_schedule0A | 3 + .../regress/sql/expr_nantest_infinitetest.sql | 462 +++++ src/test/regress/sql/func_to_binary_float.sql | 145 ++ 38 files changed, 4229 insertions(+), 8 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql create mode 100644 
src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql create mode 100644 src/test/regress/expected/expr_nantest_infinitetest.out create mode 100644 src/test/regress/expected/func_to_binary_float.out create mode 100644 src/test/regress/sql/expr_nantest_infinitetest.sql create mode 100644 src/test/regress/sql/func_to_binary_float.sql diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index f82388d397..143ccf1145 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -12111,6 +12111,15 @@ AddFuncGroup( AddBuiltinFunc(_0(1846), _1("to_ascii"), _2(2), _3(true), _4(false), _5(to_ascii_enc), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 25, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_ascii_enc"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("encode text from encoding to ASCII text"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), AddBuiltinFunc(_0(1847), _1("to_ascii"), _2(2), _3(true), _4(false), _5(to_ascii_encname), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 25, 19), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_ascii_encname"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("encode text from encoding to ASCII text"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "to_binary_float", 6, + AddBuiltinFunc(_0(7012), _1("to_binary_float"), _2(3), _3(false), _4(false), 
_5(to_binary_float_text), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 25, 25, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_text"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7013), _1("to_binary_float"), _2(1), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, ' ', false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7014), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_number), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 701, 701, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_number"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7015), _1("to_binary_float"), _2(1), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), 
_13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, 0, false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7016), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 701, 25, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, 0, false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7017), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_text_number), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 25, 701, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_text_number"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "to_char", 11, AddBuiltinFunc(_0(INTERVALTOCHARFUNCOID), _1("to_char"), _2(2), _3(true), _4(false), _5(interval_to_char), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), 
_17(false), _18('s'), _19(0), _20(2, 1186, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("interval_to_char"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(true), _32(false), _33("format interval to text"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index db631ef7b6..e25c8514e1 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -3214,6 +3214,34 @@ static NullTest* _copyNullTest(const NullTest* from) return newnode; } +/* + * _copyNanTest + */ +static NanTest* _copyNanTest(const NanTest* from) +{ + NanTest* newnode = makeNode(NanTest); + + COPY_SCALAR_FIELD(xpr.selec); + COPY_NODE_FIELD(arg); + COPY_SCALAR_FIELD(nantesttype); + + return newnode; +} + +/* + * _copyInfiniteTest + */ +static InfiniteTest* _copyInfiniteTest(const InfiniteTest* from) +{ + InfiniteTest* newnode = makeNode(InfiniteTest); + + COPY_SCALAR_FIELD(xpr.selec); + COPY_NODE_FIELD(arg); + COPY_SCALAR_FIELD(infinitetesttype); + + return newnode; +} + /* * _copySetVariableExpr */ @@ -8257,6 +8285,12 @@ void* copyObject(const void* from) case T_NullTest: retval = _copyNullTest((NullTest*)from); break; + case T_NanTest: + retval = _copyNanTest((NanTest*)from); + break; + case T_InfiniteTest: + retval = _copyInfiniteTest((InfiniteTest*)from); + break; case T_SetVariableExpr: retval = _copySetVariableExpr((SetVariableExpr*)from); break; diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index 521718bf2b..a551d297a4 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -619,6 +619,22 @@ static bool _equalNullTest(const NullTest* a, const NullTest* b) return true; } +static bool _equalNanTest(const NanTest* a, const NanTest* b) +{ + COMPARE_NODE_FIELD(arg); + COMPARE_SCALAR_FIELD(nantesttype); + + return true; +} + +static bool 
_equalInfiniteTest(const InfiniteTest* a, const InfiniteTest* b) +{ + COMPARE_NODE_FIELD(arg); + COMPARE_SCALAR_FIELD(infinitetesttype); + + return true; +} + static bool _equalSetVariableExpr(const SetVariableExpr *a, const SetVariableExpr *b) { COMPARE_STRING_FIELD(name); @@ -3807,6 +3823,12 @@ bool equal(const void* a, const void* b) case T_NullTest: retval = _equalNullTest((NullTest*)a, (NullTest*)b); break; + case T_NanTest: + retval = _equalNanTest((NanTest*)a, (NanTest*)b); + break; + case T_InfiniteTest: + retval = _equalInfiniteTest((InfiniteTest*)a, (InfiniteTest*)b); + break; case T_SetVariableExpr: retval = _equalSetVariableExpr((SetVariableExpr*)a, (SetVariableExpr*)b); break; diff --git a/src/common/backend/nodes/nodeFuncs.cpp b/src/common/backend/nodes/nodeFuncs.cpp index 01b25da07d..a1c04ce1a8 100644 --- a/src/common/backend/nodes/nodeFuncs.cpp +++ b/src/common/backend/nodes/nodeFuncs.cpp @@ -60,6 +60,8 @@ Oid exprType(const Node* expr) case T_CurrentOfExpr: case T_HashFilter: case T_NullTest: + case T_NanTest: + case T_InfiniteTest: case T_ScalarArrayOpExpr: case T_RowCompareExpr: type = BOOLOID; @@ -959,6 +961,8 @@ Oid exprCollation(const Node* expr) } break; case T_NullTest: + case T_NanTest: + case T_InfiniteTest: case T_HashFilter: coll = InvalidOid; /* result is always boolean */ break; @@ -1193,6 +1197,8 @@ void exprSetCollation(Node* expr, Oid collation) : (collation == InvalidOid)); break; case T_NullTest: + case T_NanTest: + case T_InfiniteTest: case T_HashFilter: Assert(!OidIsValid(collation)); /* result is always boolean */ break; @@ -1453,6 +1459,14 @@ int exprLocation(const Node* expr) /* just use argument's location */ loc = exprLocation((Node*)((const NullTest*)expr)->arg); break; + case T_NanTest: + /* just use argument's location */ + loc = exprLocation((Node*)((const NanTest*)expr)->arg); + break; + case T_InfiniteTest: + /* just use argument's location */ + loc = exprLocation((Node*)((const InfiniteTest*)expr)->arg); + break; 
case T_HashFilter: /* just use argument's location */ loc = exprLocation((Node*)((const HashFilter*)expr)->arg); @@ -1937,6 +1951,10 @@ bool expression_tree_walker(Node* node, bool (*walker)(), void* context) } break; case T_NullTest: return p2walker(((NullTest*)node)->arg, context); + case T_NanTest: + return p2walker(((NanTest*)node)->arg, context); + case T_InfiniteTest: + return p2walker(((InfiniteTest*)node)->arg, context); case T_HashFilter: return p2walker(((HashFilter*)node)->arg, context); case T_BooleanTest: @@ -2635,6 +2653,22 @@ Node* expression_tree_mutator(Node* node, Node* (*mutator)(Node*, void*), void* MUTATE(newnode->arg, ntest->arg, Expr*); return (Node*)newnode; } break; + case T_NanTest: { + NanTest* ntest = (NanTest*)node; + NanTest* newnode = NULL; + + FLATCOPY(newnode, ntest, NanTest, isCopy); + MUTATE(newnode->arg, ntest->arg, Expr*); + return (Node*)newnode; + } break; + case T_InfiniteTest: { + InfiniteTest* ntest = (InfiniteTest*)node; + InfiniteTest* newnode = NULL; + + FLATCOPY(newnode, ntest, InfiniteTest, isCopy); + MUTATE(newnode->arg, ntest->arg, Expr*); + return (Node*)newnode; + } break; case T_HashFilter: { HashFilter* htest = (HashFilter*)node; HashFilter* newnode = NULL; @@ -3127,6 +3161,10 @@ bool raw_expression_tree_walker(Node* node, bool (*walker)(), void* context) } break; case T_NullTest: return p2walker(((NullTest*)node)->arg, context); + case T_NanTest: + return p2walker(((NanTest*)node)->arg, context); + case T_InfiniteTest: + return p2walker(((InfiniteTest*)node)->arg, context); case T_BooleanTest: return p2walker(((BooleanTest*)node)->arg, context); case T_HashFilter: diff --git a/src/common/backend/nodes/nodes.cpp b/src/common/backend/nodes/nodes.cpp index f01dc2c83d..538b1887f6 100755 --- a/src/common/backend/nodes/nodes.cpp +++ b/src/common/backend/nodes/nodes.cpp @@ -197,6 +197,8 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, {T_MinMaxExpr, "MinMaxExpr"}, {T_XmlExpr, "XmlExpr"}, {T_NullTest, 
"NullTest"}, + {T_NanTest, "NanTest"}, + {T_InfiniteTest, "InfiniteTest"}, {T_BooleanTest, "BooleanTest"}, {T_CoerceToDomain, "CoerceToDomain"}, {T_CoerceToDomainValue, "CoerceToDomainValue"}, @@ -239,6 +241,8 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, {T_MinMaxExprState, "MinMaxExprState"}, {T_XmlExprState, "XmlExprState"}, {T_NullTestState, "NullTestState"}, + {T_NanTestState, "NanTestState"}, + {T_InfiniteTestState, "InfiniteTestState"}, {T_HashFilterState, "HashFilterState"}, {T_CoerceToDomainState, "CoerceToDomainState"}, {T_DomainConstraintState, "DomainConstraintState"}, diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 9add6aab09..9464c8f285 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -3065,6 +3065,22 @@ static void _outNullTest(StringInfo str, NullTest* node) WRITE_BOOL_FIELD(argisrow); } +static void _outNanTest(StringInfo str, NanTest* node) +{ + WRITE_NODE_TYPE("NANTEST"); + + WRITE_NODE_FIELD(arg); + WRITE_ENUM_FIELD(nantesttype, NanTestType); +} + +static void _outInfiniteTest(StringInfo str, InfiniteTest* node) +{ + WRITE_NODE_TYPE("INFINITETEST"); + + WRITE_NODE_FIELD(arg); + WRITE_ENUM_FIELD(infinitetesttype, InfiniteTestType); +} + static void _outSetVariableExpr(StringInfo str, SetVariableExpr* node) { WRITE_NODE_TYPE("SetVariableExpr"); @@ -6679,6 +6695,12 @@ static void _outNode(StringInfo str, const void* obj) case T_NullTest: _outNullTest(str, (NullTest*)obj); break; + case T_NanTest: + _outNanTest(str, (NanTest*)obj); + break; + case T_InfiniteTest: + _outInfiniteTest(str, (InfiniteTest*)obj); + break; case T_SetVariableExpr: _outSetVariableExpr(str, (SetVariableExpr*)obj); break; diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 82beff2953..ca2ebc23e9 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -2940,6 +2940,32 @@ 
static NullTest* _readNullTest(void) READ_DONE(); } +/* + * _readNanTest + */ +static NanTest* _readNanTest(void) +{ + READ_LOCALS(NanTest); + + READ_NODE_FIELD(arg); + READ_ENUM_FIELD(nantesttype, NanTestType); + + READ_DONE(); +} + +/* + * _readInfiniteTest + */ +static InfiniteTest* _readInfiniteTest(void) +{ + READ_LOCALS(InfiniteTest); + + READ_NODE_FIELD(arg); + READ_ENUM_FIELD(infinitetesttype, InfiniteTestType); + + READ_DONE(); +} + /* * _readSetVariableExpr */ @@ -6590,6 +6616,10 @@ Node* parseNodeString(void) return_value = _readXmlExpr(); } else if (MATCH("NULLTEST", 8)) { return_value = _readNullTest(); + } else if (MATCH("NANTEST", 7)) { + return_value = _readNanTest(); + } else if (MATCH("INFINITETEST", 12)) { + return_value = _readInfiniteTest(); } else if (MATCH("SETVARIABLEEXPR", 15)) { return_value = _readSetVariableExpr(); } else if (MATCH("HASHFILTER", 10)) { diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 7b3d52ea34..1d42e3ef40 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -905,7 +905,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); /* PGXC_END */ DROP DUPLICATE DISCONNECT DUMPFILE - EACH ELASTIC ELSE ENABLE_P ENCLOSED ENCODING ENCRYPTED ENCRYPTED_VALUE ENCRYPTION ENCRYPTION_TYPE END_P ENDS ENFORCED ENUM_P ERRORS ESCAPE EOL ESCAPING EVENT EVENTS EVERY EXCEPT EXCHANGE + EACH ELASTIC ELSE ENABLE_P ENCLOSED ENCODING ENCRYPTED ENCRYPTED_VALUE ENCRYPTION ENCRYPTION_TYPE END_P ENDS ENFORCED ENUM_P ERROR_P ERRORS ESCAPE EOL ESCAPING EVENT EVENTS EVERY EXCEPT EXCHANGE EXCLUDE EXCLUDED EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPIRED_P EXPLAIN EXTENSION EXTERNAL EXTRACT ESCAPED @@ -918,7 +918,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); HANDLER HAVING HDFSDIRECTORY HEADER_P HOLD HOUR_P HOUR_MINUTE_P HOUR_SECOND_P IDENTIFIED IDENTITY_P IF_P IGNORE IGNORE_EXTRA_DATA ILIKE IMMEDIATE IMMUTABLE IMPLICIT_P IN_P INCLUDE - 
INCLUDING INCREMENT INCREMENTAL INDEX INDEXES INFILE INHERIT INHERITS INITIAL_P INITIALLY INITRANS INLINE_P + INCLUDING INCREMENT INCREMENTAL INDEX INDEXES INFILE INFINITE_P INHERIT INHERITS INITIAL_P INITIALLY INITRANS INLINE_P INNER_P INOUT INPUT_P INSENSITIVE INSERT INSTEAD INT_P INTEGER INTERNAL INTERSECT INTERVAL INTO INVISIBLE INVOKER IP IS ISNULL ISOLATION @@ -933,7 +933,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); MAPPING MASKING MASTER MATCH MATERIALIZED MATCHED MAXEXTENTS MAXSIZE MAXTRANS MAXVALUE MERGE MESSAGE_TEXT METHOD MINUS_P MINUTE_P MINUTE_SECOND_P MINVALUE MINEXTENTS MODE MODEL MODIFY_P MONTH_P MOVE MOVEMENT MYSQL_ERRNO // DB4AI - NAME_P NAMES NATIONAL NATURAL NCHAR NEXT NO NOCOMPRESS NOCYCLE NODE NOLOGGING NOMAXVALUE NOMINVALUE NONE + NAME_P NAMES NAN_P NATIONAL NATURAL NCHAR NEXT NO NOCOMPRESS NOCYCLE NODE NOLOGGING NOMAXVALUE NOMINVALUE NONE NOT NOTHING NOTIFY NOTNULL NOVALIDATE NOWAIT NULL_P NULLCOLS NULLIF NULLS_P NUMBER_P NUMERIC NUMSTR NVARCHAR NVARCHAR2 NVL OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR @@ -26835,6 +26835,34 @@ a_expr: c_expr { $$ = $1; } n->nulltesttype = IS_NOT_NULL; $$ = (Node *)n; } + | a_expr IS NAN_P + { + NanTest *n = makeNode(NanTest); + n->arg = (Expr *) makeTypeCast($1, SystemTypeName("float8"), @1); + n->nantesttype = IS_NAN; + $$ = (Node *)n; + } + | a_expr IS NOT NAN_P + { + NanTest *n = makeNode(NanTest); + n->arg = (Expr *) makeTypeCast($1, SystemTypeName("float8"), @1); + n->nantesttype = IS_NOT_NAN; + $$ = (Node *)n; + } + | a_expr IS INFINITE_P + { + InfiniteTest *n = makeNode(InfiniteTest); + n->arg = (Expr *) makeTypeCast($1, SystemTypeName("float8"), @1); + n->infinitetesttype = IS_INFINITE; + $$ = (Node *)n; + } + | a_expr IS NOT INFINITE_P + { + InfiniteTest *n = makeNode(InfiniteTest); + n->arg = (Expr *) makeTypeCast($1, SystemTypeName("float8"), @1); + n->infinitetesttype = IS_NOT_INFINITE; + $$ = (Node *)n; + } | row OVERLAPS 
row { /* Create and populate a FuncCall node to support the OVERLAPS operator. */ @@ -27656,6 +27684,32 @@ func_application_special: func_name '(' ')' n->call_func = false; $$ = (Node *)n; } + | func_name '(' func_arg_list DEFAULT func_arg_expr ON CONVERSION_P ERROR_P opt_sort_clause ')' + { + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("The syntax or function is not supported. \"%s\"", $4))); + } + if (IsA($5, ColumnRef)) { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("Default param can't be ColumnRef"))); + } + + FuncCall *n = makeNode(FuncCall); + n->funcname = $1; + n->args = lappend($3, $5); + n->args = lappend(n->args, makeBoolAConst(TRUE, -1)); + n->agg_order = $9; + n->agg_star = FALSE; + n->agg_distinct = FALSE; + n->func_variadic = FALSE; + n->over = NULL; + n->location = @1; + n->call_func = false; + $$ = (Node *) n; + } | func_name '(' VARIADIC func_arg_expr opt_sort_clause ')' { FuncCall *n = makeNode(FuncCall); @@ -29914,6 +29968,7 @@ unreserved_keyword: | ENFORCED | ENUM_P | EOL + | ERROR_P | ERRORS | ESCAPE | ESCAPED @@ -29973,6 +30028,7 @@ unreserved_keyword: | INDEX | INDEXES | INFILE + | INFINITE_P | INHERIT | INHERITS | INITIAL_P @@ -30039,6 +30095,7 @@ unreserved_keyword: | MYSQL_ERRNO | NAME_P | NAMES + | NAN_P | NEXT | NO | NOCOMPRESS diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 7a55479be9..32117e8f8a 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -533,6 +533,22 @@ Node *transformExprRecurse(ParseState *pstate, Node *expr) break; } + case T_NanTest: { + NanTest* n = (NanTest*)expr; + + n->arg = (Expr*)transformExprRecurse(pstate, (Node*)n->arg); + result = expr; + break; + } + + case T_InfiniteTest: { + InfiniteTest* n = (InfiniteTest*)expr; + + n->arg = (Expr*)transformExprRecurse(pstate, (Node*)n->arg); + result = expr; + break; + } + case 
T_BooleanTest: result = transformBooleanTest(pstate, (BooleanTest*)expr); break; diff --git a/src/common/backend/utils/adt/float.cpp b/src/common/backend/utils/adt/float.cpp index 1d41498ea7..0d6a4d74d8 100644 --- a/src/common/backend/utils/adt/float.cpp +++ b/src/common/backend/utils/adt/float.cpp @@ -62,6 +62,7 @@ static const uint32 nan[2] = {0xffffffff, 0x7fffffff}; static int float4_cmp_internal(float4 a, float4 b); double float8in_internal(char* str, char** s, bool* hasError); +static double to_binary_float_internal(char* origin_num, bool *err); #ifndef HAVE_CBRT /* @@ -2924,3 +2925,203 @@ Datum float8_to_interval(PG_FUNCTION_ARGS) DirectFunctionCall1(float8_numeric, val), Int32GetDatum(typmod)); } + +/* + * to_binary_float: convert 'origin_num' to a single precision floating-point number。 + * + * - err: if true, indicate convert failed; if false, indicate convert succeed. + */ +static double to_binary_float_internal(char* origin_num, bool *err) +{ + char* num = origin_num; + double val; + char* endptr; + + *err = false; + + if (*num == '\0') { + *err = true; + return 0; + } + + /* skip leading whitespace */ + while (*num != '\0' && isspace((unsigned char)*num)) + num++; + + errno = 0; + val = strtod(num, &endptr); + + /* change -0 to 0 */ + if (*num == '-' && val == 0.0) { + val += 0.0; + } + + // to_binary_float accept 'Nan', '[+-]Inf' + if (endptr == num || errno != 0) { + int save_errno = errno; + + if (pg_strcasecmp(num, "NaN") == 0) { + val = get_float4_nan(); + endptr = num + 3; + } else if (pg_strncasecmp(num, "Infinity", 8) == 0) { + val = get_float4_infinity(); + endptr = num + 8; + } else if (pg_strncasecmp(num, "-Infinity", 9) == 0) { + val = -get_float4_infinity(); + endptr = num + 9; + } else if (pg_strncasecmp(num, "Inf", 3) == 0) { + val = get_float4_infinity(); + endptr = num + 3; + } else if (pg_strncasecmp(num, "-Inf", 4) == 0) { + val = -get_float4_infinity(); + endptr = num + 4; + } else if (save_errno == ERANGE) { + // convert to 
infinite + if (val == 0.0 || val >= HUGE_VAL || val <= -HUGE_VAL) + val = (val == 0.0 ? 0 : (val >= HUGE_VAL ? get_float4_infinity() : -get_float4_infinity())); + } + } +#ifdef HAVE_BUGGY_SOLARIS_STRTOD + else { + /* + * Many versions of Solaris have a bug wherein strtod sets endptr to + * point one byte beyond the end of the string when given "inf" or + * "infinity". + */ + if (endptr != num && endptr[-1] == '\0') + endptr--; + } +#endif /* HAVE_BUGGY_SOLARIS_STRTOD */ + + /* skip trailing whitespace */ + while (*endptr != '\0' && isspace((unsigned char)*endptr)) + endptr++; + + if (*endptr != '\0') { + *err = true; + return 0; + } + + if (isinf((float4)val) && !isinf(val)) { + val = val < 0 ? -get_float4_infinity() : get_float4_infinity(); + } + if (((float4)val) == 0.0 && val != 0) { + val = 0; + } + + return val; +} + +/* + * to_binary_float_text() - convert to a single precision floating-point number. + */ +Datum to_binary_float_text(PG_FUNCTION_ARGS) +{ + bool str1_null = PG_ARGISNULL(0); + bool str2_null = PG_ARGISNULL(1); + bool with_default = PG_GETARG_BOOL(2); + + char *num1, *num2; + double result, r1, r2; + bool err1, err2; + + err1 = true; + if (!str1_null) { + num1 = TextDatumGetCString(PG_GETARG_TEXT_P(0)); + r1 = to_binary_float_internal(num1, &err1); + pfree_ext(num1); + } + + err2 = true; + if (with_default && !str2_null) { + num2 = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + r2 = to_binary_float_internal(num2, &err2); + pfree_ext(num2); + } + + /* + * IF str1 is NULL, return NULL, expect with default and str2 convert error. 
+ */ + if (str1_null && with_default && !str2_null && err2) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type real"))); + + if (str1_null) + PG_RETURN_NULL(); + + if (!err1) { + result = r1; + } else if (with_default) { + if (str2_null) + PG_RETURN_NULL(); + + if (err2) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type real"))); + else + result = r2; + } else { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type real"))); + } + + PG_RETURN_FLOAT4((float4)result); +} + +/* + * to_binary_float_number() + */ +Datum to_binary_float_number(PG_FUNCTION_ARGS) +{ + if (PG_ARGISNULL(0)) + PG_RETURN_NULL(); + + float8 val = PG_GETARG_FLOAT8(0); + + if (val > FLT_MAX) { + val = get_float4_infinity(); + } else if (val < FLT_MIN) { + val = -get_float4_infinity(); + } + + PG_RETURN_FLOAT4((float4)val); +} + +Datum to_binary_float_text_number(PG_FUNCTION_ARGS) +{ + if (PG_ARGISNULL(0)) + PG_RETURN_NULL(); + + bool with_default = PG_GETARG_BOOL(2); + + char *num; + double result; + bool err; + + err = false; + num = TextDatumGetCString(PG_GETARG_TEXT_P(0)); + result = to_binary_float_internal(num, &err); + pfree_ext(num); + + // if str1 convert err, and with default, convert str2 + if (with_default && err && !PG_ARGISNULL(1)) { + err = false; + result = PG_GETARG_FLOAT8(1); + if (result > FLT_MAX) { + result = get_float4_infinity(); + } else if (result < FLT_MIN) { + result = -get_float4_infinity(); + } + } + + if (err) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type real"))); + } + + PG_RETURN_FLOAT4((float4)result); +} diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index ef1dfdfb4a..1517a80b46 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ 
-9551,6 +9551,8 @@ static bool isSimpleNode(Node* node, Node* parentNode, int prettyFlags) case T_SubLink: case T_NullTest: + case T_NanTest: + case T_InfiniteTest: case T_BooleanTest: case T_HashFilter: case T_DistinctExpr: @@ -10485,6 +10487,54 @@ static void get_rule_expr(Node* node, deparse_context* context, bool showimplici appendStringInfoChar(buf, ')'); } break; + case T_NanTest: { + NanTest* ntest = (NanTest*)node; + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren((Node*)ntest->arg, context, true, node, no_alias); + + switch (ntest->nantesttype) + { + case IS_NAN: + appendStringInfoString(buf, " IS NAN"); + break; + case IS_NOT_NAN: + appendStringInfoString(buf, " IS NOT NAN"); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized nantesttype: %d", (int)ntest->nantesttype))); + } + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } break; + + case T_InfiniteTest: { + InfiniteTest* ntest = (InfiniteTest*)node; + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren((Node*)ntest->arg, context, true, node, no_alias); + + switch (ntest->infinitetesttype) + { + case IS_INFINITE: + appendStringInfoString(buf, " IS INFINITE"); + break; + case IS_NOT_INFINITE: + appendStringInfoString(buf, " IS NOT INFINITE"); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized infinitetesttype: %d", (int)ntest->infinitetesttype))); + } + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } break; + case T_HashFilter: { HashFilter* htest = (HashFilter*)node; ListCell* distVar = NULL; diff --git a/src/common/backend/utils/adt/selfuncs.cpp b/src/common/backend/utils/adt/selfuncs.cpp index 8d684c5140..9b16d861c8 100755 --- a/src/common/backend/utils/adt/selfuncs.cpp +++ b/src/common/backend/utils/adt/selfuncs.cpp @@ -1779,6 +1779,141 @@ Selectivity nulltestsel( return (Selectivity)selec; 
} +/* + * nantestsel - Selectivity of NanTest Node. + */ +Selectivity nantestsel( + PlannerInfo* root, NanTestType nantesttype, Node* arg, int varRelid, JoinType jointype, SpecialJoinInfo* sjinfo) +{ + VariableStatData vardata; + vardata.statsTuple = NULL; + vardata.freefunc = NULL; + vardata.rel = NULL; + vardata.var = NULL; + double selec; + + examine_variable(root, arg, varRelid, &vardata); + + if (HeapTupleIsValid(vardata.statsTuple)) { + Form_pg_statistic stats; + double freq_nan; + + stats = (Form_pg_statistic)GETSTRUCT(vardata.statsTuple); + freq_nan = var_eq_const(&vardata, FLOAT8EQOID, Float8GetDatum(get_float8_nan()), false, true); + + switch (nantesttype) { + case IS_NAN: + + /* Use freq_nan directly. */ + selec = freq_nan; + break; + case IS_NOT_NAN: + + selec = 1.0 - stats->stanullfrac - freq_nan; + break; + default: + ereport(ERROR, + (errmodule(MOD_OPT), + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized nantesttype: %d", (int)nantesttype)))); + + return (Selectivity)0; /* keep compiler quiet */ + } + } else { + /* + * No ANALYZE stats available, so make a guess + */ + switch (nantesttype) { + case IS_NAN: + selec = DEFAULT_UNK_SEL; + break; + case IS_NOT_NAN: + selec = DEFAULT_NOT_UNK_SEL; + break; + default: + ereport(ERROR, + (errmodule(MOD_OPT), + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized nantesttype: %d", (int)nantesttype)))); + return (Selectivity)0; /* keep compiler quiet */ + } + } + + ReleaseVariableStats(vardata); + + /* result should be in range, but make sure... */ + CLAMP_PROBABILITY(selec); + + return (Selectivity)selec; +} + +/* + * infinitetestsel - Selectivity of InfiniteTest Node. 
+ */ +Selectivity infinitetestsel( + PlannerInfo* root, InfiniteTestType infinitetesttype, Node* arg, int varRelid, JoinType jointype, SpecialJoinInfo* sjinfo) +{ + VariableStatData vardata; + vardata.statsTuple = NULL; + vardata.freefunc = NULL; + vardata.rel = NULL; + vardata.var = NULL; + double selec; + + examine_variable(root, arg, varRelid, &vardata); + + if (HeapTupleIsValid(vardata.statsTuple)) { + Form_pg_statistic stats; + double freq_inf; + + stats = (Form_pg_statistic)GETSTRUCT(vardata.statsTuple); + freq_inf = var_eq_const(&vardata, FLOAT8EQOID, Float8GetDatum(get_float8_infinity()), false, true); + + switch (infinitetesttype) { + case IS_INFINITE: + + selec = freq_inf; + break; + case IS_NOT_INFINITE: + + selec = 1.0 - stats->stanullfrac - freq_inf; + break; + default: + ereport(ERROR, + (errmodule(MOD_OPT), + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized infinitetesttype: %d", (int)infinitetesttype)))); + + return (Selectivity)0; /* keep compiler quiet */ + } + } else { + /* + * No ANALYZE stats available, so make a guess + */ + switch (infinitetesttype) { + case IS_INFINITE: + selec = DEFAULT_UNK_SEL; + break; + case IS_NOT_INFINITE: + selec = DEFAULT_NOT_UNK_SEL; + break; + default: + ereport(ERROR, + (errmodule(MOD_OPT), + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized infinitetesttype: %d", (int)infinitetesttype)))); + return (Selectivity)0; /* keep compiler quiet */ + } + } + + ReleaseVariableStats(vardata); + + /* result should be in range, but make sure... */ + CLAMP_PROBABILITY(selec); + + return (Selectivity)selec; +} + /* * strip_array_coercion - strip binary-compatible relabeling from an array expr * diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 67931c80a2..4ef6b4cecb 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -75,12 +75,13 @@ bool will_shutdown = false; * NEXT | 92899 | ? | ? 
* ********************************************/ -const uint32 GRAND_VERSION_NUM = 92939; +const uint32 GRAND_VERSION_NUM = 92940; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 NAN_INFINITE_TEST_EXPR = 92940; const uint32 STRAIGHT_JOIN_VERSION_NUMBER = 92939; const uint32 ROTATE_UNROTATE_VERSION_NUM = 92937; const uint32 PIPELINED_FUNCTION_VERSION_NUM = 92936; diff --git a/src/common/interfaces/libpq/frontend_parser/gram.y b/src/common/interfaces/libpq/frontend_parser/gram.y index 523899fe07..d6e7f7260f 100755 --- a/src/common/interfaces/libpq/frontend_parser/gram.y +++ b/src/common/interfaces/libpq/frontend_parser/gram.y @@ -533,7 +533,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; DROP DUPLICATE DISCONNECT DUMPFILE EACH ELASTIC ELSE ENABLE_P ENCLOSED ENCODING ENCRYPTED ENCRYPTED_VALUE ENCRYPTION ENCRYPTION_TYPE - END_P ENDS ENFORCED ENUM_P ERRORS ESCAPE EOL ESCAPING EVENT EVENTS EVERY EXCEPT EXCHANGE + END_P ENDS ENFORCED ENUM_P ERROR_P ERRORS ESCAPE EOL ESCAPING EVENT EVENTS EVERY EXCEPT EXCHANGE EXCLUDE EXCLUDED EXCLUDING EXCLUSIVE EXECUTE EXPIRED_P EXISTS EXPLAIN EXTENSION EXTERNAL EXTRACT ESCAPED @@ -546,7 +546,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; HANDLER HAVING HDFSDIRECTORY HEADER_P HOLD HOUR_P HOUR_MINUTE_P HOUR_SECOND_P IDENTIFIED IDENTITY_P IF_P IGNORE IGNORE_EXTRA_DATA ILIKE IMMEDIATE IMMUTABLE IMPLICIT_P IN_P - INCLUDE INCLUDING INCREMENT INCREMENTAL INDEX INDEXES INFILE INHERIT INHERITS INITIAL_P INITIALLY INITRANS INLINE_P + INCLUDE INCLUDING INCREMENT INCREMENTAL INDEX INDEXES INFILE INFINITE_P INHERIT INHERITS INITIAL_P INITIALLY INITRANS INLINE_P INNER_P INOUT INPUT_P INSENSITIVE INSERT INSTEAD INT_P INTEGER INTERNAL INTERSECT INTERVAL INTO INVISIBLE INVOKER IP IS ISNULL ISOLATION @@ -559,7 +559,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; LOCATION LOCK_P LOCKED LOG_P LOGGING 
LOGIN_ANY LOGIN_SUCCESS LOGIN_FAILURE LOGOUT LOOP MAPPING MASKING MASTER MASTR MATCH MATERIALIZED MATCHED MAXEXTENTS MAXSIZE MAXTRANS MAXVALUE MERGE MESSAGE_TEXT METHOD MINUS_P MINUTE_P MINUTE_SECOND_P MINVALUE MINEXTENTS MODE MODIFY_P MONTH_P MOVE MOVEMENT MODEL MYSQL_ERRNO// DB4AI - NAME_P NAMES NATIONAL NATURAL NCHAR NEXT NLSSORT NO NOCOMPRESS NOCYCLE NODE NOLOGGING NOMAXVALUE NOMINVALUE NONE + NAME_P NAMES NAN_P NATIONAL NATURAL NCHAR NEXT NLSSORT NO NOCOMPRESS NOCYCLE NODE NOLOGGING NOMAXVALUE NOMINVALUE NONE NOT NOTHING NOTIFY NOTNULL NOVALIDATE NOWAIT NULL_P NULLCOLS NULLIF NULLS_P NUMBER_P NUMERIC NUMSTR NVARCHAR NVARCHAR2 NVL OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR @@ -11792,6 +11792,7 @@ unreserved_keyword: | INDEX | INDEXES | INFILE + | INFINITE_P | INHERIT | INHERITS | INITIAL_P @@ -11856,6 +11857,7 @@ unreserved_keyword: | MOVEMENT | NAME_P | NAMES + | NAN_P | NEXT | NO | NOCOMPRESS diff --git a/src/gausskernel/cbb/instruments/utils/unique_query.cpp b/src/gausskernel/cbb/instruments/utils/unique_query.cpp index 7c0ad6997a..d770bf79b6 100755 --- a/src/gausskernel/cbb/instruments/utils/unique_query.cpp +++ b/src/gausskernel/cbb/instruments/utils/unique_query.cpp @@ -645,6 +645,20 @@ void UniqueSql::JumbleExpr(pgssJumbleState* jstate, Node* node) break; } + case T_NanTest: { + NanTest* nt = (NanTest*)node; + APP_JUMB(nt->nantesttype); + UniqueSql::JumbleExpr(jstate, (Node*)nt->arg); + + break; + } + case T_InfiniteTest: { + InfiniteTest* it = (InfiniteTest*)node; + APP_JUMB(it->infinitetesttype); + UniqueSql::JumbleExpr(jstate, (Node*)it->arg); + + break; + } case T_BooleanTest: { BooleanTest* bt = (BooleanTest*)node; APP_JUMB(bt->booltesttype); diff --git a/src/gausskernel/optimizer/path/clausesel.cpp b/src/gausskernel/optimizer/path/clausesel.cpp index b89d68697a..f89f83900b 100755 --- a/src/gausskernel/optimizer/path/clausesel.cpp +++ b/src/gausskernel/optimizer/path/clausesel.cpp @@ -737,6 +737,14 @@ 
Selectivity clause_selectivity(PlannerInfo* root, Node* clause, int varRelid, Jo /* Use node specific selectivity calculation function */ s1 = nulltestsel( root, ((NullTest*)clause)->nulltesttype, (Node*)((NullTest*)clause)->arg, varRelid, jointype, sjinfo); + } else if (IsA(clause, NanTest)) { + /* Use node specific selectivity calculation function */ + s1 = nantestsel( + root, ((NanTest*)clause)->nantesttype, (Node*)((NanTest*)clause)->arg, varRelid, jointype, sjinfo); + } else if (IsA(clause, InfiniteTest)) { + /* Use node specific selectivity calculation function */ + s1 = infinitetestsel( + root, ((InfiniteTest*)clause)->infinitetesttype, (Node*)((InfiniteTest*)clause)->arg, varRelid, jointype, sjinfo); } else if (IsA(clause, BooleanTest)) { /* Use node specific selectivity calculation function */ s1 = booltestsel( diff --git a/src/gausskernel/optimizer/path/costsize.cpp b/src/gausskernel/optimizer/path/costsize.cpp index b58afdacdd..caa2c27c08 100755 --- a/src/gausskernel/optimizer/path/costsize.cpp +++ b/src/gausskernel/optimizer/path/costsize.cpp @@ -4969,6 +4969,19 @@ static bool cost_qual_eval_walker(Node* node, cost_qual_eval_context* context) * phexpr. */ return false; + } else if (IsA(node, NanTest) || IsA(node, InfiniteTest)) { + /* + * Add NanTest | InfiniteTest 0.1 cup_operator_cost, to make the these two + * condition executeed executeed after '='、'IN (x, x)' condition, and before + * 'IN (x, x, x, ...)' condigtion. + * + * Firstly, we don't care about order when input type is float8, and others + * type cast to float8 must call castfunc, which usually take 1 cpu_operator_cost. + * 'In (...)' condition cost = array length * 0.5(see ScalarArrayOpExpr). + * So we can calc (2 * 0.5) ~ (3 * 0.5) - 1 = 0 ~ 0.5, and We take 0.1 to make + * the impact minimize. 
+ */ + context->total.per_tuple += u_sess->attr.attr_sql.cpu_operator_cost * 0.1; } /* recurse into children */ diff --git a/src/gausskernel/optimizer/prep/prepqual.cpp b/src/gausskernel/optimizer/prep/prepqual.cpp index 4008340967..b3fff7ed45 100644 --- a/src/gausskernel/optimizer/prep/prepqual.cpp +++ b/src/gausskernel/optimizer/prep/prepqual.cpp @@ -189,6 +189,22 @@ Node* negate_clause(Node* node) return (Node*)newexpr; } } break; + case T_NanTest: { + NanTest* expr = (NanTest*)node; + + NanTest* newexpr = makeNode(NanTest); + newexpr->arg = expr->arg; + newexpr->nantesttype = (expr->nantesttype == IS_NAN ? IS_NOT_NAN : IS_NAN); + return (Node*)newexpr; + } break; + case T_InfiniteTest: { + InfiniteTest* expr = (InfiniteTest*)node; + + InfiniteTest* newexpr = makeNode(InfiniteTest); + newexpr->arg = expr->arg; + newexpr->infinitetesttype = (expr->infinitetesttype == IS_INFINITE ? IS_NOT_INFINITE : IS_INFINITE); + return (Node*)newexpr; + } break; case T_BooleanTest: { BooleanTest* expr = (BooleanTest*)node; BooleanTest* newexpr = makeNode(BooleanTest); diff --git a/src/gausskernel/runtime/executor/execExpr.cpp b/src/gausskernel/runtime/executor/execExpr.cpp index af1a6baeca..bcc9be4875 100644 --- a/src/gausskernel/runtime/executor/execExpr.cpp +++ b/src/gausskernel/runtime/executor/execExpr.cpp @@ -2057,6 +2057,66 @@ ExecInitExprRec(Expr *node, ExprState *state, ExprEvalPushStep(state, &scratch); break; } + case T_NanTest: + { + NanTest *ntest = (NanTest *) node; + + if (ntest ->nantesttype == IS_NAN) + { + scratch.opcode = EEOP_NANTEST_ISNAN; + } + else if (ntest->nantesttype == IS_NOT_NAN) + { + scratch.opcode = EEOP_NANTEST_ISNOTNAN; + } + else + { + elog(ERROR, "unrecognized nantesttype: %d", + (int) ntest->nantesttype); + } + + scratch.d.decspecexpr.value = (Datum *) palloc(sizeof(Datum)); + scratch.d.decspecexpr.isnull = (bool *) palloc(sizeof(bool)); + /* first evaluate argument into result variable */ + ExecInitExprRec(ntest->arg, state, + 
scratch.d.decspecexpr.value, scratch.d.decspecexpr.isnull, + node); + scratch.d.decspecexpr.expr = node; + + /* then push the test of that argument */ + ExprEvalPushStep(state, &scratch); + break; + } + case T_InfiniteTest: + { + InfiniteTest *itest = (InfiniteTest *) node; + + if (itest->infinitetesttype == IS_INFINITE) + { + scratch.opcode = EEOP_INFINITETEST_ISINFINITE; + } + else if (itest->infinitetesttype == IS_NOT_INFINITE) + { + scratch.opcode = EEOP_INFINITETEST_ISNOTINFINITE; + } + else + { + elog(ERROR, "unrecognized infinitetesttype: %d", + (int) itest->infinitetesttype); + } + + scratch.d.decspecexpr.value = (Datum *) palloc(sizeof(Datum)); + scratch.d.decspecexpr.isnull = (bool *) palloc(sizeof(bool)); + /* first evaluate argument into result variable */ + ExecInitExprRec(itest->arg, state, + scratch.d.decspecexpr.value, scratch.d.decspecexpr.isnull, + node); + scratch.d.decspecexpr.expr = node; + + /* then push the test of that argument */ + ExprEvalPushStep(state, &scratch); + break; + } default: elog(ERROR, "unrecognized node type: %d, line=%d, func:%s", (int) nodeTag(node), __LINE__, __func__); diff --git a/src/gausskernel/runtime/executor/execExprInterp.cpp b/src/gausskernel/runtime/executor/execExprInterp.cpp index b7ea3dae15..0f30f0217d 100644 --- a/src/gausskernel/runtime/executor/execExprInterp.cpp +++ b/src/gausskernel/runtime/executor/execExprInterp.cpp @@ -151,6 +151,8 @@ static TupleDesc get_cached_rowtype(Oid type_id, int32 typmod, static void ShutdownTupleDescRef(Datum arg); static void ExecEvalRowNullInt(ExprState *state, ExprEvalStep *op, ExprContext *econtext, bool checkisnull); +static void ExecEvalNanInt(ExprState *state, ExprEvalStep *op, ExprContext *econtext, bool checkisnan); +static void ExecEvalInfiniteInt(ExprState *state, ExprEvalStep *op, ExprContext *econtext, bool checkisnan); /* fast-path evaluation functions */ static Datum ExecJustInnerVar(ExprState *state, ExprContext *econtext, bool *isnull, ExprDoneCond* isDone); 
@@ -625,6 +627,10 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull, ExprDoneCo &&CASE_EEOP_AGG_COLLECT_PLAIN_TRANS_BYREF, &&CASE_EEOP_AGG_ORDERED_TRANS_DATUM, &&CASE_EEOP_AGG_ORDERED_TRANS_TUPLE, + &&CASE_EEOP_NANTEST_ISNAN, + &&CASE_EEOP_NANTEST_ISNOTNAN, + &&CASE_EEOP_INFINITETEST_ISINFINITE, + &&CASE_EEOP_INFINITETEST_ISNOTINFINITE, &&CASE_EEOP_LAST }; @@ -2180,6 +2186,34 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull, ExprDoneCo EEO_NEXT(); } + EEO_CASE(EEOP_NANTEST_ISNAN) + { + ExecEvalNan(state, op, econtext); + + EEO_NEXT(); + } + + EEO_CASE(EEOP_NANTEST_ISNOTNAN) + { + ExecEvalNotNan(state, op, econtext); + + EEO_NEXT(); + } + + EEO_CASE(EEOP_INFINITETEST_ISINFINITE) + { + ExecEvalInfinite(state, op, econtext); + + EEO_NEXT(); + } + + EEO_CASE(EEOP_INFINITETEST_ISNOTINFINITE) + { + ExecEvalNotInfinite(state, op, econtext); + + EEO_NEXT(); + } + EEO_CASE(EEOP_LAST) { /* unreachable */ @@ -2675,6 +2709,26 @@ ExecEvalRowNotNull(ExprState *state, ExprEvalStep *op, ExprContext *econtext) ExecEvalRowNullInt(state, op, econtext, false); } +void ExecEvalNan(ExprState *state, ExprEvalStep *op, ExprContext *econtext) +{ + ExecEvalNanInt(state, op, econtext, true); +} + +void ExecEvalNotNan(ExprState *state, ExprEvalStep *op, ExprContext *econtext) +{ + ExecEvalNanInt(state, op, econtext, false); +} + +void ExecEvalInfinite(ExprState *state, ExprEvalStep *op, ExprContext *econtext) +{ + ExecEvalInfiniteInt(state, op, econtext, true); +} + +void ExecEvalNotInfinite(ExprState *state, ExprEvalStep *op, ExprContext *econtext) +{ + ExecEvalInfiniteInt(state, op, econtext, false); +} + static Datum CheckRowTypeIsNull(TupleDesc tupDesc, HeapTupleData tmptup, bool checkisnull) { int att; @@ -2783,6 +2837,83 @@ ExecEvalRowNullInt(ExprState *state, ExprEvalStep *op, *op->resvalue = CheckRowTypeIsNull(tupDesc, tmptup, checkisnull); } +/* Common code for IS [NOT] NAN on value */ +static void ExecEvalNanInt(ExprState *state, 
ExprEvalStep *op, + ExprContext *econtext, bool checkisnan) +{ + Datum value = *op->d.decspecexpr.value; + bool isnull = *op->d.decspecexpr.isnull; + Expr *expr = op->d.decspecexpr.expr; + bool resnan; + float8 val; + Oid inputtype; + + /* + * Evaluate value IS [NOT] NAN. + * if number value is NAN and without NOT, return true, else false; if with NOT, the result is reversed; + * if charater string is 'NAN'、'INF'(ignore case) or others which can't cast to float8, throw errors; + * Especially for NULL, no matter IS_NAN or IS_NOT_NAN all return NULL. + */ + + *op->resnull = isnull; + if (isnull) return; + + val = DatumGetFloat8(value); + resnan = isnan(val); + inputtype = deparseNodeForInputype(expr, T_NanTest, val); + + /* Only float8, float4, numeric support NAN */ + if (resnan && !(inputtype == FLOAT8OID || inputtype == FLOAT4OID || inputtype == NUMERICOID)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input for IS [NOT] NAN"))); + } + + /* + * resnan = true, checkisnan = true -> true; + * resnan = true, checkisnan = false -> false; + * resnan = false, checkisnan = true -> false; + * resnan = false, checkisnan = false -> true; + */ + *op->resvalue = BoolGetDatum(resnan == checkisnan); +} + +/* Common code for IS [NOT] INFINITE on value */ +static void ExecEvalInfiniteInt(ExprState *state, ExprEvalStep *op, + ExprContext *econtext, bool checkisinf) +{ + Datum value = *op->d.decspecexpr.value; + bool isnull = *op->d.decspecexpr.isnull; + Expr *expr = op->d.decspecexpr.expr; + bool resinf; + float8 val; + Oid inputtype; + + /* + * Evaluate value IS [NOT] INFINITE. + * if number value is INFINITE and without NOT, return true, else false; if with NOT, the result is reversed; + * if charater string is 'INF'、'NAN' (ignore case) or others which can't cast to float8, thrown errors; + * Especially for NULL, no matter IS_INFINITE or IS_NOT_INFINITE all return NULL. 
+ */ + + *op->resnull = isnull; + if (isnull) return; + + val = DatumGetFloat8(value); + resinf = isinf(DatumGetFloat8(value)); + inputtype = deparseNodeForInputype(expr, T_InfiniteTest, val); + + /* Only float8, float4 support INF */ + if (resinf && !(inputtype == FLOAT8OID || inputtype == FLOAT4OID)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input for IS [NOT] INFINITE"))); + } + + /* see "NAN" */ + *op->resvalue = BoolGetDatum(resinf == checkisinf); +} + /* * Evaluate an ARRAY[] expression. * diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index ba8a50db3a..49de922b42 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -135,6 +135,8 @@ static Datum ExecEvalMinMax(MinMaxExprState* minmaxExpr, ExprContext* econtext, static Datum ExecEvalXml(XmlExprState* xmlExpr, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static Datum ExecEvalNullIf(FuncExprState* nullIfExpr, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static Datum ExecEvalNullTest(NullTestState* nstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); +static Datum ExecEvalNanTest(NanTestState* nstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); +static Datum ExecEvalInfiniteTest(InfiniteTestState* nstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static Datum ExecEvalHashFilter(HashFilterState* hstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static Datum ExecEvalBooleanTest(GenericExprState* bstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static Datum ExecEvalCoerceToDomain( @@ -1260,6 +1262,14 @@ static void find_uservar_in_expr(ExprState *root, char *return_name, bool *if_us NullTestState* parent = (NullTestState*)root; find_uservar_in_expr(parent->arg,return_name, if_use); } break; + case T_NanTestState: { + NanTestState* parent = 
(NanTestState*)root; + find_uservar_in_expr(parent->arg,return_name, if_use); + } break; + case T_InfiniteTestState: { + InfiniteTestState* parent = (InfiniteTestState*)root; + find_uservar_in_expr(parent->arg,return_name, if_use); + } break; case T_SubPlanState: break; default: { @@ -5108,6 +5118,192 @@ static Datum ExecEvalNullTest(NullTestState* nstate, ExprContext* econtext, bool } } +static bool check_val_for_nan_or_infinite(NodeTag nodeTag, float8 val) +{ + /* + * Charater 'inf'、'Infinity' is unsupported in expression IS [NOT] NAN. + * Charater 'NaN' is unsupported is expression IS [NOT] INFINITE. + */ + if ((nodeTag == T_NanTest && isinf(val)) || (nodeTag == T_InfiniteTest && isnan(val))) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input for IS [NOT] %s", nodeTag == T_NanTest ? "NAN" : "INFINITE"))); + return false; // Sustain return statement to uphold logical integrity + } + + return true; +} + +static Oid deparse_funcexpr_for_input_type(FuncExpr* funcexpr, NodeTag type, float8 val) +{ + Oid inputtype = InvalidOid; + ExprState* exprstate = (ExprState*) lfirst(list_head(funcexpr->args)); + if (IsA(exprstate, Param)) { + Param* param = (Param*) exprstate; + inputtype = param->paramtype; + } else if (IsA(exprstate, Var)) { + if (check_val_for_nan_or_infinite(type, val)) { + Var* var = (Var*) exprstate; + inputtype = var->vartype; + } + } else + inputtype = exprstate->resultType; + + return inputtype; +} + +Oid deparseNodeForInputype(Expr *expr, NodeTag type, float8 val) +{ + Expr *argexpr; + Oid inputtype = InvalidOid; + + if (type == T_NanTest) { + argexpr = ((NanTest *) expr)->arg; + } else if (type = T_InfiniteTest) { + argexpr = ((InfiniteTest *) expr)->arg; + } else { + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized node type: %d", (int)type))); + } + + if (IsA(argexpr, Var)) { + Var* var = (Var*) argexpr; + inputtype = var->vartype; + + } else if (IsA(argexpr, FuncExpr)) { + 
inputtype = deparse_funcexpr_for_input_type((FuncExpr *) argexpr, type, val); + + } else if (IsA(argexpr, Const)) { + check_val_for_nan_or_infinite(type, val); + + } else if (IsA(argexpr, OpExpr)) { + OpExpr* opexpr = (OpExpr *) argexpr; + inputtype = opexpr->opresulttype; + + } else if (IsA(argexpr, Param)) { + Param* param = (Param*) argexpr; + inputtype = param->paramtype; + + } else { + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized node type: %d", (int)type))); + } + + return inputtype; +} + +/* ---------------------------------------------------------------- +* ExecEvalNanTest +* +* Evaluate a NanTest node. +* ---------------------------------------------------------------- +*/ +static Datum ExecEvalNanTest(NanTestState* nstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone) +{ + NanTest* ntest = (NanTest*)nstate->xprstate.expr; + Datum result; + bool resultnan; + Oid inputtype; + float8 val; + + result = ExecEvalExpr(nstate->arg, econtext, isNull, isDone); + + if (isDone && *isDone == ExprEndResult) + return result; /* nothing to check */ + + /* + * Evaluate value IS [NOT] NAN. + * if number value is NAN and without NOT, return true, else false; if with NOT, the result is reversed; + * if charater string is 'NAN'、'INF'(ignore case) or others which can't cast to float8, throw errors; + * Especially for NULL, no matter IS_NAN or IS_NOT_NAN all return NULL. 
+ */ + + if (*isNull) { + return (Datum)0; + } + + val = DatumGetFloat8(result); + resultnan = isnan(val); + inputtype = deparseNodeForInputype((Expr *)ntest, T_NanTest, val); + + /* Only float8, float4, numeric support NAN */ + if (resultnan && !(inputtype == FLOAT8OID || inputtype == FLOAT4OID || inputtype == NUMERICOID)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input for IS [NOT] NAN"))); + } + + switch (ntest->nantesttype) { + case IS_NAN: + return BoolGetDatum(resultnan); + case IS_NOT_NAN: + return BoolGetDatum(!resultnan); + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmodule(MOD_EXECUTOR), + errmsg("unrecognized nantesttype: %d", (int)ntest->nantesttype))); + return (Datum)0; /* keep compiler quiet */ + } +} + +/* ---------------------------------------------------------------- +* ExecEvalInfiniteTest +* +* Evaluate a InfiniteTest node. +* ---------------------------------------------------------------- +*/ +static Datum ExecEvalInfiniteTest(InfiniteTestState* nstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone) +{ + InfiniteTest* itest = (InfiniteTest*)nstate->xprstate.expr; + Datum result; + bool resultinf; + Oid inputtype; + float8 val; + + result = ExecEvalExpr(nstate->arg, econtext, isNull, isDone); + + if (isDone && *isDone == ExprEndResult) + return result; /* nothing to check */ + + /* + * Evaluate value IS [NOT] INFINITE. + * if number value is INFINITE and without NOT, return true, else false; if with NOT, the result is reversed; + * if charater string is 'INF'、'NAN' (ignore case) or others which can't cast to float8, thrown errors; + * Especially for NULL, no matter IS_INFINITE or IS_NOT_INFINITE all return NULL. 
+ */ + + if (*isNull) { + return (Datum)0; + } + + val = DatumGetFloat8(result); + resultinf = isinf(val); + inputtype = deparseNodeForInputype((Expr *)itest, T_InfiniteTest, val); + + /* Only float8, float4 support INF */ + if (resultinf && !(inputtype == FLOAT8OID || inputtype == FLOAT4OID)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input for IS [NOT] INFINITE"))); + } + + switch (itest->infinitetesttype) { + case IS_INFINITE: + return BoolGetDatum(resultinf); + case IS_NOT_INFINITE: + return BoolGetDatum(!resultinf); + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmodule(MOD_EXECUTOR), + errmsg("unrecognized infinitetesttype: %d", (int)itest->infinitetesttype))); + return (Datum)0; /* keep compiler quiet */ + } +} + /* ---------------------------------------------------------------- * ExecEvalHashFilter * @@ -6396,6 +6592,24 @@ ExprState* ExecInitExprByRecursion(Expr* node, PlanState* parent) nstate->arg = ExecInitExprByRecursion(ntest->arg, parent); nstate->argdesc = NULL; state = (ExprState*)nstate; + } break; + case T_NanTest: { + NanTest* ntest = (NanTest*)node; + NanTestState* nstate = makeNode(NanTestState); + nstate->xprstate.is_flt_frame = false; + + nstate->xprstate.evalfunc = (ExprStateEvalFunc)ExecEvalNanTest; + nstate->arg = ExecInitExprByRecursion(ntest->arg, parent); + state = (ExprState*)nstate; + } break; + case T_InfiniteTest: { + InfiniteTest* itest = (InfiniteTest*)node; + InfiniteTestState* nstate = makeNode(InfiniteTestState); + nstate->xprstate.is_flt_frame = false; + + nstate->xprstate.evalfunc = (ExprStateEvalFunc)ExecEvalInfiniteTest; + nstate->arg = ExecInitExprByRecursion(itest->arg, parent); + state = (ExprState*)nstate; } break; case T_HashFilter: { HashFilter* htest = (HashFilter*)node; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql 
b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql new file mode 100644 index 0000000000..0759064022 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql @@ -0,0 +1,11 @@ +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql new file mode 100644 index 0000000000..0759064022 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql @@ -0,0 +1,11 @@ +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql new file mode 100644 index 0000000000..634287732d --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql @@ -0,0 +1,67 
@@ +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7012; +CREATE FUNCTION pg_catalog.to_binary_float(text, text, bool) +RETURNS float4 +as 'to_binary_float_text' +LANGUAGE INTERNAL +IMMUTABLE; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7013; +CREATE FUNCTION pg_catalog.to_binary_float(text) +RETURNS float4 AS +$$ +BEGIN + RETURN (select pg_catalog.to_binary_float($1, ' ', false)); +END; +$$ +LANGUAGE plpgsql; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text) IS 'convert text to a single precision floating-point number'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7014; +CREATE FUNCTION pg_catalog.to_binary_float(float8, float8, bool) +RETURNS float4 +as 'to_binary_float_number' +LANGUAGE INTERNAL +IMMUTABLE; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7015; +CREATE FUNCTION pg_catalog.to_binary_float(float8) +RETURNS float4 AS +$$ +BEGIN + RETURN (select pg_catalog.to_binary_float($1, 0, false)); +END; +$$ +LANGUAGE plpgsql; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8) IS 'convert float8 to a single precision floating-point number'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7016; +CREATE FUNCTION pg_catalog.to_binary_float(float8, text, 
bool) +RETURNS float4 AS +$$ +BEGIN + RETURN (select pg_catalog.to_binary_float($1, 0, false)); +END; +$$ +LANGUAGE plpgsql; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7017; +CREATE FUNCTION pg_catalog.to_binary_float(text, float8, bool) +RETURNS float4 +as 'to_binary_float_text_number' +LANGUAGE INTERNAL +IMMUTABLE; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql new file mode 100644 index 0000000000..634287732d --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql @@ -0,0 +1,67 @@ +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7012; +CREATE FUNCTION pg_catalog.to_binary_float(text, text, bool) +RETURNS float4 +as 'to_binary_float_text' +LANGUAGE INTERNAL +IMMUTABLE; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7013; +CREATE FUNCTION pg_catalog.to_binary_float(text) +RETURNS float4 AS +$$ +BEGIN + RETURN (select pg_catalog.to_binary_float($1, ' ', false)); +END; +$$ +LANGUAGE plpgsql; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text) IS 'convert text to 
a single precision floating-point number'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7014; +CREATE FUNCTION pg_catalog.to_binary_float(float8, float8, bool) +RETURNS float4 +as 'to_binary_float_number' +LANGUAGE INTERNAL +IMMUTABLE; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7015; +CREATE FUNCTION pg_catalog.to_binary_float(float8) +RETURNS float4 AS +$$ +BEGIN + RETURN (select pg_catalog.to_binary_float($1, 0, false)); +END; +$$ +LANGUAGE plpgsql; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8) IS 'convert float8 to a single precision floating-point number'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7016; +CREATE FUNCTION pg_catalog.to_binary_float(float8, text, bool) +RETURNS float4 AS +$$ +BEGIN + RETURN (select pg_catalog.to_binary_float($1, 0, false)); +END; +$$ +LANGUAGE plpgsql; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; + + +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7017; +CREATE FUNCTION pg_catalog.to_binary_float(text, float8, bool) +RETURNS float4 +as 'to_binary_float_text_number' +LANGUAGE INTERNAL +IMMUTABLE; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; diff --git a/src/include/executor/executor.h 
b/src/include/executor/executor.h index 06196dcfea..136dd6d932 100755 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -320,6 +320,7 @@ extern ExprState* ExecPrepareExpr(Expr* node, EState* estate); extern ExprState *ExecPrepareCheck(List *qual, EState *estate); extern List *ExecPrepareExprList(List *nodes, EState *estate); extern bool ExecCheck(ExprState *state, ExprContext *context); +extern Oid deparseNodeForInputype(Expr *expr, NodeTag nodeTag, float8 val); /** * new expr diff --git a/src/include/nodes/execExpr.h b/src/include/nodes/execExpr.h index 70d73ae2fa..b69bc05221 100644 --- a/src/include/nodes/execExpr.h +++ b/src/include/nodes/execExpr.h @@ -255,6 +255,14 @@ typedef enum ExprEvalOp EEOP_AGG_ORDERED_TRANS_DATUM, EEOP_AGG_ORDERED_TRANS_TUPLE, + /* perform NAN tests for scalar values */ + EEOP_NANTEST_ISNAN, + EEOP_NANTEST_ISNOTNAN, + + /* perform INFINITE tests for scalar values */ + EEOP_INFINITETEST_ISINFINITE, + EEOP_INFINITETEST_ISNOTINFINITE, + /* non-existent operation, used e.g. 
to check array lengths */ EEOP_LAST } ExprEvalOp; @@ -374,6 +382,14 @@ typedef struct ExprEvalStep TupleDesc argdesc; } nulltest_row; + /* for EEOP_NANTEST_IS[NOT]NAN | IS[NOT]INFINITE */ + struct + { + Datum *value; + bool *isnull; + Expr *expr; + } decspecexpr; + /* for EEOP_PARAM_EXEC/EXTERN */ struct { @@ -731,6 +747,10 @@ extern void ExecEvalRowNull(ExprState *state, ExprEvalStep *op, ExprContext *econtext); extern void ExecEvalRowNotNull(ExprState *state, ExprEvalStep *op, ExprContext *econtext); +extern void ExecEvalNan(ExprState *state, ExprEvalStep *op, ExprContext *econtext); +extern void ExecEvalNotNan(ExprState *state, ExprEvalStep *op, ExprContext *econtext); +extern void ExecEvalInfinite(ExprState *state, ExprEvalStep *op, ExprContext *econtext); +extern void ExecEvalNotInfinite(ExprState *state, ExprEvalStep *op, ExprContext *econtext); extern void ExecEvalArrayExpr(ExprState *state, ExprEvalStep *op); extern void ExecEvalArrayCoerce(ExprState *state, ExprEvalStep *op); extern void ExecEvalRow(ExprState *state, ExprEvalStep *op); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index f7735ed2ce..577d35de03 100755 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -1266,6 +1266,24 @@ typedef struct NullTestState { TupleDesc argdesc; /* tupdesc for most recent input */ } NullTestState; +/* ---------------- + * NanTestState node + * ---------------- + */ +typedef struct NanTestState { + ExprState xprstate; + ExprState* arg; /* input expression */ +} NanTestState; + +/* ---------------- + * InfiniteTestState node + * ---------------- + */ +typedef struct InfiniteTestState { + ExprState xprstate; + ExprState* arg; /* input expression */ +} InfiniteTestState; + /* ---------------- * HashFilterState node * ---------------- diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 4b319d33b8..12819cea10 100755 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -260,6 +260,8 @@ 
typedef enum NodeTag { T_MinMaxExpr, T_XmlExpr, T_NullTest, + T_NanTest, + T_InfiniteTest, T_BooleanTest, T_CoerceToDomain, T_CoerceToDomainValue, @@ -322,6 +324,8 @@ typedef enum NodeTag { T_MinMaxExprState, T_XmlExprState, T_NullTestState, + T_NanTestState, + T_InfiniteTestState, T_HashFilterState, T_CoerceToDomainState, T_DomainConstraintState, @@ -416,6 +420,7 @@ typedef enum NodeTag { T_String, T_BitString, T_Null, + T_Nan, /* * TAGS FOR LIST NODES (pg_list.h) diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index e3924471ad..f6f1dcbfa9 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -1106,6 +1106,36 @@ typedef struct NullTest { bool argisrow; /* T to perform field-by-field null checks */ } NullTest; +/* ---------------- + * NanTest + * + * NanTest represents the operation of testing a value for Nan. + * The appropriate test is performed and returned as a boolean Datum. + * ---------------- + */ +typedef enum NanTestType { IS_NAN, IS_NOT_NAN } NanTestType; + +typedef struct NanTest { + Expr xpr; + Expr* arg; /* input expression */ + NanTestType nantesttype; /* IS NAN, IS NOT NAN */ +} NanTest; + +/* ---------------- + * InfiniteTest + * + * InfiniteTest represents the operation of testing a value for Infinite. + * The appropriate test is performed and returned as a boolean Datum. 
+ * ---------------- + */ +typedef enum InfiniteTestType { IS_INFINITE, IS_NOT_INFINITE } InfiniteTestType; + +typedef struct InfiniteTest { + Expr xpr; + Expr* arg; /* input expression */ + InfiniteTestType infinitetesttype; /* IS INFINITE, IS NOT INFINITE */ +} InfiniteTest; + /* * BooleanTest * diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h index 23082f704e..518e5247ec 100644 --- a/src/include/parser/kwlist.h +++ b/src/include/parser/kwlist.h @@ -235,6 +235,7 @@ PG_KEYWORD("ends", ENDS, UNRESERVED_KEYWORD) PG_KEYWORD("enforced", ENFORCED, UNRESERVED_KEYWORD) PG_KEYWORD("enum", ENUM_P, UNRESERVED_KEYWORD) PG_KEYWORD("eol", EOL, UNRESERVED_KEYWORD) +PG_KEYWORD("error", ERROR_P, UNRESERVED_KEYWORD) PG_KEYWORD("errors", ERRORS, UNRESERVED_KEYWORD) PG_KEYWORD("escape", ESCAPE, UNRESERVED_KEYWORD) PG_KEYWORD("escaped", ESCAPED, UNRESERVED_KEYWORD) @@ -318,6 +319,7 @@ PG_KEYWORD("incremental", INCREMENTAL, UNRESERVED_KEYWORD) PG_KEYWORD("index", INDEX, UNRESERVED_KEYWORD) PG_KEYWORD("indexes", INDEXES, UNRESERVED_KEYWORD) PG_KEYWORD("infile", INFILE, UNRESERVED_KEYWORD) +PG_KEYWORD("infinite", INFINITE_P, UNRESERVED_KEYWORD) PG_KEYWORD("inherit", INHERIT, UNRESERVED_KEYWORD) PG_KEYWORD("inherits", INHERITS, UNRESERVED_KEYWORD) PG_KEYWORD("initial", INITIAL_P, UNRESERVED_KEYWORD) @@ -405,6 +407,7 @@ PG_KEYWORD("movement", MOVEMENT, UNRESERVED_KEYWORD) PG_KEYWORD("mysql_errno", MYSQL_ERRNO, UNRESERVED_KEYWORD) PG_KEYWORD("name", NAME_P, UNRESERVED_KEYWORD) PG_KEYWORD("names", NAMES, UNRESERVED_KEYWORD) +PG_KEYWORD("nan", NAN_P, UNRESERVED_KEYWORD) PG_KEYWORD("national", NATIONAL, COL_NAME_KEYWORD) PG_KEYWORD("natural", NATURAL, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("nchar", NCHAR, COL_NAME_KEYWORD) diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index 6ea6633ac5..f73bdd45b7 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -637,6 +637,9 @@ extern Datum float84ge(PG_FUNCTION_ARGS); extern Datum 
width_bucket_float8(PG_FUNCTION_ARGS); Datum float8_multiply_text(PG_FUNCTION_ARGS); Datum text_multiply_float8(PG_FUNCTION_ARGS); +extern Datum to_binary_float_text(PG_FUNCTION_ARGS); +extern Datum to_binary_float_number(PG_FUNCTION_ARGS); +extern Datum to_binary_float_text_number(PG_FUNCTION_ARGS); /* dbsize.c */ extern Datum pg_tablespace_size_oid(PG_FUNCTION_ARGS); diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h index c40f66b128..2fc296a95f 100644 --- a/src/include/utils/selfuncs.h +++ b/src/include/utils/selfuncs.h @@ -20,6 +20,7 @@ #include "nodes/relation.h" #include "optimizer/nodegroups.h" #include "parser/parse_oper.h" +#include "catalog/pg_operator.h" /* * Note: the default selectivity estimates are not chosen entirely at random. @@ -53,7 +54,7 @@ #define DEFAULT_SPECIAL_EXPR_DISTINCT 10 #define DEFAULT_SPECIAL_EXPR_BIASE (pow(u_sess->pgxc_cxt.NumDataNodes, (double)1 / 2) / u_sess->pgxc_cxt.NumDataNodes) -/* default selectivity estimate for boolean and null test nodes */ +/* default selectivity estimate for boolean、null、nan、infinite test nodes */ #define DEFAULT_UNK_SEL 0.005 #define DEFAULT_NOT_UNK_SEL (1.0 - DEFAULT_UNK_SEL) @@ -259,6 +260,10 @@ extern Selectivity booltestsel( PlannerInfo* root, BoolTestType booltesttype, Node* arg, int varRelid, JoinType jointype, SpecialJoinInfo* sjinfo); extern Selectivity nulltestsel( PlannerInfo* root, NullTestType nulltesttype, Node* arg, int varRelid, JoinType jointype, SpecialJoinInfo* sjinfo); +extern Selectivity nantestsel( + PlannerInfo* root, NanTestType nantesttype, Node* arg, int varRelid, JoinType jointype, SpecialJoinInfo* sjinfo); +extern Selectivity infinitetestsel( + PlannerInfo* root, InfiniteTestType infinitetesttype, Node* arg, int varRelid, JoinType jointype, SpecialJoinInfo* sjinfo); extern Selectivity scalararraysel(PlannerInfo* root, ScalarArrayOpExpr* clause, bool is_join_clause, int varRelid, JoinType jointype, SpecialJoinInfo* sjinfo); extern int 
estimate_array_length(Node* arrayexpr); diff --git a/src/test/regress/expected/expr_nantest_infinitetest.out b/src/test/regress/expected/expr_nantest_infinitetest.out new file mode 100644 index 0000000000..687a4e3dbf --- /dev/null +++ b/src/test/regress/expected/expr_nantest_infinitetest.out @@ -0,0 +1,1684 @@ +-- For expression IS [NOT] NAN|INFINITE +-- create new schema +drop schema if exists expr_nan; +NOTICE: schema "expr_nan" does not exist, skipping +create schema expr_nan; +set search_path=expr_nan; +CREATE TABLE tnf (c1 int, c2 bigint, c3 float8, c4 varchar(50), c5 text, c6 timestamp); +INSERT INTO tnf VALUES (1, 10000, 100.008, '1234.5679', '987654321', TIMESTAMP'2024-06-06 21:03:58'); +INSERT INTO tnf VALUES (2, -922337203, -1.79E+100, '-1.79E+100', '1.79E+100', TIMESTAMP'1970-01-01 00:00:00'); +INSERT INTO tnf VALUES (3, 922337203, 1.79E+100, '9,223,372,036,854,775,807', '1.79E+400', TIMESTAMP'2077-07-08 00:00:00'); +INSERT INTO tnf VALUES (4, NULL, NULL, NULL, NULL, NULL); +INSERT INTO tnf VALUES (5, -0, CAST('NaN' as float8), 'tonight', '12.34yesterday', 'today'); +INSERT INTO tnf VALUES (6, 3.14E+3, CAST('Inf' as float8), 'NaN', 'Inf', 'now'); +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT NAN; + c1 | c2 +----+------- + 1 | 10000 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT INFINITE; + c1 | c2 +----+------- + 1 | 10000 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT NAN; + c1 | c2 +----+------------ + 2 | -922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT INFINITE; + c1 | c2 +----+------------ + 2 | -922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE 
c1 = 3 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT NAN; + c1 | c2 +----+----------- + 3 | 922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT INFINITE; + c1 | c2 +----+----------- + 3 | 922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT NAN; + c1 | c2 +----+---- + 5 | 0 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT INFINITE; + c1 | c2 +----+---- + 5 | 0 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT NAN; + c1 | c2 +----+------ + 6 | 3140 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT INFINITE; + c1 | c2 +----+------ + 6 | 3140 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT NAN; + c1 | c3 +----+--------- + 1 | 100.008 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT INFINITE; + c1 | c3 +----+--------- + 1 | 100.008 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 
AND c3 IS NOT NAN; + c1 | c3 +----+------------ + 2 | -1.79e+100 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NOT INFINITE; + c1 | c3 +----+------------ + 2 | -1.79e+100 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NOT NAN; + c1 | c3 +----+----------- + 3 | 1.79e+100 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NOT INFINITE; + c1 | c3 +----+----------- + 3 | 1.79e+100 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NAN; + c1 | c3 +----+----- + 5 | NaN +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT INFINITE; + c1 | c3 +----+----- + 5 | NaN +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NOT NAN; + c1 | c3 +----+---------- + 6 | Infinity +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS INFINITE; + c1 | c3 +----+---------- + 6 | Infinity +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NOT INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NAN; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT NAN; + c1 | c4 +----+----------- + 1 | 
1234.5679 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT INFINITE; + c1 | c4 +----+----------- + 1 | 1234.5679 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NAN; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT NAN; + c1 | c4 +----+------------ + 2 | -1.79E+100 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT INFINITE; + c1 | c4 +----+------------ + 2 | -1.79E+100 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NAN; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT NAN; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS INFINITE; 
-- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NAN; -- error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT NAN; -- error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NOT NAN; + c1 | c5 +----+----------- + 1 | 987654321 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NOT INFINITE; + c1 | c5 +----+----------- + 1 | 987654321 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT NAN; + c1 | c5 +----+----------- + 2 | 1.79E+100 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT INFINITE; + c1 | c5 +----+----------- + 2 | 1.79E+100 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NAN; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT NAN; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS INFINITE; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT 
INFINITE; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NAN; -- error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT NAN; -- error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +-- all error: can't cast timestamp to double precision +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT NAN; + 
^ +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT INFINITE; + ^ +SELECT 
c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 
AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT INFINITE; + ^ +SELECT c2 IS NAN, c2 IS NOT NAN, c2 IS INFINITE, c2 IS NOT INFINITE FROM tnf ORDER BY c1; + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + f | t | f | t + f | t | f | t + f | t | f | t + | | | + f | t | f | t + f | t | f | t +(6 rows) + +SELECT c3 IS NAN, c3 IS NOT NAN, c3 IS INFINITE, c3 IS NOT INFINITE FROM tnf ORDER BY c1; + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + f | t | f | t + f | t | f | t + f | t | f | t + | | | + t | f | f | t + f | t | t | f +(6 rows) + +SELECT c4 IS NAN, c4 IS NOT NAN, c4 IS INFINITE, c4 IS NOT INFINITE FROM tnf ORDER BY c1; +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c5 IS NAN, c5 IS NOT NAN, c5 IS INFINITE, c5 IS NOT INFINITE FROM tnf ORDER BY c1; +ERROR: "1.79E+400" is out of range for type double precision +SELECT c6 IS NAN, c6 IS NOT NAN, c6 IS INFINITE, c6 IS NOT INFINITE FROM tnf ORDER BY c1; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c6 IS NAN, c6 IS NOT NAN, c6 IS INFINITE, c6 IS NOT I... + ^ +-- not +SELECT c1, c3, NOT(c3 IS NOT NAN), NOT(c3 IS NAN) FROM tnf ORDER BY c1; + c1 | c3 | ?column? | ?column? +----+------------+----------+---------- + 1 | 100.008 | f | t + 2 | -1.79e+100 | f | t + 3 | 1.79e+100 | f | t + 4 | | | + 5 | NaN | t | f + 6 | Infinity | f | t +(6 rows) + +SELECT c1, c3, NOT(c3 IS NOT INFINITE), NOT(c3 IS INFINITE) FROM tnf ORDER BY c1; + c1 | c3 | ?column? | ?column? 
+----+------------+----------+---------- + 1 | 100.008 | f | t + 2 | -1.79e+100 | f | t + 3 | 1.79e+100 | f | t + 4 | | | + 5 | NaN | f | t + 6 | Infinity | t | f +(6 rows) + +-- collation +SELECT c1, c2, c3, c4, c5 FROM tnf ORDER BY (c3 IS NAN); + c1 | c2 | c3 | c4 | c5 +----+------------+------------+---------------------------+---------------- + 1 | 10000 | 100.008 | 1234.5679 | 987654321 + 2 | -922337203 | -1.79e+100 | -1.79E+100 | 1.79E+100 + 3 | 922337203 | 1.79e+100 | 9,223,372,036,854,775,807 | 1.79E+400 + 6 | 3140 | Infinity | NaN | Inf + 5 | 0 | NaN | tonight | 12.34yesterday + 4 | | | | +(6 rows) + +SELECT (1 + 1) IS NAN; + ?column? +---------- + f +(1 row) + +SELECT (c3 + 1) IS NAN FROM tnf; + ?column? +---------- + f + f + f + + t + f +(6 rows) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c3 + 1) IS NOT NAN; + c1 | c2 | c3 | c4 | c5 +----+------------+------------+---------------------------+----------- + 1 | 10000 | 100.008 | 1234.5679 | 987654321 + 2 | -922337203 | -1.79e+100 | -1.79E+100 | 1.79E+100 + 3 | 922337203 | 1.79e+100 | 9,223,372,036,854,775,807 | 1.79E+400 + 6 | 3140 | Infinity | NaN | Inf +(4 rows) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c4 + 1) IS NOT NAN; -- error +ERROR: invalid input syntax for type bigint: "1234.5679" +SELECT (SELECT c3 FROM tnf WHERE c1 = 3) IS NAN; + ?column? 
+---------- + f +(1 row) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (SELECT c3 FROM tnf WHERE c1 = 5) IS nan; + c1 | c2 | c3 | c4 | c5 +----+------------+------------+---------------------------+---------------- + 1 | 10000 | 100.008 | 1234.5679 | 987654321 + 2 | -922337203 | -1.79e+100 | -1.79E+100 | 1.79E+100 + 3 | 922337203 | 1.79e+100 | 9,223,372,036,854,775,807 | 1.79E+400 + 4 | | | | + 5 | 0 | NaN | tonight | 12.34yesterday + 6 | 3140 | Infinity | NaN | Inf +(6 rows) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE c1 IN (1, 2, 4, 5) AND c3 IS NAN; + c1 | c2 | c3 | c4 | c5 +----+----+-----+---------+---------------- + 5 | 0 | NaN | tonight | 12.34yesterday +(1 row) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE c1 IN (1, 2) AND c4 IS NAN; + c1 | c2 | c3 | c4 | c5 +----+----+----+----+---- +(0 rows) + +-- test enable_expr_fusion +set enable_expr_fusion=on; +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT NAN; + c1 | c2 +----+------- + 1 | 10000 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT INFINITE; + c1 | c2 +----+------- + 1 | 10000 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT NAN; + c1 | c2 +----+------------ + 2 | -922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT INFINITE; + c1 | c2 +----+------------ + 2 | -922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT NAN; + c1 | c2 +----+----------- + 3 | 922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT 
INFINITE; + c1 | c2 +----+----------- + 3 | 922337203 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT NAN; + c1 | c2 +----+---- + 5 | 0 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT INFINITE; + c1 | c2 +----+---- + 5 | 0 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT NAN; + c1 | c2 +----+------ + 6 | 3140 +(1 row) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT INFINITE; + c1 | c2 +----+------ + 6 | 3140 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT NAN; + c1 | c3 +----+--------- + 1 | 100.008 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT INFINITE; + c1 | c3 +----+--------- + 1 | 100.008 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NOT NAN; + c1 | c3 +----+------------ + 2 | -1.79e+100 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NOT INFINITE; + c1 | c3 +----+------------ + 2 | -1.79e+100 +(1 row) + +SELECT c1, c3 FROM 
tnf WHERE c1 = 3 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NOT NAN; + c1 | c3 +----+----------- + 3 | 1.79e+100 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NOT INFINITE; + c1 | c3 +----+----------- + 3 | 1.79e+100 +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NAN; + c1 | c3 +----+----- + 5 | NaN +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT INFINITE; + c1 | c3 +----+----- + 5 | NaN +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NAN; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NOT NAN; + c1 | c3 +----+---------- + 6 | Infinity +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS INFINITE; + c1 | c3 +----+---------- + 6 | Infinity +(1 row) + +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NOT INFINITE; + c1 | c3 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NAN; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT NAN; + c1 | c4 +----+----------- + 1 | 1234.5679 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT INFINITE; + c1 | c4 +----+----------- + 1 | 1234.5679 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NAN; + c1 | c4 +----+---- +(0 
rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT NAN; + c1 | c4 +----+------------ + 2 | -1.79E+100 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT INFINITE; + c1 | c4 +----+------------ + 2 | -1.79E+100 +(1 row) + +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NAN; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT NAN; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT INFINITE; + c1 | c4 +----+---- +(0 rows) + +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "tonight" +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NAN; -- 
error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT NAN; -- error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NOT NAN; + c1 | c5 +----+----------- + 1 | 987654321 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NOT INFINITE; + c1 | c5 +----+----------- + 1 | 987654321 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT NAN; + c1 | c5 +----+----------- + 2 | 1.79E+100 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT INFINITE; + c1 | c5 +----+----------- + 2 | 1.79E+100 +(1 row) + +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NAN; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT NAN; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS INFINITE; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT INFINITE; -- error: overflow +ERROR: "1.79E+400" is out of range for type double precision +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT NAN; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 
4 AND c5 IS INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT INFINITE; + c1 | c5 +----+---- +(0 rows) + +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT NAN; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input syntax for type double precision: "12.34yesterday" +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NAN; -- error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT NAN; -- error: invalid number +ERROR: invalid input for IS [NOT] NAN +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT INFINITE; -- error: invalid number +ERROR: invalid input for IS [NOT] INFINITE +-- all error: can't cast timestamp to double precision +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp 
without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to 
double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT NAN; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT NAN; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS INFINITE; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS INFINITE; + ^ +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT INFINITE; +ERROR: cannot cast type timestamp without time zone to double 
precision +LINE 1: SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT INFINITE; + ^ +SELECT c2 IS NAN, c2 IS NOT NAN, c2 IS INFINITE, c2 IS NOT INFINITE FROM tnf ORDER BY c1; + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + f | t | f | t + f | t | f | t + f | t | f | t + | | | + f | t | f | t + f | t | f | t +(6 rows) + +SELECT c3 IS NAN, c3 IS NOT NAN, c3 IS INFINITE, c3 IS NOT INFINITE FROM tnf ORDER BY c1; + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + f | t | f | t + f | t | f | t + f | t | f | t + | | | + t | f | f | t + f | t | t | f +(6 rows) + +SELECT c4 IS NAN, c4 IS NOT NAN, c4 IS INFINITE, c4 IS NOT INFINITE FROM tnf ORDER BY c1; +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +SELECT c5 IS NAN, c5 IS NOT NAN, c5 IS INFINITE, c5 IS NOT INFINITE FROM tnf ORDER BY c1; +ERROR: "1.79E+400" is out of range for type double precision +SELECT c6 IS NAN, c6 IS NOT NAN, c6 IS INFINITE, c6 IS NOT INFINITE FROM tnf ORDER BY c1; +ERROR: cannot cast type timestamp without time zone to double precision +LINE 1: SELECT c6 IS NAN, c6 IS NOT NAN, c6 IS INFINITE, c6 IS NOT I... + ^ +-- not +SELECT c1, c3, NOT(c3 IS NOT NAN), NOT(c3 IS NAN) FROM tnf ORDER BY c1; + c1 | c3 | ?column? | ?column? +----+------------+----------+---------- + 1 | 100.008 | f | t + 2 | -1.79e+100 | f | t + 3 | 1.79e+100 | f | t + 4 | | | + 5 | NaN | t | f + 6 | Infinity | f | t +(6 rows) + +SELECT c1, c3, NOT(c3 IS NOT INFINITE), NOT(c3 IS INFINITE) FROM tnf ORDER BY c1; + c1 | c3 | ?column? | ?column? 
+----+------------+----------+---------- + 1 | 100.008 | f | t + 2 | -1.79e+100 | f | t + 3 | 1.79e+100 | f | t + 4 | | | + 5 | NaN | f | t + 6 | Infinity | t | f +(6 rows) + +-- collation +SELECT c1, c2, c3, c4, c5 FROM tnf ORDER BY (c3 IS NAN); + c1 | c2 | c3 | c4 | c5 +----+------------+------------+---------------------------+---------------- + 1 | 10000 | 100.008 | 1234.5679 | 987654321 + 2 | -922337203 | -1.79e+100 | -1.79E+100 | 1.79E+100 + 3 | 922337203 | 1.79e+100 | 9,223,372,036,854,775,807 | 1.79E+400 + 6 | 3140 | Infinity | NaN | Inf + 5 | 0 | NaN | tonight | 12.34yesterday + 4 | | | | +(6 rows) + +SELECT (1 + 1) IS NAN; + ?column? +---------- + f +(1 row) + +SELECT (c3 + 1) IS NAN FROM tnf; + ?column? +---------- + f + f + f + + t + f +(6 rows) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c3 + 1) IS NOT NAN; + c1 | c2 | c3 | c4 | c5 +----+------------+------------+---------------------------+----------- + 1 | 10000 | 100.008 | 1234.5679 | 987654321 + 2 | -922337203 | -1.79e+100 | -1.79E+100 | 1.79E+100 + 3 | 922337203 | 1.79e+100 | 9,223,372,036,854,775,807 | 1.79E+400 + 6 | 3140 | Infinity | NaN | Inf +(4 rows) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c4 + 1) IS NOT NAN; -- error +ERROR: invalid input syntax for type bigint: "1234.5679" +SELECT (SELECT c3 FROM tnf WHERE c1 = 3) IS NAN; + ?column? 
+---------- + f +(1 row) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (SELECT c3 FROM tnf WHERE c1 = 5) IS nan; + c1 | c2 | c3 | c4 | c5 +----+------------+------------+---------------------------+---------------- + 1 | 10000 | 100.008 | 1234.5679 | 987654321 + 2 | -922337203 | -1.79e+100 | -1.79E+100 | 1.79E+100 + 3 | 922337203 | 1.79e+100 | 9,223,372,036,854,775,807 | 1.79E+400 + 4 | | | | + 5 | 0 | NaN | tonight | 12.34yesterday + 6 | 3140 | Infinity | NaN | Inf +(6 rows) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE c1 IN (1, 2, 4, 5) AND c3 IS NAN; + c1 | c2 | c3 | c4 | c5 +----+----+-----+---------+---------------- + 5 | 0 | NaN | tonight | 12.34yesterday +(1 row) + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE c1 IN (1, 2) AND c4 IS NAN; + c1 | c2 | c3 | c4 | c5 +----+----+----+----+---- +(0 rows) + +set enable_expr_fusion=off; +-- parse view +CREATE VIEW v_t AS SELECT c3 IS NAN FROM tnf ORDER BY c1; +CREATE VIEW v_t2 AS SELECT c4 IS NOT NAN FROM tnf ORDER BY c1; +CREATE VIEW v_t3 AS SELECT c3 IS INFINITE FROM tnf ORDER BY c1; +CREATE VIEW v_t4 AS SELECT c4 IS NOT INFINITE FROM tnf ORDER BY c1; +\d+ v_t + View "expr_nan.v_t" + Column | Type | Modifiers | Storage | Description +----------+---------+-----------+---------+------------- + ?column? | boolean | | plain | +View definition: + SELECT tnf.c3 IS NAN AS "?column?" + FROM tnf + ORDER BY tnf.c1; + +\d+ v_t2 + View "expr_nan.v_t2" + Column | Type | Modifiers | Storage | Description +----------+---------+-----------+---------+------------- + ?column? | boolean | | plain | +View definition: + SELECT tnf.c4::double precision IS NOT NAN AS "?column?" + FROM tnf + ORDER BY tnf.c1; + +\d+ v_t3 + View "expr_nan.v_t3" + Column | Type | Modifiers | Storage | Description +----------+---------+-----------+---------+------------- + ?column? | boolean | | plain | +View definition: + SELECT tnf.c3 IS INFINITE AS "?column?" 
+ FROM tnf + ORDER BY tnf.c1; + +\d+ v_t4 + View "expr_nan.v_t4" + Column | Type | Modifiers | Storage | Description +----------+---------+-----------+---------+------------- + ?column? | boolean | | plain | +View definition: + SELECT tnf.c4::double precision IS NOT INFINITE AS "?column?" + FROM tnf + ORDER BY tnf.c1; + +-- test where condition order +CREATE TABLE t_m (c1 int, c2 float8); +CREATE TABLE t_txt (c1 int, c2 text); +CREATE OR REPLACE FUNCTION batch_insert() +RETURNS int AS $$ +DECLARE + i INT; + start INT; + row_count INT := 2000; +BEGIN + SELECT COUNT(*) INTO start FROM t_m; + FOR i IN SELECT generate_series(1, row_count) LOOP + INSERT INTO t_m VALUES (start + i, pg_catalog.random() * i); + END LOOP; + + RETURN row_count; +END; +$$ LANGUAGE plpgsql; +SELECT batch_insert(); + batch_insert +-------------- + 2000 +(1 row) + +SELECT batch_insert(); + batch_insert +-------------- + 2000 +(1 row) + +UPDATE t_m set c2 = CAST('NAN' as float8) where c1 = 1000; +UPDATE t_m set c2 = CAST('INF' as float8) where c1 = 1001; +UPDATE t_m set c2 = NULL where c1 = 1002; +INSERT INTO t_txt SELECT c1, c2 FROM t_m; +-- without ANALYZE +SELECT * FROM t_m WHERE c2 IS NAN; + c1 | c2 +------+----- + 1000 | NaN +(1 row) + +SELECT * FROM t_m WHERE c2 IS INFINITE; + c1 | c2 +------+---------- + 1001 | Infinity +(1 row) + +SELECT * FROM t_txt WHERE c2 IS NAN; -- error +ERROR: invalid input for IS [NOT] NAN +SELECT * FROM t_txt WHERE c2 IS INFINITE; -- error +ERROR: invalid input for IS [NOT] INFINITE +-- test other condition combine with IS [NOT] NAN|INFINITE +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <= 900 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <= 900 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS NAN; + c1 | c2 +----+---- +(0 
rows) + +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS NAN; -- error +ERROR: invalid input for IS [NOT] NAN +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS INFINITE; -- error +ERROR: invalid input for IS [NOT] INFINITE +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 IN (10) AND C2 IS NAN; -- ok + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 IN (10, 20) AND C2 IS NAN; -- ok + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30) AND C2 IS NAN; -- error +ERROR: invalid input for IS [NOT] NAN +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30, 40, 60, 100, 120, 180) AND C2 IS NAN; -- error +ERROR: invalid input for IS [NOT] NAN +-- with ANALYZE +ANALYZE t_m; +ANALYZE t_txt; +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <= 900 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM 
t_txt WHERE c1 <= 900 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS NAN; -- error +ERROR: invalid input for IS [NOT] NAN +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS INFINITE; -- error +ERROR: invalid input for IS [NOT] INFINITE +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS NAN; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS INFINITE; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 IN (10) AND C2 IS NAN; -- ok + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 IN (10, 20) AND C2 IS NAN; -- ok + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30) AND C2 IS NAN; -- error +ERROR: invalid input for IS [NOT] NAN +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30, 40, 60, 100, 120, 180) AND C2 IS NAN; -- error +ERROR: invalid input for IS [NOT] NAN +-- PBE +CREATE TABLE tnf2 (c1 int, func_name text, res boolean); +PREPARE isnan_text(int, text) AS INSERT INTO tnf2 VALUES +($1 * 2 - 1, concat($2, ' IS NAN'), $2 IS 
NAN), +($1 * 2, concat($2, ' IS NOT NAN'), $2 IS NOT NAN); +EXECUTE isnan_text(1, '987654321'); +EXECUTE isnan_text(2, '1.79E+100'); +EXECUTE isnan_text(3, '9,223,372,036,854,775,807'); +ERROR: invalid input syntax for type double precision: "9,223,372,036,854,775,807" +EXECUTE isnan_text(4, '1.79E+400'); +ERROR: "1.79E+400" is out of range for type double precision +EXECUTE isnan_text(5, '12.34yesterday'); +ERROR: invalid input syntax for type double precision: "12.34yesterday" +EXECUTE isnan_text(6, 'tonight'); +ERROR: invalid input syntax for type double precision: "tonight" +EXECUTE isnan_text(7, 'Nan'); +ERROR: invalid input for IS [NOT] NAN +EXECUTE isnan_text(8, 'Inf'); +EXECUTE isnan_text(9, 'Infinite'); +ERROR: invalid input syntax for type double precision: "Infinite" +PREPARE isnan_num(int, float8) AS INSERT INTO tnf2 VALUES +($1 * 2 - 1, concat($2, ' IS NAN'), $2 IS NAN), +($1 * 2, concat($2, ' IS NOT NAN'), $2 IS NOT NAN); +EXECUTE isnan_num(10, -9223372036854775808); +EXECUTE isnan_num(11, 1.23E-100); +EXECUTE isnan_num(12, -1.79E+100); +EXECUTE isnan_num(13, 1.79E+100); +EXECUTE isnan_num(14, 1.79E+400); +ERROR: "17900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +EXECUTE isnan_num(15, CAST('NaN' as float8)); +EXECUTE isnan_num(16, CAST('Inf' as float8)); +SELECT * FROM tnf2 ORDER BY c1; + c1 | func_name | res +----+----------------------------------+----- + 1 | 987654321 IS NAN | f + 2 | 987654321 IS NOT NAN | t + 3 | 1.79E+100 IS NAN | f + 4 | 1.79E+100 IS NOT NAN | t + 15 | Inf IS NAN | f + 16 | Inf IS NOT NAN | t + 19 | -9.22337203685478e+18 IS NAN | f + 20 | 
-9.22337203685478e+18 IS NOT NAN | t + 21 | 1.23e-100 IS NAN | f + 22 | 1.23e-100 IS NOT NAN | t + 23 | -1.79e+100 IS NAN | f + 24 | -1.79e+100 IS NOT NAN | t + 25 | 1.79e+100 IS NAN | f + 26 | 1.79e+100 IS NOT NAN | t + 29 | NaN IS NAN | t + 30 | NaN IS NOT NAN | f + 31 | Infinity IS NAN | f + 32 | Infinity IS NOT NAN | t +(18 rows) + +drop view v_t; +drop view v_t2; +drop view v_t3; +drop view v_t4; +drop table tnf; +drop table tnf2; +drop table t_m; +drop table t_txt; +drop function batch_insert; +drop schema if exists expr_nan cascade; diff --git a/src/test/regress/expected/func_to_binary_float.out b/src/test/regress/expected/func_to_binary_float.out new file mode 100644 index 0000000000..49fc90b76a --- /dev/null +++ b/src/test/regress/expected/func_to_binary_float.out @@ -0,0 +1,609 @@ +-- For function TO_BINRY_FLOAT +-- create new schema +drop schema if exists func_tbf; +NOTICE: schema "func_tbf" does not exist, skipping +create schema func_tbf; +set search_path=func_tbf; +CREATE TABLE tbf (c1 int, c2 float4, c3 float8, c4 char(50), c5 varchar(50), c6 text); +INSERT INTO tbf VALUES (1, 1.23, 1.23, '1.23', '1.23', '1.23'); +INSERT INTO tbf VALUES (2, 3.141567, 3.141567, '3.141567', '3.141567', '3.141567'); +INSERT INTO tbf VALUES (3, 202405291733, 202405291733, '202405291733', '202405291733', '3.141567'); +INSERT INTO tbf VALUES (4, NULL, NULL, NULL, NULL, NULL); +INSERT INTO tbf VALUES (5, NULL, NULL, 'tonight', '12.34yesterday', 'sunday6.66'); +INSERT INTO tbf VALUES (6, CAST('NAN' as float4), CAST('NAN' as float8), 'nan', 'NAN', 'NaN'); +INSERT INTO tbf VALUES (7, CAST('Inf' as float4), CAST('INF' as float8), 'Inf', 'INFINITY', 'INFINITE'); +INSERT INTO tbf VALUES (8, 3.40282E+38, 1.79769313486231E+100, '3.40282E+38F', '3.40282E+38', '1.79769313486231E+308'); +-- without default +SELECT c1, c2, TO_BINARY_FLOAT(c2) FROM tbf ORDER BY c1; + c1 | c2 | to_binary_float +----+-------------+----------------- + 1 | 1.23 | 1.23 + 2 | 3.14157 | 3.14157 + 3 | 
2.02405e+11 | 2.02405e+11 + 4 | | + 5 | | + 6 | NaN | NaN + 7 | Infinity | Infinity + 8 | 3.40282e+38 | 3.40282e+38 +(8 rows) + +SELECT c1, c3, TO_BINARY_FLOAT(c3) FROM tbf ORDER BY c1; + c1 | c3 | to_binary_float +----+-----------------------+----------------- + 1 | 1.23 | 1.23 + 2 | 3.141567 | 3.14157 + 3 | 202405291733 | 2.02405e+11 + 4 | | + 5 | | + 6 | NaN | NaN + 7 | Infinity | Infinity + 8 | 1.79769313486231e+100 | Infinity +(8 rows) + +SELECT c1, c4, TO_BINARY_FLOAT(c4) FROM tbf WHERE c1 NOT IN (5, 8) ORDER BY c1; + c1 | c4 | to_binary_float +----+----------------------------------------------------+----------------- + 1 | 1.23 | 1.23 + 2 | 3.141567 | 3.14157 + 3 | 202405291733 | 2.02405e+11 + 4 | | + 6 | nan | NaN + 7 | Inf | Infinity +(6 rows) + +SELECT c1, c4, TO_BINARY_FLOAT(c4) FROM tbf WHERE c1 = 5 ORDER BY c1; -- error: invalid number +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT c1, c4, TO_BINARY_FLOAT(c4) FROM tbf WHERE c1 = 8 ORDER BY c1; -- error: invalid number +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT c1, c5, TO_BINARY_FLOAT(c5) FROM tbf WHERE c1 NOT IN (5) ORDER BY c1; + c1 | c5 | to_binary_float +----+--------------+----------------- + 1 | 1.23 | 1.23 + 2 | 3.141567 | 3.14157 + 3 | 202405291733 | 2.02405e+11 + 4 | | + 6 | NAN | NaN + 7 | INFINITY | Infinity + 8 | 3.40282E+38 | 3.40282e+38 +(7 rows) + +SELECT c1, c5, TO_BINARY_FLOAT(c5) FROM tbf WHERE c1 = 5 ORDER BY c1; -- error: invalid number +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT c1, c6, TO_BINARY_FLOAT(c6) FROM tbf WHERE c1 NOT IN (5, 7) ORDER BY c1; + c1 | c6 | to_binary_float +----+-----------------------+----------------- + 1 | 1.23 | 1.23 + 2 | 3.141567 | 3.14157 + 3 | 3.141567 | 3.14157 + 4 | | + 6 | NaN | NaN + 8 | 1.79769313486231E+308 | Infinity +(6 rows) + +SELECT c1, c6, TO_BINARY_FLOAT(c6) FROM tbf WHERE c1 = 5 ORDER BY 
c1; -- error: invalid number +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT c1, c6, TO_BINARY_FLOAT(c6) FROM tbf WHERE c1 = 7 ORDER BY c1; -- error: invalid number +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(1.79769313486231E+100); + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT(2.22507485850720E-100); + to_binary_float +----------------- + -Infinity +(1 row) + +SELECT TO_BINARY_FLOAT(1.79769313486231E+310); -- error: overflow +ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('1.79769313486231E+100'); + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT('2.22507485850720E-100'); + to_binary_float +----------------- + 0 +(1 row) + +SELECT TO_BINARY_FLOAT('1.79769313486231E+310'); + to_binary_float +----------------- + Infinity +(1 row) + +-- with default +SELECT TO_BINARY_FLOAT(c1 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 +(8 rows) + +SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 +(8 rows) + +SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 +(8 rows) + +SELECT TO_BINARY_FLOAT(c1 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 +(8 rows) + +SELECT 
TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error, column can't be default param +ERROR: Default param can't be ColumnRef +SELECT TO_BINARY_FLOAT(c2 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + 3.40282e+38 +(8 rows) + +SELECT TO_BINARY_FLOAT(c2 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + 3.40282e+38 +(8 rows) + +SELECT TO_BINARY_FLOAT(c2 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + 3.40282e+38 +(8 rows) + +SELECT TO_BINARY_FLOAT(c2 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + 3.40282e+38 +(8 rows) + +SELECT TO_BINARY_FLOAT(c3 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(c3 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(c3 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(c3 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(c4 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + 3.14 + NaN + Infinity + 3.14 +(8 rows) + +SELECT TO_BINARY_FLOAT(c4 DEFAULT '3.14' ON CONVERSION 
ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + 3.14 + NaN + Infinity + 3.14 +(8 rows) + +SELECT TO_BINARY_FLOAT(c4 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(c4 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + +(8 rows) + +SELECT TO_BINARY_FLOAT(c5 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + 3.14 + NaN + Infinity + 3.40282e+38 +(8 rows) + +SELECT TO_BINARY_FLOAT(c5 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + 3.14 + NaN + Infinity + 3.40282e+38 +(8 rows) + +SELECT TO_BINARY_FLOAT(c5 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(c5 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + 3.40282e+38 +(8 rows) + +SELECT TO_BINARY_FLOAT(c6 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 3.14157 + + 3.14 + NaN + 3.14 + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(c6 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 3.14157 + + 3.14 + NaN + 3.14 + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(c6 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(c6 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 
1.23 + 3.14157 + 3.14157 + + + NaN + + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(3.145 DEFAULT 'tomorrow' ON CONVERSION ERROR); + to_binary_float +----------------- + 3.145 +(1 row) + +SELECT TO_BINARY_FLOAT('today' DEFAULT 3.14 ON CONVERSION ERROR); + to_binary_float +----------------- + 3.14 +(1 row) + +SELECT TO_BINARY_FLOAT('3.14today' DEFAULT 3.14 ON CONVERSION ERROR); + to_binary_float +----------------- + 3.14 +(1 row) + +SELECT TO_BINARY_FLOAT(' 6.66 ' DEFAULT 3.14 ON CONVERSION ERROR); + to_binary_float +----------------- + 6.66 +(1 row) + +SELECT TO_BINARY_FLOAT('today' DEFAULT 'roll' ON CONVERSION ERROR); -- error +ERROR: invalid input syntax for type real +CONTEXT: referenced column: to_binary_float +-- test overflow and null +SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT(2.22507485850720E-100 DEFAULT 3.14 ON CONVERSION ERROR); + to_binary_float +----------------- + -Infinity +(1 row) + +SELECT TO_BINARY_FLOAT('1.79769313486231E+100' DEFAULT 3.14 ON CONVERSION ERROR); + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT('2.22507485850720E-100' DEFAULT 3.14 ON CONVERSION ERROR); + to_binary_float +----------------- + 0 +(1 row) + +SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT 3.14 ON CONVERSION ERROR); -- error: overflow +ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('1.79769313486231E+310' DEFAULT 3.14 ON CONVERSION ERROR); -- inf + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT(3.14 
DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR); -- 3.14 + to_binary_float +----------------- + 3.14 +(1 row) + +SELECT TO_BINARY_FLOAT(3.14 DEFAULT '1.79769313486231E+100' ON CONVERSION ERROR); + to_binary_float +----------------- + 3.14 +(1 row) + +SELECT TO_BINARY_FLOAT(3.14 DEFAULT 1.79769313486231E+310 ON CONVERSION ERROR); -- error: overflow +ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(3.14 DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR); + to_binary_float +----------------- + 3.14 +(1 row) + +SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT NULL ON CONVERSION ERROR); -- inf + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT('1.79769313486231E+100' DEFAULT NULL ON CONVERSION ERROR); -- inf + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT NULL ON CONVERSION ERROR); -- error: overflow +ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('1.79769313486231E+310' DEFAULT NULL ON CONVERSION ERROR); -- inf + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT(NULL DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR); -- NULL + to_binary_float +----------------- + +(1 row) + 
+SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+100' ON CONVERSION ERROR); -- NULL + to_binary_float +----------------- + +(1 row) + +SELECT TO_BINARY_FLOAT(NULL DEFAULT 1.79769313486231E+310 ON CONVERSION ERROR); -- error: overflow +ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR); -- NULL + to_binary_float +----------------- + +(1 row) + +-- pbe +CREATE TABLE tbf2 (c1 int, func_info text, res float4); +PREPARE default_param_text2(int, text, text) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +EXECUTE default_param_text2(1, '3.14', '6.66'); +EXECUTE default_param_text2(2, '3.14#', '6.66'); +EXECUTE default_param_text2(3, '#3.14', '6.66#'); +ERROR: invalid input syntax for type real +CONTEXT: referenced column: res +EXECUTE default_param_text2(4, ' -3.14 ', '6.66'); +EXECUTE default_param_text2(5, NULL, '6.66'); +EXECUTE default_param_text2(6, '6.66', NULL); +EXECUTE default_param_text2(7, '1.79769313486231E+100', NULL); +EXECUTE default_param_text2(8, NULL, '1.79769313486231E+100'); +EXECUTE default_param_text2(9, '1.79769313486231E+400', NULL); +EXECUTE default_param_text2(10, NULL, '1.79769313486231E+400'); +PREPARE default_param_num2(int, float8, float8) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +EXECUTE default_param_text2(11, 3.14, 6.666666); +EXECUTE default_param_text2(12, 3.14, NULL); 
+EXECUTE default_param_text2(13, NULL, 3.14); +EXECUTE default_param_text2(14, 1.79769313486231E+100, 3.14); +EXECUTE default_param_text2(15, 3.14, 1.79769313486231E+100); +EXECUTE default_param_text2(16, 1.79769313486231E+400, 3.14); +EXECUTE default_param_text2(17, 3.14, 1.79769313486231E+400); +EXECUTE default_param_text2(18, 1.79769313486231E+400, NULL); +EXECUTE default_param_text2(19, NULL, 1.79769313486231E+400); +PREPARE default_param_text_num(int, text, float8) AS INSERT INTO t2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +ERROR: relation "t2" does not exist on datanode1 +LINE 1: ..._param_text_num(int, text, float8) AS INSERT INTO t2 VALUES ... + ^ +EXECUTE default_param_text2(20, '3.14', 6.666666); +EXECUTE default_param_text2(21, ' +3.14 ', 6.666666); +EXECUTE default_param_text2(22, '1.79769313486231E+100', 6.666666); +EXECUTE default_param_text2(23, '6.666666', 1.79769313486231E+100); +EXECUTE default_param_text2(24, '1.79769313486231E+400', 6.666666); +EXECUTE default_param_text2(25, '6.666666', 1.79769313486231E+400); +PREPARE default_param_num_text(int, float8, text) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +EXECUTE default_param_text2(26, 1.79769313486231E+100, '6.666666'); +EXECUTE default_param_text2(27, 6.666666, '1.79769313486231E+100'); +EXECUTE default_param_text2(28, 1.79769313486231E+400, '6.666666'); +EXECUTE default_param_text2(29, 6.666666, '1.79769313486231E+400'); +SELECT * FROM tbf2 ORDER BY c1; + c1 | func_info | res 
+----+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------- + 1 | TO_BINARY_FLOAT(3.14 DEFAULT 6.66 ON CONVERSION ERROR) | 3.14 + 2 | TO_BINARY_FLOAT(3.14# DEFAULT 6.66 ON CONVERSION ERROR) | 6.66 + 4 | TO_BINARY_FLOAT( -3.14 DEFAULT 6.66 ON CONVERSION ERROR) | -3.14 + 5 | TO_BINARY_FLOAT( DEFAULT 6.66 ON CONVERSION ERROR) | + 6 | TO_BINARY_FLOAT(6.66 DEFAULT ON CONVERSION ERROR) | 6.66 + 7 | TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT ON CONVERSION ERROR) | Infinity + 8 | TO_BINARY_FLOAT( DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR) | + 9 | TO_BINARY_FLOAT(1.79769313486231E+400 DEFAULT ON CONVERSION ERROR) | Infinity + 10 | TO_BINARY_FLOAT( DEFAULT 1.79769313486231E+400 ON CONVERSION ERROR) | + 11 | TO_BINARY_FLOAT(3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 + 12 | TO_BINARY_FLOAT(3.14 DEFAULT ON CONVERSION ERROR) | 3.14 + 13 | TO_BINARY_FLOAT( DEFAULT 3.14 ON CONVERSION ERROR) | + 14 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 3.14 ON CONVERSION ERROR) | Infinity + 15 | TO_BINARY_FLOAT(3.14 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 3.14 + 16 | 
TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 3.14 ON CONVERSION ERROR) | Infinity + 17 | TO_BINARY_FLOAT(3.14 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 3.14 + 18 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT ON CONVERSION ERROR) | Infinity + 19 | TO_BINARY_FLOAT( DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | + 20 | TO_BINARY_FLOAT(3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 + 21 | TO_BINARY_FLOAT( +3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 
+ 22 | TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity + 23 | TO_BINARY_FLOAT(6.666666 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 6.66667 + 24 | TO_BINARY_FLOAT(1.79769313486231E+400 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity + 25 | TO_BINARY_FLOAT(6.666666 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 6.66667 + 26 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity + 27 | TO_BINARY_FLOAT(6.666666 DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR) | 6.66667 + 28 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity + 29 | TO_BINARY_FLOAT(6.666666 DEFAULT 1.79769313486231E+400 ON CONVERSION ERROR) | 6.66667 +(28 rows) + +DROP TABLE tbf; +DROP TABLE tbf2; +drop schema if exists func_tbf cascade; diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 13a91f0bcb..4f8987ed12 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -456,6 +456,9 @@ test: 
create_function_3 vacuum test: drop_if_exists drop_database test_if_not_exists test_create_index_if_not_exists test_create_sequence_if_not_exists #test: constraints +# test for new expr IS [NOT] NAN|INFINITE and new function TO_BINARY_FLOAT +test: func_to_binary_float expr_nantest_infinitetest + #test: errors subplan_base test: subplan_new #test: select diff --git a/src/test/regress/sql/expr_nantest_infinitetest.sql b/src/test/regress/sql/expr_nantest_infinitetest.sql new file mode 100644 index 0000000000..430a0a1714 --- /dev/null +++ b/src/test/regress/sql/expr_nantest_infinitetest.sql @@ -0,0 +1,462 @@ +-- For expression IS [NOT] NAN|INFINITE + +-- create new schema +drop schema if exists expr_nan; +create schema expr_nan; +set search_path=expr_nan; + +CREATE TABLE tnf (c1 int, c2 bigint, c3 float8, c4 varchar(50), c5 text, c6 timestamp); +INSERT INTO tnf VALUES (1, 10000, 100.008, '1234.5679', '987654321', TIMESTAMP'2024-06-06 21:03:58'); +INSERT INTO tnf VALUES (2, -922337203, -1.79E+100, '-1.79E+100', '1.79E+100', TIMESTAMP'1970-01-01 00:00:00'); +INSERT INTO tnf VALUES (3, 922337203, 1.79E+100, '9,223,372,036,854,775,807', '1.79E+400', TIMESTAMP'2077-07-08 00:00:00'); +INSERT INTO tnf VALUES (4, NULL, NULL, NULL, NULL, NULL); +INSERT INTO tnf VALUES (5, -0, CAST('NaN' as float8), 'tonight', '12.34yesterday', 'today'); +INSERT INTO tnf VALUES (6, 3.14E+3, CAST('Inf' as float8), 'NaN', 'Inf', 'now'); + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT NAN; +SELECT 
c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT INFINITE; + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NAN; +SELECT c1, c3 FROM tnf 
WHERE c1 = 6 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NOT INFINITE; + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT INFINITE; -- error: invalid number + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NOT NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS INFINITE; +SELECT c1, c5 FROM tnf 
WHERE c1 = 1 AND c5 IS NOT INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NAN; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT NAN; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS INFINITE; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT INFINITE; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS INFINITE; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT INFINITE; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS INFINITE; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT INFINITE; -- error: invalid number + +-- all error: can't cast timestamp to double precision +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS 
NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT INFINITE; + +SELECT c2 IS NAN, c2 IS NOT NAN, c2 IS INFINITE, c2 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c3 IS NAN, c3 IS NOT NAN, c3 IS INFINITE, c3 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c4 IS NAN, c4 IS NOT NAN, c4 IS INFINITE, c4 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c5 IS NAN, c5 IS NOT NAN, c5 IS INFINITE, c5 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c6 IS NAN, c6 IS NOT NAN, c6 IS INFINITE, c6 IS NOT INFINITE FROM tnf ORDER BY c1; + +-- not +SELECT c1, c3, NOT(c3 IS NOT NAN), NOT(c3 IS NAN) FROM tnf ORDER BY c1; +SELECT c1, c3, NOT(c3 IS NOT INFINITE), NOT(c3 IS INFINITE) FROM tnf ORDER BY c1; + +-- collation +SELECT c1, c2, c3, c4, c5 FROM tnf ORDER BY (c3 IS NAN); + +SELECT (1 + 1) IS NAN; +SELECT (c3 + 1) IS NAN FROM tnf; +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c3 + 1) IS NOT NAN; +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c4 + 1) IS NOT NAN; -- error + +SELECT (SELECT c3 FROM tnf WHERE c1 = 3) IS NAN; +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (SELECT c3 FROM tnf WHERE c1 = 5) IS nan; + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE c1 IN (1, 2, 4, 5) AND c3 IS NAN; +SELECT c1, c2, c3, c4, c5 FROM tnf 
WHERE c1 IN (1, 2) AND c4 IS NAN; + +-- test enable_expr_fusion +set enable_expr_fusion=on; + +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 1 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 2 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 3 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 4 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 5 AND c2 IS NOT INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT NAN; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS INFINITE; +SELECT c1, c2 FROM tnf WHERE c1 = 6 AND c2 IS NOT INFINITE; + +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 1 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 2 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 
IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 3 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 4 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 5 AND c3 IS NOT INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NOT NAN; +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS INFINITE; +SELECT c1, c3 FROM tnf WHERE c1 = 6 AND c3 IS NOT INFINITE; + +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 1 AND c4 IS NOT INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 2 AND c4 IS NOT INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 3 AND c4 IS NOT INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT NAN; +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 4 AND c4 IS NOT INFINITE; +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT NAN; -- error: invalid number +SELECT c1, c4 FROM tnf 
WHERE c1 = 5 AND c4 IS INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 5 AND c4 IS NOT INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT NAN; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS INFINITE; -- error: invalid number +SELECT c1, c4 FROM tnf WHERE c1 = 6 AND c4 IS NOT INFINITE; -- error: invalid number + +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NOT NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 1 AND c5 IS NOT INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 2 AND c5 IS NOT INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NAN; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT NAN; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS INFINITE; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 3 AND c5 IS NOT INFINITE; -- error: overflow +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT NAN; +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 4 AND c5 IS NOT INFINITE; +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS INFINITE; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 5 AND c5 IS NOT INFINITE; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT NAN; -- error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS INFINITE; -- 
error: invalid number +SELECT c1, c5 FROM tnf WHERE c1 = 6 AND c5 IS NOT INFINITE; -- error: invalid number + +-- all error: can't cast timestamp to double precision +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 1 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 2 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 3 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 4 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 5 AND c6 IS NOT INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT NAN; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS INFINITE; +SELECT c1, c6 FROM tnf WHERE c1 = 6 AND c6 IS NOT INFINITE; + +SELECT c2 IS NAN, c2 IS NOT NAN, c2 IS INFINITE, c2 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c3 IS NAN, c3 IS NOT NAN, c3 IS INFINITE, c3 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c4 IS NAN, c4 IS NOT NAN, c4 IS INFINITE, c4 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c5 IS NAN, c5 IS NOT NAN, c5 IS INFINITE, c5 IS NOT INFINITE FROM tnf ORDER BY c1; +SELECT c6 IS NAN, c6 IS NOT NAN, c6 IS INFINITE, c6 IS NOT INFINITE FROM tnf ORDER BY c1; + +-- not +SELECT 
c1, c3, NOT(c3 IS NOT NAN), NOT(c3 IS NAN) FROM tnf ORDER BY c1; +SELECT c1, c3, NOT(c3 IS NOT INFINITE), NOT(c3 IS INFINITE) FROM tnf ORDER BY c1; + +-- collation +SELECT c1, c2, c3, c4, c5 FROM tnf ORDER BY (c3 IS NAN); + +SELECT (1 + 1) IS NAN; +SELECT (c3 + 1) IS NAN FROM tnf; +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c3 + 1) IS NOT NAN; +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (c4 + 1) IS NOT NAN; -- error + +SELECT (SELECT c3 FROM tnf WHERE c1 = 3) IS NAN; +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE (SELECT c3 FROM tnf WHERE c1 = 5) IS nan; + +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE c1 IN (1, 2, 4, 5) AND c3 IS NAN; +SELECT c1, c2, c3, c4, c5 FROM tnf WHERE c1 IN (1, 2) AND c4 IS NAN; + +set enable_expr_fusion=off; + +-- parse view +CREATE VIEW v_t AS SELECT c3 IS NAN FROM tnf ORDER BY c1; +CREATE VIEW v_t2 AS SELECT c4 IS NOT NAN FROM tnf ORDER BY c1; +CREATE VIEW v_t3 AS SELECT c3 IS INFINITE FROM tnf ORDER BY c1; +CREATE VIEW v_t4 AS SELECT c4 IS NOT INFINITE FROM tnf ORDER BY c1; + +\d+ v_t +\d+ v_t2 +\d+ v_t3 +\d+ v_t4 + +-- test where condition order +CREATE TABLE t_m (c1 int, c2 float8); +CREATE TABLE t_txt (c1 int, c2 text); + +CREATE OR REPLACE FUNCTION batch_insert() +RETURNS int AS $$ +DECLARE + i INT; + start INT; + row_count INT := 2000; +BEGIN + SELECT COUNT(*) INTO start FROM t_m; + FOR i IN SELECT generate_series(1, row_count) LOOP + INSERT INTO t_m VALUES (start + i, pg_catalog.random() * i); + END LOOP; + + RETURN row_count; +END; +$$ LANGUAGE plpgsql; + +SELECT batch_insert(); +SELECT batch_insert(); + +UPDATE t_m set c2 = CAST('NAN' as float8) where c1 = 1000; +UPDATE t_m set c2 = CAST('INF' as float8) where c1 = 1001; +UPDATE t_m set c2 = NULL where c1 = 1002; + +INSERT INTO t_txt SELECT c1, c2 FROM t_m; + +-- without ANALYZE +SELECT * FROM t_m WHERE c2 IS NAN; +SELECT * FROM t_m WHERE c2 IS INFINITE; + +SELECT * FROM t_txt WHERE c2 IS NAN; -- error +SELECT * FROM t_txt WHERE c2 IS INFINITE; -- error + +-- test other condition combine 
with IS [NOT] NAN|INFINITE +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 <= 900 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 <= 900 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS NAN; -- error +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS INFINITE; -- error +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS INFINITE; + +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS NAN; +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS INFINITE; + +SELECT * FROM t_txt WHERE c1 IN (10) AND C2 IS NAN; -- ok +SELECT * FROM t_txt WHERE c1 IN (10, 20) AND C2 IS NAN; -- ok +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30) AND C2 IS NAN; -- error +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30, 40, 60, 100, 120, 180) AND C2 IS NAN; -- error + +-- with ANALYZE +ANALYZE t_m; +ANALYZE t_txt; + +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 < 900 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 <= 900 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 <= 900 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 > 1010 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 >= 1010 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 <> 1000 AND c1 <> 1001 AND c2 IS 
INFINITE; +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS NAN; -- error +SELECT * FROM t_txt WHERE c1 <> 1000 AND c2 IS INFINITE; -- error +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 BETWEEN 500 AND 600 AND c2 IS INFINITE; +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS NAN; +SELECT * FROM t_txt WHERE c1 = 999 AND c2 IS INFINITE; + +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS NAN; +SELECT * FROM t_txt WHERE c2 LIKE '12%' AND c2 IS INFINITE; + +SELECT * FROM t_txt WHERE c1 IN (10) AND C2 IS NAN; -- ok +SELECT * FROM t_txt WHERE c1 IN (10, 20) AND C2 IS NAN; -- ok +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30) AND C2 IS NAN; -- error +SELECT * FROM t_txt WHERE c1 IN (10, 20, 30, 40, 60, 100, 120, 180) AND C2 IS NAN; -- error + +-- PBE +CREATE TABLE tnf2 (c1 int, func_name text, res boolean); + +PREPARE isnan_text(int, text) AS INSERT INTO tnf2 VALUES +($1 * 2 - 1, concat($2, ' IS NAN'), $2 IS NAN), +($1 * 2, concat($2, ' IS NOT NAN'), $2 IS NOT NAN); +EXECUTE isnan_text(1, '987654321'); +EXECUTE isnan_text(2, '1.79E+100'); +EXECUTE isnan_text(3, '9,223,372,036,854,775,807'); +EXECUTE isnan_text(4, '1.79E+400'); +EXECUTE isnan_text(5, '12.34yesterday'); +EXECUTE isnan_text(6, 'tonight'); +EXECUTE isnan_text(7, 'Nan'); +EXECUTE isnan_text(8, 'Inf'); +EXECUTE isnan_text(9, 'Infinite'); + +PREPARE isnan_num(int, float8) AS INSERT INTO tnf2 VALUES +($1 * 2 - 1, concat($2, ' IS NAN'), $2 IS NAN), +($1 * 2, concat($2, ' IS NOT NAN'), $2 IS NOT NAN); +EXECUTE isnan_num(10, -9223372036854775808); +EXECUTE isnan_num(11, 1.23E-100); +EXECUTE isnan_num(12, -1.79E+100); +EXECUTE isnan_num(13, 1.79E+100); +EXECUTE isnan_num(14, 1.79E+400); +EXECUTE isnan_num(15, CAST('NaN' as float8)); +EXECUTE isnan_num(16, CAST('Inf' as float8)); + +SELECT * FROM tnf2 ORDER BY c1; + +drop view v_t; +drop view v_t2; +drop view v_t3; +drop view v_t4; +drop table tnf; +drop table tnf2; +drop table t_m; +drop table t_txt; +drop function 
batch_insert; + +drop schema if exists expr_nan cascade; diff --git a/src/test/regress/sql/func_to_binary_float.sql b/src/test/regress/sql/func_to_binary_float.sql new file mode 100644 index 0000000000..1ec614feb8 --- /dev/null +++ b/src/test/regress/sql/func_to_binary_float.sql @@ -0,0 +1,145 @@ +-- For function TO_BINRY_FLOAT + +-- create new schema +drop schema if exists func_tbf; +create schema func_tbf; +set search_path=func_tbf; + +CREATE TABLE tbf (c1 int, c2 float4, c3 float8, c4 char(50), c5 varchar(50), c6 text); +INSERT INTO tbf VALUES (1, 1.23, 1.23, '1.23', '1.23', '1.23'); +INSERT INTO tbf VALUES (2, 3.141567, 3.141567, '3.141567', '3.141567', '3.141567'); +INSERT INTO tbf VALUES (3, 202405291733, 202405291733, '202405291733', '202405291733', '3.141567'); +INSERT INTO tbf VALUES (4, NULL, NULL, NULL, NULL, NULL); +INSERT INTO tbf VALUES (5, NULL, NULL, 'tonight', '12.34yesterday', 'sunday6.66'); +INSERT INTO tbf VALUES (6, CAST('NAN' as float4), CAST('NAN' as float8), 'nan', 'NAN', 'NaN'); +INSERT INTO tbf VALUES (7, CAST('Inf' as float4), CAST('INF' as float8), 'Inf', 'INFINITY', 'INFINITE'); +INSERT INTO tbf VALUES (8, 3.40282E+38, 1.79769313486231E+100, '3.40282E+38F', '3.40282E+38', '1.79769313486231E+308'); + +-- without default +SELECT c1, c2, TO_BINARY_FLOAT(c2) FROM tbf ORDER BY c1; +SELECT c1, c3, TO_BINARY_FLOAT(c3) FROM tbf ORDER BY c1; + +SELECT c1, c4, TO_BINARY_FLOAT(c4) FROM tbf WHERE c1 NOT IN (5, 8) ORDER BY c1; +SELECT c1, c4, TO_BINARY_FLOAT(c4) FROM tbf WHERE c1 = 5 ORDER BY c1; -- error: invalid number +SELECT c1, c4, TO_BINARY_FLOAT(c4) FROM tbf WHERE c1 = 8 ORDER BY c1; -- error: invalid number + +SELECT c1, c5, TO_BINARY_FLOAT(c5) FROM tbf WHERE c1 NOT IN (5) ORDER BY c1; +SELECT c1, c5, TO_BINARY_FLOAT(c5) FROM tbf WHERE c1 = 5 ORDER BY c1; -- error: invalid number + +SELECT c1, c6, TO_BINARY_FLOAT(c6) FROM tbf WHERE c1 NOT IN (5, 7) ORDER BY c1; +SELECT c1, c6, TO_BINARY_FLOAT(c6) FROM tbf WHERE c1 = 5 ORDER BY c1; -- error: 
invalid number +SELECT c1, c6, TO_BINARY_FLOAT(c6) FROM tbf WHERE c1 = 7 ORDER BY c1; -- error: invalid number + +SELECT TO_BINARY_FLOAT(1.79769313486231E+100); +SELECT TO_BINARY_FLOAT(2.22507485850720E-100); +SELECT TO_BINARY_FLOAT(1.79769313486231E+310); -- error: overflow +SELECT TO_BINARY_FLOAT('1.79769313486231E+100'); +SELECT TO_BINARY_FLOAT('2.22507485850720E-100'); +SELECT TO_BINARY_FLOAT('1.79769313486231E+310'); + +-- with default +SELECT TO_BINARY_FLOAT(c1 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c1 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error, column can't be default param + +SELECT TO_BINARY_FLOAT(c2 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c2 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c2 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c2 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + +SELECT TO_BINARY_FLOAT(c3 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c3 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c3 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c3 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + +SELECT TO_BINARY_FLOAT(c4 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c4 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c4 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT(c4 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + +SELECT TO_BINARY_FLOAT(c5 DEFAULT 3.14 ON CONVERSION 
ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c5 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c5 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT(c5 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + +SELECT TO_BINARY_FLOAT(c6 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c6 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c6 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT(c6 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; + +SELECT TO_BINARY_FLOAT(3.145 DEFAULT 'tomorrow' ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('today' DEFAULT 3.14 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('3.14today' DEFAULT 3.14 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(' 6.66 ' DEFAULT 3.14 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('today' DEFAULT 'roll' ON CONVERSION ERROR); -- error + +-- test overflow and null +SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(2.22507485850720E-100 DEFAULT 3.14 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('1.79769313486231E+100' DEFAULT 3.14 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('2.22507485850720E-100' DEFAULT 3.14 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT 3.14 ON CONVERSION ERROR); -- error: overflow +SELECT TO_BINARY_FLOAT('1.79769313486231E+310' DEFAULT 3.14 ON CONVERSION ERROR); -- inf +SELECT TO_BINARY_FLOAT(3.14 DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR); -- 3.14 +SELECT TO_BINARY_FLOAT(3.14 DEFAULT '1.79769313486231E+100' ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(3.14 DEFAULT 1.79769313486231E+310 ON CONVERSION ERROR); -- error: overflow +SELECT TO_BINARY_FLOAT(3.14 DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT NULL ON CONVERSION ERROR); -- inf +SELECT 
TO_BINARY_FLOAT('1.79769313486231E+100' DEFAULT NULL ON CONVERSION ERROR); -- inf +SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT NULL ON CONVERSION ERROR); -- error: overflow +SELECT TO_BINARY_FLOAT('1.79769313486231E+310' DEFAULT NULL ON CONVERSION ERROR); -- inf +SELECT TO_BINARY_FLOAT(NULL DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR); -- NULL +SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+100' ON CONVERSION ERROR); -- NULL +SELECT TO_BINARY_FLOAT(NULL DEFAULT 1.79769313486231E+310 ON CONVERSION ERROR); -- error: overflow +SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR); -- NULL + + +-- pbe +CREATE TABLE tbf2 (c1 int, func_info text, res float4); + +PREPARE default_param_text2(int, text, text) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +EXECUTE default_param_text2(1, '3.14', '6.66'); +EXECUTE default_param_text2(2, '3.14#', '6.66'); +EXECUTE default_param_text2(3, '#3.14', '6.66#'); +EXECUTE default_param_text2(4, ' -3.14 ', '6.66'); +EXECUTE default_param_text2(5, NULL, '6.66'); +EXECUTE default_param_text2(6, '6.66', NULL); +EXECUTE default_param_text2(7, '1.79769313486231E+100', NULL); +EXECUTE default_param_text2(8, NULL, '1.79769313486231E+100'); +EXECUTE default_param_text2(9, '1.79769313486231E+400', NULL); +EXECUTE default_param_text2(10, NULL, '1.79769313486231E+400'); + +PREPARE default_param_num2(int, float8, float8) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +EXECUTE default_param_text2(11, 3.14, 6.666666); +EXECUTE default_param_text2(12, 3.14, NULL); +EXECUTE default_param_text2(13, NULL, 3.14); +EXECUTE default_param_text2(14, 1.79769313486231E+100, 3.14); +EXECUTE default_param_text2(15, 3.14, 1.79769313486231E+100); +EXECUTE default_param_text2(16, 
1.79769313486231E+400, 3.14); +EXECUTE default_param_text2(17, 3.14, 1.79769313486231E+400); +EXECUTE default_param_text2(18, 1.79769313486231E+400, NULL); +EXECUTE default_param_text2(19, NULL, 1.79769313486231E+400); + +PREPARE default_param_text_num(int, text, float8) AS INSERT INTO t2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +EXECUTE default_param_text2(20, '3.14', 6.666666); +EXECUTE default_param_text2(21, ' +3.14 ', 6.666666); +EXECUTE default_param_text2(22, '1.79769313486231E+100', 6.666666); +EXECUTE default_param_text2(23, '6.666666', 1.79769313486231E+100); +EXECUTE default_param_text2(24, '1.79769313486231E+400', 6.666666); +EXECUTE default_param_text2(25, '6.666666', 1.79769313486231E+400); + +PREPARE default_param_num_text(int, float8, text) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); +EXECUTE default_param_text2(26, 1.79769313486231E+100, '6.666666'); +EXECUTE default_param_text2(27, 6.666666, '1.79769313486231E+100'); +EXECUTE default_param_text2(28, 1.79769313486231E+400, '6.666666'); +EXECUTE default_param_text2(29, 6.666666, '1.79769313486231E+400'); + +SELECT * FROM tbf2 ORDER BY c1; + +DROP TABLE tbf; +DROP TABLE tbf2; + +drop schema if exists func_tbf cascade; -- Gitee From 4abafc6b2202defa7161e862267648d3d277eeaa Mon Sep 17 00:00:00 2001 From: zhaosen Date: Mon, 15 Jul 2024 18:13:29 +0800 Subject: [PATCH 053/347] ustore support seq scan smp --- src/gausskernel/optimizer/path/allpaths.cpp | 2 +- .../storage/access/table/tableam.cpp | 6 +- .../storage/access/ustore/knl_uscan.cpp | 94 +++++++++++++------ src/include/access/ustore/knl_uscan.h | 2 + 4 files changed, 75 insertions(+), 29 deletions(-) diff --git a/src/gausskernel/optimizer/path/allpaths.cpp b/src/gausskernel/optimizer/path/allpaths.cpp index 40050c018b..41f8e196a8 100755 
--- a/src/gausskernel/optimizer/path/allpaths.cpp +++ b/src/gausskernel/optimizer/path/allpaths.cpp @@ -1136,7 +1136,7 @@ static void set_plain_rel_pathlist(PlannerInfo* root, RelOptInfo* rel, RangeTblE * support normal row table unless it is partitioned. * The partition table can be parallelized when partItrs > u_sess->opt_cxt.query_dop. */ - bool can_parallel = IS_STREAM_PLAN && (u_sess->opt_cxt.query_dop > 1) && (!rel->is_ustore) && + bool can_parallel = IS_STREAM_PLAN && (u_sess->opt_cxt.query_dop > 1) && (rel->locator_type != LOCATOR_TYPE_REPLICATED) && (rte->tablesample == NULL); if (!isrp) { #endif diff --git a/src/gausskernel/storage/access/table/tableam.cpp b/src/gausskernel/storage/access/table/tableam.cpp index 200ba28a1e..f852b2d82d 100644 --- a/src/gausskernel/storage/access/table/tableam.cpp +++ b/src/gausskernel/storage/access/table/tableam.cpp @@ -925,6 +925,10 @@ void UHeapamScanMarkpos(TableScanDesc sscan) return UHeapMarkPos(sscan); } +void UHeapamScanInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir) +{ + return UeapInitParallelSeqscan(sscan, dop, dir); +} void UHeapamScanEndscan(TableScanDesc sscan) { @@ -1216,7 +1220,7 @@ static const TableAmRoutine g_ustoream_methods = { scan_restrpos : UHeapamScanRestrpos, scan_markpos : UHeapamScanMarkpos, - scan_init_parallel_seqscan : HeapamScanInitParallelSeqscan, + scan_init_parallel_seqscan : UHeapamScanInitParallelSeqscan, scan_getnexttuple : UHeapamScanGetnexttuple, scan_GetNextBatch : UHeapamGetNextBatchMode, scan_getpage : UHeapamScanGetpage, diff --git a/src/gausskernel/storage/access/ustore/knl_uscan.cpp b/src/gausskernel/storage/access/ustore/knl_uscan.cpp index e819e1db23..92f6bacadb 100644 --- a/src/gausskernel/storage/access/ustore/knl_uscan.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uscan.cpp @@ -89,36 +89,46 @@ FORCE_INLINE bool NextUpage(UHeapScanDesc scan, ScanDirection dir, BlockNumber& page) { bool finished = false; - /* - * advance to next/prior page and 
detect end of scan - */ - if (BackwardScanDirection == dir) { - finished = (page == scan->rs_base.rs_startblock); - if (page == 0) { - page = scan->rs_base.rs_nblocks; - } - page--; - } else { + if (scan->dop > 1) { + Assert(scan->rs_parallel == NULL); + Assert(dir == ForwardScanDirection); page++; - if (page >= scan->rs_base.rs_nblocks) { - page = 0; + if ((page - scan->rs_base.rs_startblock) % PARALLEL_SCAN_GAP == 0) { + page += (scan->dop - 1) * PARALLEL_SCAN_GAP; } - finished = (page == scan->rs_base.rs_startblock); - + finished = (page >= scan->rs_base.rs_nblocks); + } else { /* - * Report our new scan position for synchronization purposes. We - * don't do that when moving backwards, however. That would just - * mess up any other forward-moving scanners. - * - * Note: we do this before checking for end of scan so that the - * final state of the position hint is back at the start of the - * rel. That's not strictly necessary, but otherwise when you run - * the same query multiple times the starting position would shift - * a little bit backwards on every invocation, which is confusing. - * We don't guarantee any specific ordering in general, though. - */ - if (scan->rs_allow_sync) { - ss_report_location(scan->rs_base.rs_rd, page); + * advance to next/prior page and detect end of scan + */ + if (BackwardScanDirection == dir) { + finished = (page == scan->rs_base.rs_startblock); + if (page == 0) { + page = scan->rs_base.rs_nblocks; + } + page--; + } else { + page++; + if (page >= scan->rs_base.rs_nblocks) { + page = 0; + } + finished = (page == scan->rs_base.rs_startblock); + + /* + * Report our new scan position for synchronization purposes. We + * don't do that when moving backwards, however. That would just + * mess up any other forward-moving scanners. + * + * Note: we do this before checking for end of scan so that the + * final state of the position hint is back at the start of the + * rel. 
That's not strictly necessary, but otherwise when you run + * the same query multiple times the starting position would shift + * a little bit backwards on every invocation, which is confusing. + * We don't guarantee any specific ordering in general, though. + */ + if (scan->rs_allow_sync) { + ss_report_location(scan->rs_base.rs_rd, page); + } } } @@ -684,6 +694,7 @@ TableScanDesc UHeapBeginScan(Relation relation, Snapshot snapshot, int nkeys, Pa uscan->rs_base.rs_ntuples = 0; uscan->rs_cutup = NULL; uscan->rs_parallel = parallel_scan; + uscan->dop = 1; if (uscan->rs_parallel != NULL) { /* For parallel scan, believe whatever ParallelHeapScanDesc says. */ uscan->rs_base.rs_syncscan = uscan->rs_parallel->phs_syncscan; @@ -780,6 +791,7 @@ static void UHeapinitscan(TableScanDesc sscan, ScanKey key, bool isRescan) scan->rs_base.rs_inited = false; scan->rs_base.rs_cbuf = InvalidBuffer; scan->rs_base.rs_cblock = InvalidBlockNumber; + scan->dop = 1; if (scan->rs_base.rs_rd->rd_tam_ops == TableAmUstore) { scan->rs_base.lastVar = -1; @@ -1194,6 +1206,34 @@ void UHeapRestRpos(TableScanDesc sscan) } } +void UeapInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir) +{ + HeapScanDesc scan = (HeapScanDesc) sscan; + + if (!scan || scan->rs_base.rs_nblocks == 0) { + return; + } + + if (dop <= 1) { + return; + } + + scan->dop = dop; + + uint32 paral_blocks = u_sess->stream_cxt.smp_id * PARALLEL_SCAN_GAP; + + /* If not enough pages to divide into every worker. 
*/ + if (scan->rs_base.rs_nblocks <= paral_blocks) { + scan->rs_base.rs_startblock = 0; + scan->rs_base.rs_nblocks = 0; + return; + } + if(dir == BackwardScanDirection){ + ereport(ERROR, (errmsg("Backward Scan Direction is not support for ustore parallel seq scan."))); + } + scan->rs_base.rs_startblock = paral_blocks; +} + UHeapTuple UHeapGetNext(TableScanDesc sscan, ScanDirection dir, bool* has_cur_xact_write) { UHeapScanDesc scan = (UHeapScanDesc)sscan; diff --git a/src/include/access/ustore/knl_uscan.h b/src/include/access/ustore/knl_uscan.h index f5edfe7c0c..5423340f01 100644 --- a/src/include/access/ustore/knl_uscan.h +++ b/src/include/access/ustore/knl_uscan.h @@ -38,6 +38,7 @@ typedef struct UHeapScanDescData { /* these fields only used in page-at-a-time mode and for bitmap scans */ int rs_mindex; /* marked tuple's saved index */ + int dop; /* scan parallel degree */ UHeapTuple rs_visutuples[MaxPossibleUHeapTuplesPerPage]; /* visible tuples */ UHeapTuple rs_cutup; /* current tuple in scan, if any */ @@ -71,6 +72,7 @@ bool UHeapScanBitmapNextTuple(TableScanDesc sscan, TBMIterateResult *tbmres, Tup bool UHeapScanBitmapNextBlock(TableScanDesc sscan, const TBMIterateResult *tbmres, bool* has_cur_xact_write = NULL); bool UHeapGetPage(TableScanDesc sscan, BlockNumber page, bool* has_cur_xact_write = NULL); +void UeapInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir); UHeapTuple UHeapGetNext(TableScanDesc sscan, ScanDirection dir, bool* has_cur_xact_write = NULL); extern bool UHeapGetTupPageBatchmode(UHeapScanDesc scan, ScanDirection dir); -- Gitee From 440f849f0859f97e011556904b9fe8c84c76fd68 Mon Sep 17 00:00:00 2001 From: lukeman Date: Tue, 16 Jul 2024 11:04:12 +0800 Subject: [PATCH 054/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue:=20=E4=B8=A4?= =?UTF-8?q?=E4=B8=AApackage=20body=E9=83=BD=E6=9C=89=E5=88=9D=E5=A7=8B?= =?UTF-8?q?=E5=8C=96=E5=8C=BF=E5=90=8D=E5=9D=97=EF=BC=8Cpackage=E5=B5=8C?= 
=?UTF-8?q?=E5=A5=97=E8=B0=83=E7=94=A8=EF=BC=8C=E7=BC=96=E8=AF=91=E6=97=B6?= =?UTF-8?q?=E4=BC=9A=E6=AD=BB=E5=BE=AA=E7=8E=AF=E5=AF=BC=E8=87=B4=E7=88=86?= =?UTF-8?q?=E6=A0=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/gs_package.cpp | 15 +++- src/common/backend/parser/parse_type.cpp | 3 - src/common/pl/plpgsql/src/pl_handler.cpp | 6 +- src/common/pl/plpgsql/src/pl_scanner.cpp | 2 +- .../process/threadpool/knl_session.cpp | 1 + src/include/knl/knl_session.h | 3 +- src/include/utils/plpgsql.h | 1 + .../regress/expected/hw_package_function.out | 79 +++++++++++++++++++ src/test/regress/sql/hw_package_function.sql | 60 ++++++++++++++ 9 files changed, 162 insertions(+), 8 deletions(-) diff --git a/src/common/backend/catalog/gs_package.cpp b/src/common/backend/catalog/gs_package.cpp index f256603a10..6cca722786 100755 --- a/src/common/backend/catalog/gs_package.cpp +++ b/src/common/backend/catalog/gs_package.cpp @@ -282,18 +282,29 @@ PLpgSQL_package* PackageInstantiation(Oid packageOid) ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for package %u, while compile package", packageOid))); } - packagebodydatum = SysCacheGetAttr(PROCOID, pkgTuple, Anum_gs_package_pkgbodydeclsrc, &isnull); + packagebodydatum = SysCacheGetAttr(PACKAGEOID, pkgTuple, Anum_gs_package_pkgbodydeclsrc, &isnull); if (isnull) { isSpec = true; } else { isSpec = false; } + + (void)SysCacheGetAttr(PACKAGEOID, pkgTuple, Anum_gs_package_pkgbodyinitsrc, &isnull); ReleaseSysCache(pkgTuple); SPI_STACK_LOG("push cond", NULL, NULL); pushed = SPI_push_conditional(); + if (!isnull) { + pkg = GetCompileListPkg(packageOid); + } + if (pkg != NULL) { + u_sess->plsql_cxt.need_init = false; + } else { + u_sess->plsql_cxt.need_init = true; + } pkg = plpgsql_package_validator(packageOid, isSpec); + u_sess->plsql_cxt.need_init = true; SPI_STACK_LOG("pop cond", NULL, NULL); 
SPI_pop_conditional(pushed); @@ -642,7 +653,9 @@ Oid PackageBodyCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, SetCurrCompilePgObjStatus(true); list_free_ext(u_sess->plsql_cxt.func_compiled_list); u_sess->plsql_cxt.real_func_num = 0; + u_sess->plsql_cxt.need_init = false; plpgsql_package_validator(oldPkgOid, false, true); + u_sess->plsql_cxt.need_init = true; plpgsql_clear_created_pkg(oldPkgOid); return oldPkgOid; } diff --git a/src/common/backend/parser/parse_type.cpp b/src/common/backend/parser/parse_type.cpp index f0d1c02bac..34f329f182 100644 --- a/src/common/backend/parser/parse_type.cpp +++ b/src/common/backend/parser/parse_type.cpp @@ -1736,9 +1736,6 @@ Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid, Oid n if (enable_plpgsql_gsdependency_guc()) { pkgValid = GetPgObjectValid(pkgOid, OBJECT_TYPE_PKGSPEC); } - if (pkgValid) { - // check_record_nest_tableof_index_type(NULL, typeNames); - } return typOid; } diff --git a/src/common/pl/plpgsql/src/pl_handler.cpp b/src/common/pl/plpgsql/src/pl_handler.cpp index 314df2a13a..54cd4f041b 100755 --- a/src/common/pl/plpgsql/src/pl_handler.cpp +++ b/src/common/pl/plpgsql/src/pl_handler.cpp @@ -1319,8 +1319,10 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS) retval = plpgsql_exec_autonm_function(func, &fake_fcinfo, codeblock->source_text); } else { Oid old_value = saveCallFromPkgOid(func->pkg_oid); + u_sess->plsql_cxt.need_init = true; retval = plpgsql_exec_function(func, &fake_fcinfo, false); restoreCallFromPkgOid(old_value); + u_sess->plsql_cxt.need_init = true; } } PG_CATCH(); @@ -1870,7 +1872,7 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate, bool isSpec, bool isNeedCo List* temp_tableof_index = NULL; bool save_is_package_instantiation = u_sess->plsql_cxt.is_package_instantiation; bool needExecDoStmt = true; - if (enable_plpgsql_undefined()) { + if (enable_plpgsql_undefined()) { needExecDoStmt = GetCurrCompilePgObjStatus(); } ResourceOwnerData* oldowner = NULL; @@ 
-1884,7 +1886,7 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate, bool isSpec, bool isNeedCo PG_TRY(); { u_sess->plsql_cxt.is_package_instantiation = true; - if (needExecDoStmt) { + if (needExecDoStmt && u_sess->plsql_cxt.need_init) { init_do_stmt(pkg, isCreate, cell, oldCompileStatus, curr_compile, temp_tableof_index, oldcxt); } if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { diff --git a/src/common/pl/plpgsql/src/pl_scanner.cpp b/src/common/pl/plpgsql/src/pl_scanner.cpp index 189ccdaf3a..a76f651813 100644 --- a/src/common/pl/plpgsql/src/pl_scanner.cpp +++ b/src/common/pl/plpgsql/src/pl_scanner.cpp @@ -1154,7 +1154,7 @@ bool plpgsql_is_token_keyword(int token) } } -static PLpgSQL_package* GetCompileListPkg(Oid pkgOid) +PLpgSQL_package* GetCompileListPkg(Oid pkgOid) { List* compPkgList = u_sess->plsql_cxt.compile_context_list; ListCell *item = NULL; diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp index 2545a587c2..58b7568f49 100755 --- a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -880,6 +880,7 @@ static void knl_u_plpgsql_init(knl_u_plpgsql_context* plsql_cxt) plsql_cxt->isCreatePkg = false; plsql_cxt->isCreatePkgFunction = false; plsql_cxt->currCompilingObjStatus = true; + plsql_cxt->need_init = true; } static void knl_u_stat_init(knl_u_stat_context* stat_cxt) diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 0eb7b65522..c45a88cdbd 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -1704,7 +1704,6 @@ typedef struct knl_u_plpgsql_context { // gs depend bool compile_has_warning_info; - bool expr_can_have_out_func; bool currCompilingObjStatus; bool need_create_depend; bool during_compile; @@ -1730,6 +1729,8 @@ typedef struct knl_u_plpgsql_context { int compile_check_node_level; int real_func_num; HTAB* plpgsql_lock_objects; + + bool need_init; } 
knl_u_plpgsql_context; //this is used to define functions in package diff --git a/src/include/utils/plpgsql.h b/src/include/utils/plpgsql.h index d4de942e3e..e4d72f3056 100644 --- a/src/include/utils/plpgsql.h +++ b/src/include/utils/plpgsql.h @@ -1978,6 +1978,7 @@ extern char* plpgsql_get_curline_query(); extern void plpgsql_process_stmt_array(StringInfo buf, List* bracket_loc); extern void plpgsql_append_object_typename(StringInfo buf, PLpgSQL_type *var_type); extern void CheckSaveExceptionsDML(int errstate); +extern PLpgSQL_package* GetCompileListPkg(Oid pkgOid); extern void plpgsql_exec_event_trigger(PLpgSQL_function *func, EventTriggerData *trigdata); /* ---------- diff --git a/src/test/regress/expected/hw_package_function.out b/src/test/regress/expected/hw_package_function.out index 6c20ddcef6..dee242a315 100644 --- a/src/test/regress/expected/hw_package_function.out +++ b/src/test/regress/expected/hw_package_function.out @@ -911,6 +911,85 @@ end trigger_test; / DROP PACKAGE trigger_test; NOTICE: drop cascades to function pkg_user2.tri_insert_func() +-- create packages whose package body both have an initialization anonymous block, and the initialization functions are nestedly called. 
+CREATE OR REPLACE PACKAGE nested_pkg1 is +function nested_pkg1_fun1(i int) return int; +function nested_pkg1_fun2(i int) return int; +id1 int:= 1; +end nested_pkg1; +/ +CREATE OR REPLACE PACKAGE nested_pkg2 is +function nested_pkg2_fun1(i number) return number; +function nested_pkg2_fun2(i numeric) return numeric; +id2 int:= 2; +end nested_pkg2; +/ +CREATE OR REPLACE PACKAGE BODY nested_pkg1 is +function nested_pkg1_fun1(i int) return int is +res int; +begin +res := nested_pkg2.id2; +return res; +end; +function nested_pkg1_fun2(i int) return int is +res int; +begin +res := i; +return res; +end; +begin +id1 := nested_pkg2.nested_pkg2_fun2(5); +end nested_pkg1; +/ +CREATE OR REPLACE PACKAGE BODY nested_pkg2 is +function nested_pkg2_fun1(i number) return number is +res int; +begin +res := nested_pkg1.id1; +return res; +end; +function nested_pkg2_fun2(i numeric) return numeric is +res int; +begin +res := i; +return res; +end; +begin +id2 := nested_pkg1.nested_pkg1_fun2(10); +end nested_pkg2; +/ +select nested_pkg2.nested_pkg2_fun1(1); + nested_pkg2_fun1 +------------------ + 5 +(1 row) + +select nested_pkg1.nested_pkg1_fun1(1); + nested_pkg1_fun1 +------------------ + 10 +(1 row) + +select nested_pkg2.nested_pkg2_fun2(1); + nested_pkg2_fun2 +------------------ + 1 +(1 row) + +select nested_pkg1.nested_pkg1_fun2(1); + nested_pkg1_fun2 +------------------ + 1 +(1 row) + +drop package nested_pkg1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkg_user2.nested_pkg1_fun1(integer) +drop cascades to function pkg_user2.nested_pkg1_fun2(integer) +drop package nested_pkg2; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkg_user2.nested_pkg2_fun1(numeric) +drop cascades to function pkg_user2.nested_pkg2_fun2(numeric) \c regression drop database db; drop user pkg_user1; diff --git a/src/test/regress/sql/hw_package_function.sql b/src/test/regress/sql/hw_package_function.sql index d41a8f712b..2f410c5d5d 100644 --- 
a/src/test/regress/sql/hw_package_function.sql +++ b/src/test/regress/sql/hw_package_function.sql @@ -751,6 +751,66 @@ end trigger_test; / DROP PACKAGE trigger_test; + +-- create packages whose package body both have an initialization anonymous block, and the initialization functions are nestedly called. +CREATE OR REPLACE PACKAGE nested_pkg1 is +function nested_pkg1_fun1(i int) return int; +function nested_pkg1_fun2(i int) return int; +id1 int:= 1; +end nested_pkg1; +/ + +CREATE OR REPLACE PACKAGE nested_pkg2 is +function nested_pkg2_fun1(i number) return number; +function nested_pkg2_fun2(i numeric) return numeric; +id2 int:= 2; +end nested_pkg2; +/ + +CREATE OR REPLACE PACKAGE BODY nested_pkg1 is +function nested_pkg1_fun1(i int) return int is +res int; +begin +res := nested_pkg2.id2; +return res; +end; +function nested_pkg1_fun2(i int) return int is +res int; +begin +res := i; +return res; +end; +begin +id1 := nested_pkg2.nested_pkg2_fun2(5); +end nested_pkg1; +/ + +CREATE OR REPLACE PACKAGE BODY nested_pkg2 is +function nested_pkg2_fun1(i number) return number is +res int; +begin +res := nested_pkg1.id1; +return res; +end; +function nested_pkg2_fun2(i numeric) return numeric is +res int; +begin +res := i; +return res; +end; +begin +id2 := nested_pkg1.nested_pkg1_fun2(10); +end nested_pkg2; +/ + +select nested_pkg2.nested_pkg2_fun1(1); +select nested_pkg1.nested_pkg1_fun1(1); +select nested_pkg2.nested_pkg2_fun2(1); +select nested_pkg1.nested_pkg1_fun2(1); + +drop package nested_pkg1; +drop package nested_pkg2; + \c regression drop database db; drop user pkg_user1; -- Gitee From 80263d9aa741013e8c0e6ef216b3a5501d9a88c6 Mon Sep 17 00:00:00 2001 From: laishenghao Date: Wed, 17 Jul 2024 11:46:00 +0800 Subject: [PATCH 055/347] =?UTF-8?q?concat=E3=80=81concat=5Fws=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E6=94=AF=E6=8C=81simplify=EF=BC=9B=E5=AD=97=E6=AE=B5?= =?UTF-8?q?=E5=BC=95=E7=94=A8=E8=A1=A8=E8=BE=BE=E5=BC=8F=E5=85=BC=E5=AE=B9?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_expr.cpp | 27 +- src/gausskernel/optimizer/util/clauses.cpp | 54 +- .../expected/stable_function_shippable.out | 468 ++++++++++++++++++ .../regress/input/insert_right_ref.source | 2 +- .../regress/output/insert_right_ref.source | 12 +- .../regress/sql/stable_function_shippable.sql | 77 +++ 6 files changed, 630 insertions(+), 10 deletions(-) diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 7a55479be9..85384adaee 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -232,6 +232,31 @@ static Const* BuildColumnBaseValue(Form_pg_attribute attTup) return nullptr; } +static bool IsConstDefaultValue(FuncExpr* expr) +{ + if (expr->funcformat != COERCE_IMPLICIT_CAST) { + return false; + } + + ListCell* cell = NULL; + List* args = expr->args; + bool isFirstArg = true; + + foreach (cell, args) { + Node* arg = (Node*)lfirst(cell); + if (isFirstArg) { + isFirstArg = false; + if (!IsA(arg, Const) && + !(IsA(arg, FuncExpr) && ((FuncExpr*)arg)->funcformat == COERCE_IMPLICIT_CAST)) { + return false; + } + } else if (!IsA(arg, Const)) { + return false; + } + } + return true; +} + static void AddDefaultExprNode(ParseState* pstate) { RightRefState* refState = pstate->rightRefState; @@ -265,7 +290,7 @@ static void AddDefaultExprNode(ParseState* pstate) refState->constValues[i] = nullptr; } else if (IsA(node, Const)) { refState->constValues[i] = (Const*)node; - } else if (IsA(node, FuncExpr)) { + } else if (IsA(node, FuncExpr) && IsConstDefaultValue((FuncExpr*)node)) { FuncExpr* expr = (FuncExpr*)node; List* args = expr->args; Expr* simple = simplify_function(expr->funcid, expr->funcresulttype, exprTypmod((const Node*)expr), diff --git a/src/gausskernel/optimizer/util/clauses.cpp b/src/gausskernel/optimizer/util/clauses.cpp index 642ed7753d..b447c3ecf4 100644 --- 
a/src/gausskernel/optimizer/util/clauses.cpp +++ b/src/gausskernel/optimizer/util/clauses.cpp @@ -4197,7 +4197,7 @@ static Oid pre_evaluate_func[] = {CURRENTSCHEMAFUNCOID, CURRENTDATABASEFUNCOID, PGCLIENTENCODINGFUNCOID}; -static bool is_safe_simplify_func(Oid funcid) +static bool is_safe_simplify_func(Oid funcid, List *args) { if (funcid == InvalidOid) { return false; @@ -4207,6 +4207,56 @@ static bool is_safe_simplify_func(Oid funcid) return true; } } + + /* handle some special func */ + if (funcid == CONCATFUNCOID || funcid == CONCATWSFUNCOID) { + ListCell* arg = NULL; + foreach (arg, args) { + Oid typ = exprType((Node*)lfirst(arg)); + /* + * binary: not ok, bytea_output will affect the result. raw, etc... + * binary: BINARY, VARBINARY, BLOB, TINYBLOB, MEDIUMBLOB, LONGBLOB, bit. althought the concat_internal treat + * them specially, but the concat result is blob, so the result still affect by bytea_output. + * time: not ok, DateStyle will affect the result. time, timestamp, date, etc... + * num: ok, integer, float, numeric + * bool: ok + * string: ok, char/varchar/text/xml/json/set/enum/xml/unknown, etc... 
+ */ + switch (typ) { + case BOOLOID: + case CHAROID: + case NAMEOID: + case INT1OID: + case INT2OID: + case INT4OID: + case INT8OID: + case INT16OID: + case TEXTOID: + case OIDOID: + case CLOBOID: + case JSONOID: + case XMLOID: + case UNKNOWNOID: + case VARCHAROID: + case VARBITOID: + case CSTRINGOID: + case JSONBOID: + case NVARCHAR2OID: + case XIDOID: + case SHORTXIDOID: + break; + default: + if (type_is_set(typ) || type_is_enum(typ)) { + break; + } + /* other case, return false directly */ + return false; + } + } + /* all args outfunc are immutable, return true */ + return true; + } + return false; } @@ -4295,7 +4345,7 @@ static Expr* evaluate_function(Oid funcid, Oid result_type, int32 result_typmod, /* okay */; else if (context->estimate && funcform->provolatile == PROVOLATILE_STABLE) /* okay */; - else if (is_safe_simplify_func(funcid)) + else if (is_safe_simplify_func(funcid, args)) /* okay */; else return NULL; diff --git a/src/test/regress/expected/stable_function_shippable.out b/src/test/regress/expected/stable_function_shippable.out index 8ab9f5fc16..c87bedb2e8 100644 --- a/src/test/regress/expected/stable_function_shippable.out +++ b/src/test/regress/expected/stable_function_shippable.out @@ -778,3 +778,471 @@ explain (costs off, verbose on) select * from test where date_trunc('year', b) > --clean up drop table test; +create table concat_ws_t(a text); +-- test concat +explain(costs off) select * from concat_ws_t where a like concat('%', '1', '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bool, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%t%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::"char", '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + 
+explain(costs off) select * from concat_ws_t where a like concat('%', '1'::name, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int1, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int2, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int4, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int8, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int16, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::text, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::oid, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::clob, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '{"a":1}'::json, '%'); + QUERY PLAN +------------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ 
'%{"a":1}%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::xml, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::varchar, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::varbit, '%')::text; + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::cstring, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '{"a":1}'::jsonb, '%'); + QUERY PLAN +------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ '%{"a": 1}%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::nvarchar2, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::xid, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::xid32, '%'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '%1%'::text) +(2 rows) + +-- some can't do simply func +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::money, '%'); + QUERY PLAN +--------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '$1.00'::money, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a 
like concat('%', '1'::raw, '%'); + QUERY PLAN +---------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '01'::raw, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bytea, '%'); + QUERY PLAN +-------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '\x31'::bytea, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '2022-11-11'::date, '%'); + QUERY PLAN +------------------------------------------------------------------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '2022-11-11 00:00:00'::timestamp(0) without time zone, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '2022-11-11 11:11:11'::timestamp, '%'); + QUERY PLAN +--------------------------------------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '2022-11-11 11:11:11'::timestamp without time zone, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '11:11:11'::time, '%'); + QUERY PLAN +----------------------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '11:11:11'::time without time zone, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::numeric, '%'); + QUERY PLAN +----------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', 1::numeric, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bpchar, '%'); + QUERY PLAN +------------------------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '1'::bpchar, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::float4, '%'); + QUERY PLAN +-------------------------------------------- + 
Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', 1::real, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::float8, '%'); + QUERY PLAN +-------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', 1::double precision, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::blob, '%')::text; + QUERY PLAN +----------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', '01'::blob, '%')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bit(8), '%')::text; + QUERY PLAN +-------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat('%', B'10000000'::bit(8), '%')) +(2 rows) + +-- -- test concat_ws +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1', '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bool, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ 't23'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::"char", '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::name, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int1, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int2, '3'); + QUERY PLAN 
+------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int4, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int8, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int16, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::text, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::oid, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::clob, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '{"a":1}'::json, '3'); + QUERY PLAN +------------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '{"a":1}23'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::xml, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::varchar, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a 
like concat_ws('2', '1'::varbit, '3')::text; + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::cstring, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '{"a":1}'::jsonb, '3'); + QUERY PLAN +------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ '{"a": 1}23'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::nvarchar2, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::xid, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::xid32, '3'); + QUERY PLAN +------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ '123'::text) +(2 rows) + +-- some can't do simply func +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::money, '3'); + QUERY PLAN +------------------------------------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '$1.00'::money, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::raw, '3'); + QUERY PLAN +------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '01'::raw, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bytea, '3'); + QUERY PLAN +----------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '\x31'::bytea, '3')) 
+(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '2022-11-11'::date, '3'); + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '2022-11-11 00:00:00'::timestamp(0) without time zone, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '2022-11-11 11:11:11'::timestamp, '3'); + QUERY PLAN +------------------------------------------------------------------------------------------------ + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '2022-11-11 11:11:11'::timestamp without time zone, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '11:11:11'::time, '3'); + QUERY PLAN +-------------------------------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '11:11:11'::time without time zone, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::numeric, '3'); + QUERY PLAN +-------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, 1::numeric, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bpchar, '3'); + QUERY PLAN +--------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '1'::bpchar, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::float4, '3'); + QUERY PLAN +----------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, 1::real, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::float8, '3'); + QUERY PLAN +----------------------------------------------------------------- + Seq Scan on concat_ws_t + 
Filter: (a ~~ concat_ws('2'::text, 1::double precision, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::blob, '3')::text; + QUERY PLAN +-------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, '01'::blob, '3')) +(2 rows) + +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bit(8), '3')::text; + QUERY PLAN +----------------------------------------------------------------- + Seq Scan on concat_ws_t + Filter: (a ~~ concat_ws('2'::text, B'10000000'::bit(8), '3')) +(2 rows) + +drop table concat_ws_t; diff --git a/src/test/regress/input/insert_right_ref.source b/src/test/regress/input/insert_right_ref.source index 0c999191f2..91d8036446 100644 --- a/src/test/regress/input/insert_right_ref.source +++ b/src/test/regress/input/insert_right_ref.source @@ -183,7 +183,7 @@ select (c6 is not null) as c6_is_not_null, (c12 is not null) as c12_is_not_null from time_default_t where n = 1 or n = 3; -select (c6 is not null) c6_is_not_null, +select (c6 is null) as c6_is_null, (c8 is null) as c8_is_null, (c10 is null) as c10_is_null, (c12 is null) as c12_is_null diff --git a/src/test/regress/output/insert_right_ref.source b/src/test/regress/output/insert_right_ref.source index fce52485f9..3e5ea63bbf 100644 --- a/src/test/regress/output/insert_right_ref.source +++ b/src/test/regress/output/insert_right_ref.source @@ -215,8 +215,8 @@ from num_default_t; ?column? 
| c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c9 | c10 | c11 | c12 | c13 | c14 | c15 | c16 | c17 | c18 | c19 | c20 | c21 ----------+----+----+----+----+----+----+----+----+----+-----+-----+-----+-----+-----+--------+-----+-------+-----+-----+-----+------ 3 | 1 | | 3 | | 5 | | 7 | | 9 | 10 | | t | | 14 | 15.000 | 16 | 17.00 | 18 | | 10 | 2037 - 3 | 1 | | 3 | | 5 | | 7 | | 9 | 10 | | t | | 14 | 15.000 | 16 | 17.00 | 18 | | | 2037 - 3 | 1 | | 3 | | 5 | | 7 | | 9 | 10 | | t | | 14 | 15.000 | 16 | 17.00 | 18 | | | 2037 + 3 | 1 | | 3 | | 5 | | 7 | | 9 | 10 | | t | | 14 | 15.000 | 16 | 17.00 | 18 | | | + 3 | 1 | | 3 | | 5 | | 7 | | 9 | 10 | | t | | 14 | 15.000 | 16 | 17.00 | 18 | | | 3 | 1 | | 3 | | 5 | | 7 | | 9 | 10 | | t | | 14 | 15.000 | 16 | 17.00 | 18 | | 10 | 2037 (4 rows) @@ -308,14 +308,14 @@ from time_default_t where n = 1 or n = 3; t | t | t | t (2 rows) -select (c6 is not null) c6_is_not_null, +select (c6 is null) as c6_is_null, (c8 is null) as c8_is_null, (c10 is null) as c10_is_null, (c12 is null) as c12_is_null from time_default_t where n = 2; - c6_is_not_null | c8_is_null | c10_is_null | c12_is_null -----------------+------------+-------------+------------- - t | t | t | t + c6_is_null | c8_is_null | c10_is_null | c12_is_null +------------+------------+-------------+------------- + t | t | t | t (1 row) select (c1=c2) as c1c2, diff --git a/src/test/regress/sql/stable_function_shippable.sql b/src/test/regress/sql/stable_function_shippable.sql index 1ec825c57f..d0fd1fbf52 100644 --- a/src/test/regress/sql/stable_function_shippable.sql +++ b/src/test/regress/sql/stable_function_shippable.sql @@ -329,3 +329,80 @@ explain (costs off, verbose on) select * from test where date_trunc('year', b) > --clean up drop table test; + +create table concat_ws_t(a text); +-- test concat +explain(costs off) select * from concat_ws_t where a like concat('%', '1', '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bool, '%'); +explain(costs off) 
select * from concat_ws_t where a like concat('%', '1'::"char", '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::name, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int1, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int2, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int4, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int8, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::int16, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::text, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::oid, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::clob, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '{"a":1}'::json, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::xml, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::varchar, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::varbit, '%')::text; +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::cstring, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '{"a":1}'::jsonb, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::nvarchar2, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::xid, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::xid32, '%'); + +-- some can't do simply func +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::money, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::raw, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bytea, '%'); +explain(costs 
off) select * from concat_ws_t where a like concat('%', '2022-11-11'::date, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '2022-11-11 11:11:11'::timestamp, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '11:11:11'::time, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::numeric, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bpchar, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::float4, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::float8, '%'); +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::blob, '%')::text; +explain(costs off) select * from concat_ws_t where a like concat('%', '1'::bit(8), '%')::text; + +-- -- test concat_ws +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1', '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bool, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::"char", '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::name, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int1, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int2, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int4, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int8, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::int16, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::text, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::oid, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::clob, '3'); +explain(costs off) select * from concat_ws_t 
where a like concat_ws('2', '{"a":1}'::json, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::xml, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::varchar, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::varbit, '3')::text; +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::cstring, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '{"a":1}'::jsonb, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::nvarchar2, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::xid, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::xid32, '3'); + +-- some can't do simply func +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::money, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::raw, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bytea, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '2022-11-11'::date, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '2022-11-11 11:11:11'::timestamp, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '11:11:11'::time, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::numeric, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bpchar, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::float4, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::float8, '3'); +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::blob, '3')::text; +explain(costs off) select * from concat_ws_t where a like concat_ws('2', '1'::bit(8), '3')::text; + +drop 
table concat_ws_t; -- Gitee From 65da7e14f4bd704f73054f9cf8566d431827652d Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Fri, 28 Jun 2024 16:10:52 +0800 Subject: [PATCH 056/347] support smp cursor --- src/common/backend/utils/mmgr/portalmem.cpp | 4 +- .../optimizer/commands/portalcmds.cpp | 44 +++++++++++++++---- .../optimizer/plan/pgxcplan_single.cpp | 9 ++-- src/gausskernel/process/stream/execStream.cpp | 7 +++ src/gausskernel/process/stream/streamCore.cpp | 2 + src/gausskernel/process/tcop/pquery.cpp | 20 +++------ .../process/threadpool/knl_session.cpp | 1 + src/gausskernel/runtime/executor/execAmi.cpp | 7 +++ src/gausskernel/storage/lmgr/lock.cpp | 18 ++++++-- src/include/distributelayer/streamCore.h | 2 + src/include/executor/executor.h | 7 ++- src/include/knl/knl_session.h | 8 ++++ src/include/utils/portal.h | 10 +++++ 13 files changed, 106 insertions(+), 33 deletions(-) diff --git a/src/common/backend/utils/mmgr/portalmem.cpp b/src/common/backend/utils/mmgr/portalmem.cpp index f170f8eba1..223f6dcfaf 100755 --- a/src/common/backend/utils/mmgr/portalmem.cpp +++ b/src/common/backend/utils/mmgr/portalmem.cpp @@ -105,7 +105,7 @@ typedef struct portalhashent { inline void ReleaseStreamGroup(Portal portal) { #ifndef ENABLE_MULTIPLE_NODES - if (!IS_SPQ_RUNNING && !StreamThreadAmI()) { + if (!IS_SPQ_RUNNING && IS_STREAM_PORTAL) { portal->streamInfo.AttachToSession(); StreamNodeGroup::ReleaseStreamGroup(true); portal->streamInfo.Reset(); @@ -920,7 +920,7 @@ void AtAbort_Portals(bool STP_rollback) * estate is under the queryDesc, and stream threads use it. * we should wait all stream threads exit to cleanup queryDesc. 
*/ - if (!StreamThreadAmI()) { + if (IS_STREAM_PORTAL) { portal->streamInfo.AttachToSession(); StreamNodeGroup::ReleaseStreamGroup(true, STREAM_ERROR); portal->streamInfo.Reset(); diff --git a/src/gausskernel/optimizer/commands/portalcmds.cpp b/src/gausskernel/optimizer/commands/portalcmds.cpp index 672a64f359..9204609046 100644 --- a/src/gausskernel/optimizer/commands/portalcmds.cpp +++ b/src/gausskernel/optimizer/commands/portalcmds.cpp @@ -142,6 +142,10 @@ void PerformCursorOpen(PlannedStmt* stmt, ParamListInfo params, const char* quer */ PortalStart(portal, params, 0, GetActiveSnapshot()); + if (u_sess->stream_cxt.global_obj != NULL) { + portal->streamInfo.ResetEnvForCursor(); + } + if (u_sess->pgxc_cxt.gc_fdw_snapshot) { PopActiveSnapshot(); } @@ -286,6 +290,11 @@ void PortalCleanup(Portal portal) saveResourceOwner = t_thrd.utils_cxt.CurrentResourceOwner; PG_TRY(); { +#ifndef ENABLE_MULTIPLE_NODES + if (IS_STREAM_PORTAL) { + portal->streamInfo.AttachToSession(); + } +#endif t_thrd.utils_cxt.CurrentResourceOwner = portal->resowner; ExecutorFinish(queryDesc); ExecutorEnd(queryDesc); @@ -294,14 +303,12 @@ void PortalCleanup(Portal portal) * estate is under the queryDesc, and stream threads use it. * we should wait all stream threads exit to cleanup queryDesc. */ - if (!StreamThreadAmI()) { - portal->streamInfo.AttachToSession(); + if (IS_STREAM_PORTAL) { StreamNodeGroup::ReleaseStreamGroup(true); portal->streamInfo.Reset(); } #else - if (t_thrd.spq_ctx.spq_role == ROLE_UTILITY && !StreamThreadAmI()) { - portal->streamInfo.AttachToSession(); + if (t_thrd.spq_ctx.spq_role == ROLE_UTILITY && IS_STREAM_PORTAL) { StreamNodeGroup::ReleaseStreamGroup(true); portal->streamInfo.Reset(); } @@ -421,11 +428,21 @@ void PersistHoldablePortal(Portal portal, bool is_rollback) PushActiveSnapshot(queryDesc->snapshot); - /* - * Rewind the executor: we need to store the entire result set in the - * tuplestore, so that subsequent backward FETCHs can be processed. 
- */ - ExecutorRewind(queryDesc); + if (IsA(queryDesc->planstate, StreamState)) { + /* + * Record current position when transaction commit for cursor with stream plan, + * so that subsequent absolute FETCHs can be processed properly. + */ + portal->commitPortalPos = portal->portalPos; + portal->portalPos = 0; + } else { + /* + * Rewind the executor: we need to store the entire result set in the + * tuplestore, so that subsequent backward FETCHs can be processed. + */ + portal->commitPortalPos = 0; + ExecutorRewind(queryDesc); + } /* * Change the destination to output to the tuplestore. Note we tell @@ -455,6 +472,15 @@ void PersistHoldablePortal(Portal portal, bool is_rollback) portal->queryDesc = NULL; /* prevent double shutdown */ ExecutorFinish(queryDesc); ExecutorEnd(queryDesc); + +#ifndef ENABLE_MULTIPLE_NODES + if (IS_STREAM_PORTAL) { + portal->streamInfo.AttachToSession(); + StreamNodeGroup::ReleaseStreamGroup(true, STREAM_COMPLETE); + portal->streamInfo.Reset(); + } +#endif + FreeQueryDesc(queryDesc); /* diff --git a/src/gausskernel/optimizer/plan/pgxcplan_single.cpp b/src/gausskernel/optimizer/plan/pgxcplan_single.cpp index 87c7d85db6..fd06bb2390 100755 --- a/src/gausskernel/optimizer/plan/pgxcplan_single.cpp +++ b/src/gausskernel/optimizer/plan/pgxcplan_single.cpp @@ -72,7 +72,7 @@ * If Stream is supported, a copy of the 'query' is returned as a backup in case generating a plan * with Stream fails. 
*/ -static Query* check_shippable(bool *stream_unsupport, Query* query, shipping_context* context) +static Query* check_shippable(bool *stream_unsupport, Query* query, shipping_context* context, int cursorOptions) { if (u_sess->attr.attr_sql.rewrite_rule & PARTIAL_PUSH) { *stream_unsupport = !context->query_shippable; @@ -86,9 +86,10 @@ static Query* check_shippable(bool *stream_unsupport, Query* query, shipping_con u_sess->opt_cxt.is_dngather_support = false; } - /* single node do not support parallel query in cursor */ + /* single node support parallel query in cursor only when it is no-scroll cursor */ if (query->utilityStmt && IsA(query->utilityStmt, DeclareCursorStmt)) { - *stream_unsupport = true; + cursorOptions = cursorOptions | ((DeclareCursorStmt*)query->utilityStmt)->options; + *stream_unsupport = !(cursorOptions & CURSOR_OPT_NO_SCROLL) ? true : *stream_unsupport; } if (*stream_unsupport || !IS_STREAM) { @@ -128,7 +129,7 @@ PlannedStmt* pgxc_planner(Query* query, int cursorOptions, ParamListInfo boundPa (void)stream_walker((Node*)query, (void*)(&context)); disable_unshipped_log(query, &context); - re_query = check_shippable(&stream_unsupport, query, &context); + re_query = check_shippable(&stream_unsupport, query, &context, cursorOptions); } else { if (unlikely(u_sess->attr.attr_sql.enable_unshipping_log)) { errno_t sprintf_rc = sprintf_s(u_sess->opt_cxt.not_shipping_info->not_shipping_reason, diff --git a/src/gausskernel/process/stream/execStream.cpp b/src/gausskernel/process/stream/execStream.cpp index ae0e0633bf..d6654253d0 100755 --- a/src/gausskernel/process/stream/execStream.cpp +++ b/src/gausskernel/process/stream/execStream.cpp @@ -1069,6 +1069,13 @@ void BuildStreamFlow(PlannedStmt* plan) u_sess->stream_cxt.global_obj = New(u_sess->stream_cxt.stream_runtime_mem_cxt) StreamNodeGroup(); u_sess->stream_cxt.global_obj->m_streamRuntimeContext = u_sess->stream_cxt.stream_runtime_mem_cxt; +#ifndef ENABLE_MULTIPLE_NODES + if (StreamTopConsumerAmI() && 
ActivePortal != NULL) { + ActivePortal->streamInfo.RecordSessionInfo(); + u_sess->stream_cxt.global_obj->m_portal = ActivePortal; + } +#endif + StreamFlowCtl ctl; ctl.pstmt = plan; ctl.plan = plan->planTree; diff --git a/src/gausskernel/process/stream/streamCore.cpp b/src/gausskernel/process/stream/streamCore.cpp index 4fdbc28c84..c5f33f79f3 100755 --- a/src/gausskernel/process/stream/streamCore.cpp +++ b/src/gausskernel/process/stream/streamCore.cpp @@ -961,6 +961,8 @@ void StreamNodeGroup::destroy(StreamObjStatus status) #endif u_sess->stream_cxt.global_obj->deInit(status); delete u_sess->stream_cxt.global_obj; + u_sess->stream_cxt.cursorNodeGroupList = list_delete(u_sess->stream_cxt.cursorNodeGroupList, + u_sess->stream_cxt.global_obj); u_sess->stream_cxt.global_obj = NULL; } diff --git a/src/gausskernel/process/tcop/pquery.cpp b/src/gausskernel/process/tcop/pquery.cpp index d7252a8d5b..15cbfe5364 100644 --- a/src/gausskernel/process/tcop/pquery.cpp +++ b/src/gausskernel/process/tcop/pquery.cpp @@ -793,6 +793,7 @@ void PortalStart(Portal portal, ParamListInfo params, int eflags, Snapshot snaps portal->atStart = true; portal->atEnd = false; /* allow fetches */ portal->portalPos = 0; + portal->commitPortalPos = 0; portal->posOverflow = false; PopActiveSnapshot(); @@ -819,6 +820,7 @@ void PortalStart(Portal portal, ParamListInfo params, int eflags, Snapshot snaps portal->atStart = true; portal->atEnd = false; /* allow fetches */ portal->portalPos = 0; + portal->commitPortalPos = 0; portal->posOverflow = false; break; @@ -846,6 +848,7 @@ void PortalStart(Portal portal, ParamListInfo params, int eflags, Snapshot snaps portal->atStart = true; portal->atEnd = false; /* allow fetches */ portal->portalPos = 0; + portal->commitPortalPos = 0; portal->posOverflow = false; break; @@ -905,15 +908,6 @@ void PortalSetResultFormat(Portal portal, int nFormats, int16* formats) int natts; int i; -#ifndef ENABLE_MULTIPLE_NODES -#ifndef USE_SPQ - if (StreamTopConsumerAmI()) { - 
portal->streamInfo.RecordSessionInfo(); - u_sess->stream_cxt.global_obj->m_portal = portal; - } -#endif -#endif - /* Do nothing if portal won't return tuples */ if (portal->tupDesc == NULL) return; @@ -2191,10 +2185,10 @@ static long DoPortalRunFetch(Portal portal, FetchDirection fdirection, long coun if (portal->atEnd) pos++; /* need one extra fetch if off end */ - if (count <= pos) - (void)PortalRunSelect(portal, false, pos - count + 1, None_Receiver); - else if (count > pos + 1) - (void)PortalRunSelect(portal, true, count - pos - 1, None_Receiver); + if (count - portal->commitPortalPos <= pos) + (void)PortalRunSelect(portal, false, pos - count + 1 + portal->commitPortalPos, None_Receiver); + else if (count - portal->commitPortalPos > pos + 1) + (void)PortalRunSelect(portal, true, count - pos - 1 - portal->commitPortalPos, None_Receiver); } return PortalRunSelect(portal, true, 1L, dest); } else if (count < 0) { diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp index 58b7568f49..96d8a737ea 100755 --- a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -326,6 +326,7 @@ static void knl_u_stream_init(knl_u_stream_context* stream_cxt) stream_cxt->stop_query_id = 0; stream_cxt->stream_runtime_mem_cxt = NULL; stream_cxt->data_exchange_mem_cxt = NULL; + stream_cxt->cursorNodeGroupList = NIL; } static void knl_u_sig_init(knl_u_sig_context* sig_cxt) diff --git a/src/gausskernel/runtime/executor/execAmi.cpp b/src/gausskernel/runtime/executor/execAmi.cpp index dc033b7044..e860c8c840 100755 --- a/src/gausskernel/runtime/executor/execAmi.cpp +++ b/src/gausskernel/runtime/executor/execAmi.cpp @@ -366,6 +366,13 @@ void ExecReScan(PlanState* node) ReScanExprContext(node->ps_ExprContext); } +#ifndef ENABLE_MULTIPLE_NODES + /* if the cursor has executed by stream, it cannot rescan anymore. 
*/ + if (IsA(node, StreamState)) { + ereport(ERROR, (errmsg("cursor with stream plan do not support scan backward."))); + } +#endif + /* If need stub execution, stop rescan here */ if (!planstate_need_stub(node)) { if (IS_PGXC_DATANODE && EXEC_IN_RECURSIVE_MODE(node->plan) && IsA(node, StreamState)) { diff --git a/src/gausskernel/storage/lmgr/lock.cpp b/src/gausskernel/storage/lmgr/lock.cpp index 3ef052ed4a..88acd0c8c6 100644 --- a/src/gausskernel/storage/lmgr/lock.cpp +++ b/src/gausskernel/storage/lmgr/lock.cpp @@ -591,6 +591,16 @@ bool IsOtherProcRedistribution(PGPROC *otherProc) */ inline bool IsInSameTransaction(PGPROC *proc1, PGPROC *proc2) { + if (has_backend_cursor_stream()) { + ListCell *lc; + foreach(lc, u_sess->stream_cxt.cursorNodeGroupList) { + StreamNodeGroup* streamNodeGroup = (StreamNodeGroup*)lfirst(lc); + Assert(streamNodeGroup != u_sess->stream_cxt.global_obj); + if (streamNodeGroup->inNodeGroup(proc1->pid, proc2->pid)) { + return true; + } + } + } return u_sess->stream_cxt.global_obj == NULL ? false : u_sess->stream_cxt.global_obj->inNodeGroup(proc1->pid, proc2->pid); } @@ -619,8 +629,8 @@ void CancelConflictLockWaiter(PROCLOCK *proclock, LOCK *lock, LockMethod lockMet bool conflictLocks = ((lockMethodTable->conflictTab[lockmode] & LOCKBIT_ON((unsigned int)proc->waitLockMode)) != 0); PGPROC *leader2 = (proc->lockGroupLeader == NULL) ? 
proc : proc->lockGroupLeader; - bool isSameTrans = ((StreamTopConsumerAmI() || StreamThreadAmI()) && IsInSameTransaction(proc, t_thrd.proc)) || - (leader1 == leader2); + bool isSameTrans = ((StreamTopConsumerAmI() || StreamThreadAmI() || has_backend_cursor_stream()) && + IsInSameTransaction(proc, t_thrd.proc)) || (leader1 == leader2); /* send term to waitqueue proc while conflict and not in a stream or lock group */ if (conflictLocks && !isSameTrans && !IsPrepareXact(proc) && proc->pid != 0 && gs_signal_send(proc->pid, SIGTERM) < 0) { @@ -646,7 +656,7 @@ void CancelConflictLockHolder(PROCLOCK *proclock, LOCK *lock, LockMethod lockMet bool conflictLocks = ((lockMethodTable->conflictTab[lockmode] & otherProcLock->holdMask) != 0); PGPROC *leader2 = (otherProcLock->tag.myProc->lockGroupLeader == NULL) ? otherProcLock->tag.myProc : otherProcLock->tag.myProc->lockGroupLeader; - bool isSameTrans = ((StreamTopConsumerAmI() || StreamThreadAmI()) && + bool isSameTrans = ((StreamTopConsumerAmI() || StreamThreadAmI() || has_backend_cursor_stream()) && IsInSameTransaction(otherProcLock->tag.myProc, t_thrd.proc)) || (leader1 == leader2); /* send term to holder proc while conflict and not in a stream or lock group */ if (conflictLocks && !isSameTrans && !IsPrepareXact(otherProcLock->tag.myProc) && @@ -1322,7 +1332,7 @@ int LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock * thread is in one transaction, but these threads use differnt procs. 
* We need treat these procs as one proc */ - if (StreamTopConsumerAmI() || StreamThreadAmI() || inLockGroup) { + if (StreamTopConsumerAmI() || StreamThreadAmI() || has_backend_cursor_stream() || inLockGroup) { SHM_QUEUE *otherProcLocks = &(lock->procLocks); PROCLOCK *otherProcLock = (PROCLOCK *)SHMQueueNext(otherProcLocks, otherProcLocks, offsetof(PROCLOCK, lockLink)); diff --git a/src/include/distributelayer/streamCore.h b/src/include/distributelayer/streamCore.h index d899cbf01c..76b455c353 100755 --- a/src/include/distributelayer/streamCore.h +++ b/src/include/distributelayer/streamCore.h @@ -58,6 +58,8 @@ #define TupleVectorMaxSize 100 +#define IS_STREAM_PORTAL (!StreamThreadAmI() && portal->streamInfo.streamGroup != NULL) + struct StreamState; class StreamObj; class StreamNodeGroup; diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 136dd6d932..0889f858bf 100755 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -262,6 +262,7 @@ extern bool NeedStubExecution(Plan* plan); extern TupleTableSlot* FetchPlanSlot(PlanState* subPlanState, ProjectionInfo** projInfos, bool isinherit); extern long ExecGetPlanMemCost(Plan* node); +extern bool executorEarlyStop(); /* ---------------------------------------------------------------- * ExecProcNode @@ -276,6 +277,11 @@ static inline TupleTableSlot *ExecProcNode(PlanState *node) { TupleTableSlot* result; Assert(node->ExecProcNode); + + if (unlikely(executorEarlyStop())) { + return NULL; + } + if (unlikely(node->nodeContext)) { MemoryContext old_context = MemoryContextSwitchTo(node->nodeContext); /* Switch to Node Level Memory Context */ if (node->chgParam != NULL) /* something changed? 
*/ @@ -686,7 +692,6 @@ extern void PthreadRWlockWrlock(ResourceOwner owner, pthread_rwlock_t* rwlock); extern void PthreadRWlockUnlock(ResourceOwner owner, pthread_rwlock_t* rwlock); extern void PthreadRwLockInit(pthread_rwlock_t* rwlock, pthread_rwlockattr_t *attr); -extern bool executorEarlyStop(); extern void ExecEarlyFree(PlanState* node); extern void ExecEarlyFreeBody(PlanState* node); extern void ExecReSetRecursivePlanTree(PlanState* node); diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index c45a88cdbd..8dae1a5667 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -112,6 +112,9 @@ typedef struct knl_u_stream_context { class StreamProducer* producer_obj; + /* List of StreamNodeGroup belong to current session that are active in the backend */ + List *cursorNodeGroupList; + MemoryContext stream_runtime_mem_cxt; /* Shared memory context for in-memory data exchange. */ @@ -3217,5 +3220,10 @@ inline void stp_reset_xact_state_and_err_msg(bool savedisAllowCommitRollback, bo } } +inline bool has_backend_cursor_stream() +{ + return list_length(u_sess->stream_cxt.cursorNodeGroupList) > 0; +} + #endif /* SRC_INCLUDE_KNL_KNL_SESSION_H_ */ diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h index bb8a0dae6a..4aa18f6de5 100644 --- a/src/include/utils/portal.h +++ b/src/include/utils/portal.h @@ -134,6 +134,15 @@ typedef struct PortalStream { u_sess->debug_query_id = 0; } + void ResetEnvForCursor() + { + Assert(u_sess->stream_cxt.global_obj != NULL); + (void)MemoryContextSwitchTo(u_sess->top_portal_cxt); + u_sess->stream_cxt.cursorNodeGroupList = lappend(u_sess->stream_cxt.cursorNodeGroupList, + u_sess->stream_cxt.global_obj); + ResetEnv(); + } + void RecordSessionInfo() { streamGroup = u_sess->stream_cxt.global_obj; @@ -230,6 +239,7 @@ typedef struct PortalData { bool atEnd; bool posOverflow; long portalPos; + long commitPortalPos; bool hasStreamForPlpgsql; /* true if plpgsql's portal has stream 
may cause hang in for-loop */ /* Presentation data, primarily used by the pg_cursors system view */ -- Gitee From 2afbe6fb562b8aac111d42588a54b9193b521507 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 15 Jul 2024 15:14:09 +0800 Subject: [PATCH 057/347] support smp for functionscan with cursor arg --- src/bin/pg_dump/pg_dump.cpp | 29 +++- src/common/backend/catalog/CMakeLists.txt | 2 +- src/common/backend/catalog/Makefile | 4 +- src/common/backend/catalog/pg_proc.cpp | 7 +- src/common/backend/catalog/pg_proc_ext.cpp | 155 ++++++++++++++++++ src/common/backend/nodes/copyfuncs.cpp | 4 + src/common/backend/nodes/outfuncs.cpp | 4 + src/common/backend/nodes/readfuncs.cpp | 6 + src/common/backend/parser/gram.y | 64 +++++++- src/common/backend/parser/parse_expr.cpp | 55 ++++++- src/common/backend/parser/parse_relation.cpp | 11 ++ src/common/backend/utils/adt/ruleutils.cpp | 41 +++++ src/common/backend/utils/cache/relcache.cpp | 11 ++ src/common/backend/utils/cache/syscache.cpp | 9 +- src/common/backend/utils/init/globals.cpp | 4 +- .../interfaces/libpq/frontend_parser/gram.y | 3 +- src/common/pl/plpgsql/src/gram.y | 32 ++++ .../optimizer/commands/functioncmds.cpp | 67 +++++++- src/gausskernel/optimizer/plan/createplan.cpp | 37 ++++- .../optimizer/plan/streamplan_utils.cpp | 8 + src/gausskernel/optimizer/util/pathnode.cpp | 1 + src/gausskernel/optimizer/util/pgxcship.cpp | 6 +- src/gausskernel/optimizer/util/relnode.cpp | 5 +- src/gausskernel/process/stream/execStream.cpp | 23 ++- src/gausskernel/process/stream/streamCore.cpp | 4 +- src/gausskernel/process/stream/streamMain.cpp | 5 +- .../process/stream/streamProducer.cpp | 5 + src/gausskernel/process/tcop/postgres.cpp | 1 + .../process/threadpool/knl_session.cpp | 1 + src/gausskernel/storage/ipc/procsignal.cpp | 4 +- src/include/catalog/catversion.h | 2 +- src/include/catalog/indexing.h | 3 + src/include/catalog/pg_proc_ext.h | 65 ++++++++ src/include/catalog/pg_proc_fn.h | 3 +- 
.../rollback_catalog_maindb_92_941.sql | 4 + .../rollback_catalog_otherdb_92_941.sql | 4 + .../upgrade_catalog_maindb_92_941.sql | 15 ++ .../upgrade_catalog_otherdb_92_941.sql | 15 ++ src/include/distributelayer/streamCore.h | 2 + src/include/distributelayer/streamProducer.h | 2 + src/include/executor/exec/execStream.h | 1 + src/include/knl/knl_session.h | 2 + src/include/miscadmin.h | 1 + src/include/nodes/nodes.h | 1 + src/include/nodes/parsenodes.h | 1 + src/include/nodes/parsenodes_common.h | 13 ++ src/include/nodes/relation.h | 1 + src/include/optimizer/stream_cost.h | 2 + src/include/parser/kwlist.h | 1 + src/include/parser/parse_expr.h | 1 + src/include/utils/syscache.h | 3 +- 51 files changed, 712 insertions(+), 38 deletions(-) create mode 100644 src/common/backend/catalog/pg_proc_ext.cpp create mode 100644 src/include/catalog/pg_proc_ext.h create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_941.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_941.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 8a018f8ddc..080441d1bc 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -13394,6 +13394,9 @@ static void dumpFunc(Archive* fout, FuncInfo* finfo) bool addDelimiter = false; bool isNullSelfloop = false; const char *funcKind; + char* parallelCursorName = NULL; + char* parallelCursorStrategy = NULL; + char* parallelCursorPartKey = NULL; ArchiveHandle* AH = (ArchiveHandle*)fout; /* Skip if not to be dumped */ @@ -13426,9 +13429,9 @@ static void dumpFunc(Archive* fout, FuncInfo* finfo) */ appendPQExpBuffer(query, "SELECT proretset, prosrc, probin, " - 
"pg_catalog.pg_get_function_arguments(oid) AS funcargs, " - "pg_catalog.pg_get_function_identity_arguments(oid) AS funciargs, " - "pg_catalog.pg_get_function_result(oid) AS funcresult, " + "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, " + "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs, " + "pg_catalog.pg_get_function_result(p.oid) AS funcresult, " "proiswindow, provolatile, proisstrict, prosecdef, " "proleakproof, proconfig, procost, prorows, propackageid, proowner," "%s, " @@ -13437,9 +13440,11 @@ static void dumpFunc(Archive* fout, FuncInfo* finfo) "%s, " "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname, " "%s, " - "(SELECT 1 FROM pg_depend WHERE objid = oid AND objid = refobjid AND refclassid = 1255 LIMIT 1) AS selfloop " - "FROM pg_catalog.pg_proc " - "WHERE oid = '%u'::pg_catalog.oid", + "(SELECT 1 FROM pg_depend WHERE objid = p.oid AND objid = refobjid AND refclassid = 1255 LIMIT 1) AS selfloop, " + "proargnames[o.parallel_cursor_seq + 1] AS parallelCursorName, o.parallel_cursor_strategy AS parallelCursorStrategy, " + "pg_catalog.array_to_string(o.parallel_cursor_partkey, ', ') AS parallelCursorPartKey " + "FROM pg_catalog.pg_proc p left join pg_catalog.pg_proc_ext o on p.oid = o.proc_oid " + "WHERE p.oid = '%u'::pg_catalog.oid", isHasFencedmode ? "fencedmode" : "NULL AS fencedmode", isHasProshippable ? "proshippable" : "NULL AS proshippable", isHasPropackage ? 
"propackage" : "NULL AS propackage", @@ -13470,6 +13475,9 @@ static void dumpFunc(Archive* fout, FuncInfo* finfo) proshippable = PQgetvalue(res, 0, PQfnumber(res, "proshippable")); propackage = PQgetvalue(res, 0, PQfnumber(res, "propackage")); propackageid = PQgetvalue(res, 0, PQfnumber(res, "propackageid")); + parallelCursorName = PQgetvalue(res, 0, PQfnumber(res, "parallelCursorName")); + parallelCursorStrategy = PQgetvalue(res, 0, PQfnumber(res, "parallelCursorStrategy")); + parallelCursorPartKey = PQgetvalue(res, 0, PQfnumber(res, "parallelCursorPartKey")); if ((gdatcompatibility != NULL) && strcmp(gdatcompatibility, B_FORMAT) == 0) { /* get definer user name */ @@ -13691,6 +13699,15 @@ static void dumpFunc(Archive* fout, FuncInfo* finfo) } } + if (((int)strlen(parallelCursorName)) != 0) { + appendPQExpBuffer(q, " PARALLEL_ENABLE (PARTITION %s BY ", parallelCursorName); + if (parallelCursorStrategy[0] == '0') { + appendPQExpBuffer(q, "ANY)"); + } else if (parallelCursorStrategy[0] == '1') { + appendPQExpBuffer(q, "HASH(%s))", parallelCursorPartKey); + } + } + if (isHasPropackage && (propackage[0] == 't')) { appendPQExpBuffer(q, " PACKAGE"); } diff --git a/src/common/backend/catalog/CMakeLists.txt b/src/common/backend/catalog/CMakeLists.txt index 35defa2c6e..767b547cd1 100755 --- a/src/common/backend/catalog/CMakeLists.txt +++ b/src/common/backend/catalog/CMakeLists.txt @@ -15,7 +15,7 @@ set(POSTGRES_BKI_SRCS_S @pg_object.h @pg_synonym.h @toasting.h @indexing.h @gs_obsscaninfo.h @pg_directory.h @pg_hashbucket.h @gs_global_chain.h @gs_global_config.h @pg_streaming_stream.h @pg_streaming_cont_query.h @pg_streaming_reaper_status.h @gs_matview.h @gs_matview_dependency.h @pgxc_slice.h @gs_opt_model.h @pg_recyclebin.h @pg_snapshot.h @gs_model.h @gs_dependencies.h @gs_dependencies_obj.h @gs_package.h @gs_job_argument.h @gs_job_attribute.h @pg_uid.h @gs_db_privilege.h -@pg_replication_origin.h @pg_publication.h @pg_publication_rel.h @pg_subscription.h @gs_sql_patch.h 
@pg_subscription_rel.h" +@pg_replication_origin.h @pg_publication.h @pg_publication_rel.h @pg_subscription.h @gs_sql_patch.h @pg_subscription_rel.h @pg_proc_ext.h" ) diff --git a/src/common/backend/catalog/Makefile b/src/common/backend/catalog/Makefile index 6b7de7b10d..7522310069 100644 --- a/src/common/backend/catalog/Makefile +++ b/src/common/backend/catalog/Makefile @@ -25,7 +25,7 @@ OBJS = catalog.o dependency.o heap.o index.o indexing.o namespace.o aclchk.o \ pg_hashbucket.o cstore_ctlg.o pg_builtin_proc.o streaming_stream.o\ gs_matview.o pgxc_slice.o pg_job_proc.o gs_job_argument.o gs_job_attribute.o pg_uid.o gs_global_config.o\ gs_db_privilege.o pg_publication.o pg_subscription.o gs_utf8_collation.o gs_collation.o gs_gbk_collation.o\ - gs_gb18030_collation.o + gs_gb18030_collation.o pg_proc_ext.o BKIFILES = postgres.bki postgres.description postgres.shdescription @@ -62,7 +62,7 @@ POSTGRES_BKI_SRCS = $(addprefix $(top_srcdir)/src/include/catalog/,\ gs_matview_dependency.h pgxc_slice.h gs_opt_model.h gs_dependencies.h gs_dependencies_obj.h gs_package.h gs_model.h\ pg_recyclebin.h pg_snapshot.h gs_job_argument.h gs_job_attribute.h pg_uid.h gs_db_privilege.h\ pg_replication_origin.h pg_publication.h pg_publication_rel.h pg_subscription.h gs_sql_patch.h\ - pg_subscription_rel.h \ + pg_subscription_rel.h pg_proc_ext.h\ ) # location of Catalog.pm diff --git a/src/common/backend/catalog/pg_proc.cpp b/src/common/backend/catalog/pg_proc.cpp index 1ec062d954..946275a755 100644 --- a/src/common/backend/catalog/pg_proc.cpp +++ b/src/common/backend/catalog/pg_proc.cpp @@ -28,6 +28,7 @@ #include "catalog/gs_package.h" #include "catalog/pg_object.h" #include "catalog/pg_proc.h" +#include "catalog/pg_proc_ext.h" #include "catalog/gs_encrypted_proc.h" #include "catalog/pg_proc_fn.h" #include "catalog/pg_synonym.h" @@ -1064,7 +1065,8 @@ ObjectAddress ProcedureCreate(const char* procedureName, Oid procNamespace, Oid oidvector* parameterTypes, Datum allParameterTypes, Datum 
parameterModes, Datum parameterNames, List* parameterDefaults, Datum proconfig, float4 procost, float4 prorows, int2vector* prodefaultargpos, bool fenced, bool shippable, bool package, bool proIsProcedure, const char *proargsrc, bool isPrivate, - TypeDependExtend* paramTypDependExt, TypeDependExtend* retTypDependExt, CreateFunctionStmt* stmt, bool isPipelined) + TypeDependExtend* paramTypDependExt, TypeDependExtend* retTypDependExt, CreateFunctionStmt* stmt, bool isPipelined, + FunctionPartitionInfo* partInfo) { Oid retval; int parameterCount; @@ -1839,6 +1841,9 @@ ObjectAddress ProcedureCreate(const char* procedureName, Oid procNamespace, Oid /* Post creation hook for new function */ InvokeObjectAccessHook(OAT_POST_CREATE, ProcedureRelationId, retval, 0, NULL); + /* Record PARALLEL_ENABLE PARTITION BY INFO */ + InsertPgProcExt(retval, partInfo); + /* Recode the procedure create time. */ if (OidIsValid(retval)) { if (!is_update) { diff --git a/src/common/backend/catalog/pg_proc_ext.cpp b/src/common/backend/catalog/pg_proc_ext.cpp new file mode 100644 index 0000000000..98c91538cb --- /dev/null +++ b/src/common/backend/catalog/pg_proc_ext.cpp @@ -0,0 +1,155 @@ +/* ------------------------------------------------------------------------- + * + * pg_proc_ext.cpp + * routines to support manipulation of the pg_proc_ext relation + * + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2021, openGauss Contributors + * + * + * IDENTIFICATION + * src/common/backend/catalog/pg_proc_ext.cpp + * + * ------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "catalog/pg_proc_ext.h" +#include "catalog/indexing.h" +#include "utils/builtins.h" +#include "utils/array.h" +#include "utils/syscache.h" +#include "access/heapam.h" + +static inline ArrayType* getPartKeysArr(List* partitionCols); + +/* + * 
@Description: Insert a new record to pg_proc_ext. + */ +void InsertPgProcExt(Oid oid, FunctionPartitionInfo* partInfo) +{ + Datum values[Natts_pg_proc_ext]; + bool nulls[Natts_pg_proc_ext]; + bool replaces[Natts_pg_proc_ext]; + HeapTuple tuple = NULL; + HeapTuple oldtuple = NULL; + Relation rel = NULL; + errno_t rc = 0; + + rel = heap_open(ProcedureExtensionRelationId, RowExclusiveLock); + + oldtuple = SearchSysCache1(PROCEDUREEXTENSIONOID, ObjectIdGetDatum(oid)); + if (partInfo == NULL) { + if (HeapTupleIsValid(oldtuple)) { + simple_heap_delete(rel, &oldtuple->t_self); + ReleaseSysCache(oldtuple); + } + heap_close(rel, RowExclusiveLock); + return; + } + + rc = memset_s(values, sizeof(values), 0, sizeof(values)); + securec_check(rc, "\0", "\0"); + rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); + securec_check_c(rc, "\0", "\0"); + rc = memset_s(replaces, sizeof(replaces), true, sizeof(replaces)); + securec_check_c(rc, "\0", "\0"); + + values[Anum_pg_proc_ext_proc_oid - 1] = ObjectIdGetDatum(oid); + values[Anum_pg_proc_ext_parallel_cursor_seq - 1] = UInt64GetDatum(partInfo->partitionCursorIndex); + values[Anum_pg_proc_ext_parallel_cursor_strategy - 1] = Int16GetDatum(partInfo->strategy); + values[Anum_pg_proc_ext_parallel_cursor_partkey - 1] = PointerGetDatum(getPartKeysArr(partInfo->partitionCols)); + + if (HeapTupleIsValid(oldtuple)) { + replaces[Anum_pg_proc_ext_proc_oid - 1] = false; + tuple = heap_modify_tuple(oldtuple, RelationGetDescr(rel), values, nulls, replaces); + simple_heap_update(rel, &tuple->t_self, tuple); + ReleaseSysCache(oldtuple); + } else { + tuple = heap_form_tuple(RelationGetDescr(rel), values, nulls); + (void)simple_heap_insert(rel, tuple); + } + CatalogUpdateIndexes(rel, tuple); + heap_freetuple_ext(tuple); + heap_close(rel, RowExclusiveLock); +} + +void DeletePgProcExt(Oid oid) +{ + Relation relation = NULL; + HeapTuple tup = NULL; + + relation = heap_open(ProcedureExtensionRelationId, RowExclusiveLock); + + tup = 
SearchSysCache1(PROCEDUREEXTENSIONOID, ObjectIdGetDatum(oid)); + if (HeapTupleIsValid(tup)) { + simple_heap_delete(relation, &tup->t_self); + ReleaseSysCache(tup); + } + heap_close(relation, RowExclusiveLock); +} + +static inline ArrayType* getPartKeysArr(List* partitionCols) +{ + Datum* partKeys = (Datum*)palloc0(list_length(partitionCols) * sizeof(Datum)); + ArrayType* partKeysArr = NULL; + ListCell* lc = NULL; + int i = 0; + foreach (lc, partitionCols) { + char* col = (char*)lfirst(lc); + partKeys[i++] = CStringGetTextDatum(col); + } + partKeysArr = construct_array(partKeys, list_length(partitionCols), TEXTOID, -1, false, 'i'); + return partKeysArr; +} + +int2 GetParallelCursorSeq(Oid oid) +{ + HeapTuple tuple = SearchSysCache1(PROCEDUREEXTENSIONOID, oid); + if (!HeapTupleIsValid(tuple)) { + return -1; + } + + bool isNull; + Datum dat = SysCacheGetAttr(PROCEDUREEXTENSIONOID, tuple, Anum_pg_proc_ext_parallel_cursor_seq, &isNull); + if (isNull) { + ReleaseSysCache(tuple); + return -1; + } + ReleaseSysCache(tuple); + return DatumGetInt16(dat); +} + +FunctionPartitionStrategy GetParallelStrategyAndKey(Oid oid, List** partkey) +{ + FunctionPartitionStrategy strategy = FUNC_PARTITION_ANY; + bool isNull; + HeapTuple tuple = SearchSysCache1(PROCEDUREEXTENSIONOID, ObjectIdGetDatum(oid)); + + if (!HeapTupleIsValid(tuple)) { + return strategy; + } + + Datum dat = SysCacheGetAttr(PROCEDUREEXTENSIONOID, tuple, Anum_pg_proc_ext_parallel_cursor_strategy, &isNull); + if (isNull) { + ReleaseSysCache(tuple); + return strategy; + } + strategy = (FunctionPartitionStrategy)DatumGetInt16(dat); + + dat = SysCacheGetAttr(PROCEDUREEXTENSIONOID, tuple, Anum_pg_proc_ext_parallel_cursor_partkey, &isNull); + + if (!isNull) { + ArrayType* arr = DatumGetArrayTypeP(dat); + Datum* argnames = NULL; + int numargs; + deconstruct_array(arr, TEXTOID, -1, false, 'i', &argnames, NULL, &numargs); + for (int i = 0; i < numargs; i++) { + *partkey = lappend(*partkey, TextDatumGetCString(argnames[i])); + } 
+ } + + ReleaseSysCache(tuple); + return strategy; +} diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 09f330baac..b6bc8ed7eb 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -2281,6 +2281,10 @@ static Stream* _copyStream(const Stream* from) #ifdef USE_SPQ COPY_SCALAR_FIELD(streamID); #endif + if (t_thrd.proc->workingVersionNum >= PARALLEL_ENABLE_VERSION_NUM) { + COPY_SCALAR_FIELD(cursor_expr_level); + COPY_SCALAR_FIELD(cursor_owner_node_id); + } return newnode; } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index f20c800a9b..65d2b39161 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -1210,6 +1210,10 @@ static void _outStream(StringInfo str, Stream* node) #ifdef USE_SPQ WRITE_INT_FIELD(streamID); #endif + if (t_thrd.proc->workingVersionNum >= PARALLEL_ENABLE_VERSION_NUM) { + WRITE_INT_FIELD(cursor_expr_level); + WRITE_INT_FIELD(cursor_owner_node_id); + } } /* diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 3a54b13419..12ffc3017b 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -767,6 +767,12 @@ THR_LOCAL bool skip_read_extern_fields = false; READ_INT_FIELD(stream_level); \ READ_NODE_FIELD(origin_consumer_nodes); \ READ_BOOL_FIELD(is_recursive_local); \ + IF_EXIST(cursor_expr_level) { \ + READ_INT_FIELD(cursor_expr_level); \ + } \ + IF_EXIST(cursor_owner_node_id) { \ + READ_INT_FIELD(cursor_owner_node_id); \ + } \ READ_STREAM_ID(); \ \ READ_DONE(); \ diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index f0b857684b..a75f413607 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -359,6 +359,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); struct CondInfo* condinfo; RotateClause 
*rotateinfo; UnrotateClause *unrotateinfo; + FunctionPartitionInfo *funcPartInfo; } %type stmt schema_stmt @@ -583,9 +584,10 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); %type for_locking_strength %type for_locking_item %type for_locking_clause opt_for_locking_clause for_locking_items -%type locked_rels_list +%type locked_rels_list colid_list %type opt_all +%type parallel_partition_opt %type join_outer join_qual %type join_type @@ -939,7 +941,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER OUTFILE - PACKAGE PACKAGES PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION + PACKAGE PACKAGES PARALLEL_ENABLE PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION PIPELINED /* PGXC_BEGIN */ POOL PRECEDING PRECISION @@ -17012,6 +17014,14 @@ common_func_opt_item: { $$ = makeDefElem("shippable", (Node*)makeInteger(FALSE)); } + | PARALLEL_ENABLE parallel_partition_opt + { + if (!DB_IS_CMPT(A_FORMAT)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("PARALLEL_ENABLE is only supported in A compatibility database."))); + } + $$ = makeDefElem("parallel_enable", (Node*)$2); + } | EXTERNAL SECURITY DEFINER { $$ = makeDefElem("security", (Node *)makeInteger(TRUE)); @@ -17076,6 +17086,55 @@ common_func_opt_item: } ; +parallel_partition_opt: + '(' PARTITION param_name BY ANY ')' + { + $$ = makeNode(FunctionPartitionInfo); + $$->strategy = FUNC_PARTITION_ANY; + $$->partitionCursor = $3; + + } + | '(' PARTITION param_name BY IDENT '(' colid_list ')' ')' + { + if (strcmp($5, "hash") != 0) { + const char* message = "Un-support feature"; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(errstate, 
(errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unrecognized option \"%s\"", $3))); + } + $$ = makeNode(FunctionPartitionInfo); + $$->strategy = FUNC_PARTITION_HASH; + $$->partitionCursor = $3; + $$->partitionCols = $7; + } + | '(' PARTITION param_name BY RANGE '(' colid_list ')' ')' + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("PARALLEL_ENABLE PARTITION BY RANGE is not yet supported."))); + } + | '(' PARTITION param_name BY VALUE_P '(' ColId ')' ')' + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("PARALLEL_ENABLE PARTITION BY VALUE is not yet supported."))); + } + | /* EMPTY */ + { + $$ = NULL; + } + +colid_list: + ColId + { + $$ = list_make1($1); + } + | colid_list ',' ColId + { + $$ = lappend($1, $3); + } + ; + createfunc_opt_item: AS func_as { @@ -30144,6 +30203,7 @@ unreserved_keyword: | OWNER | PACKAGE | PACKAGES + | PARALLEL_ENABLE | PARSER | PARTIAL %prec PARTIAL_EMPTY_PREC | PARTITION diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 2298aed621..722db712d0 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -22,6 +22,7 @@ #include "catalog/pg_proc.h" #include "catalog/gs_package.h" #include "catalog/gs_collation.h" +#include "catalog/pg_proc_ext.h" #include "commands/dbcommands.h" #include "commands/sequence.h" #include "db4ai/predict_by.h" @@ -1866,10 +1867,15 @@ static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) ListCell* args = NULL; Node* result = NULL; - /* Transform the list of arguments ... 
*/ + /* Transform the list of arguments, skip CursorExpr which transformed later */ targs = NIL; foreach (args, fn->args) { - targs = lappend(targs, transformExprRecurse(pstate, (Node*)lfirst(args))); + Node* arg = (Node*)lfirst(args); + if (!IsA(arg, CursorExpression)) { + targs = lappend(targs, transformExprRecurse(pstate, arg)); + } else { + targs = lappend(targs, arg); + } } if (fn->agg_within_group) { @@ -1884,6 +1890,26 @@ static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) /* ... and hand off to ParseFuncOrColumn */ result = ParseFuncOrColumn(pstate, fn->funcname, targs, last_srf, fn, fn->location, fn->call_func); + if (IsA(result, FuncExpr)) { + /* if function is not SRF or pipelined, close smp for all CursorExpressions */ + int2 seq = (!((FuncExpr*)result)->funcretset && + !PROC_IS_PIPELINED(get_func_prokind(((FuncExpr*)result)->funcid))) ? + -1 : GetParallelCursorSeq(((FuncExpr*)result)->funcid); + int2 i = 0; + AutoDopControl dopControl; + foreach (args, ((FuncExpr*)result)->args) { + Node* arg = (Node*)lfirst(args); + if (IsA(arg, CursorExpression)) { + if (i != seq) { + dopControl.CloseSmp(); + } + lfirst(args) = transformCursorExpression(pstate, (CursorExpression*)arg); + dopControl.ResetSmp(); + } + i++; + } + } + if (IsStartWithFunction((FuncExpr*)result) && !pstate->p_hasStartWith && !pstate->p_split_where_for_swcb) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_OPT), @@ -3871,6 +3897,7 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur ListCell* raw_parsetree_cell = NULL; List* stmt_list = NIL; ParseState* parse_state_temp = NULL; + int level = ++u_sess->parser_cxt.cursor_expr_level; ParseState* parse_state_parent = pstate; @@ -3894,6 +3921,15 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur plan_tree = pg_plan_query(query, 0, NULL); + if (IsA(plan_tree->planTree, Stream)) { + ((Stream*)plan_tree->planTree)->cursor_expr_level = level; + + /* 
reset cursor_expr_level */ + if (level == 1) { + u_sess->parser_cxt.cursor_expr_level = 0; + } + } + int nParamExec = 0; parse_state_temp = parse_state_parent; if (parse_state_temp != NULL) { @@ -4093,3 +4129,18 @@ static Node *transformStartWithWhereClauseColumnRef(ParseState *pstate, ColumnRe return NULL; } +PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr) +{ + ListCell* lc = NULL; + foreach (lc, funcexpr->args) { + Node* arg = (Node*)lfirst(lc); + if (IsA(arg, CursorExpression)) { + CursorExpression* cursorExpr = (CursorExpression*)arg; + PlannedStmt* cursorPlan = (PlannedStmt*)cursorExpr->plan; + if (IsA(cursorPlan->planTree, Stream)) { + return cursorPlan; + } + } + } + return NULL; +} \ No newline at end of file diff --git a/src/common/backend/parser/parse_relation.cpp b/src/common/backend/parser/parse_relation.cpp index 6a0a2bdbf6..df06b36f28 100755 --- a/src/common/backend/parser/parse_relation.cpp +++ b/src/common/backend/parser/parse_relation.cpp @@ -1764,6 +1764,17 @@ RangeTblEntry* addRangeTableEntryForFunction( rte->funccolcollations = NIL; rte->alias = alias; + /* + * create_functionscan_path need cursorDop to determine + * wheather functionscan smp or not. + */ + if (IsA(funcexpr, FuncExpr)) { + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)funcexpr); + if (cursorPstmt != NULL) { + rte->cursorDop = cursorPstmt->planTree->lefttree->dop; + } + } + eref = makeAlias(alias ? 
alias->aliasname : funcname, NIL); rte->eref = eref; diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index 1517a80b46..74732fedb6 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -52,6 +52,7 @@ #include "catalog/gs_encrypted_proc.h" #include "catalog/gs_encrypted_columns.h" #include "catalog/gs_package.h" +#include "catalog/pg_proc_ext.h" #include "commands/comment.h" #include "commands/defrem.h" #include "commands/tablespace.h" @@ -66,6 +67,7 @@ #endif #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" +#include "nodes/parsenodes_common.h" #include "optimizer/clauses.h" #include "optimizer/tlist.h" #include "parser/keywords.h" @@ -254,6 +256,7 @@ static text* pg_get_expr_worker(text* expr, Oid relid, const char* relname, int static int print_function_arguments(StringInfo buf, HeapTuple proctup, bool print_table_args, bool print_defaults); static void print_function_ora_arguments(StringInfo buf, HeapTuple proctup); static void print_function_rettype(StringInfo buf, HeapTuple proctup); +static void print_parallel_enable(StringInfo buf, HeapTuple procTup, int2 parallelCursorSeq, Oid funcid); static void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps); #ifdef PGXC static void set_deparse_plan(deparse_namespace* dpns, Plan* plan); @@ -4795,6 +4798,11 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) else if (!proIsProcedure) appendStringInfoString(&buf, " NOT SHIPPABLE"); + int2 parallelCursorSeq = GetParallelCursorSeq(funcid); + if (parallelCursorSeq != -1) { + print_parallel_enable(&buf, proctup, parallelCursorSeq, funcid); + } + Datum propackage = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_package, &isnull); if (!isnull && DatumGetBool(propackage)) appendStringInfoString(&buf, " PACKAGE"); @@ -5135,6 +5143,39 @@ static int print_function_arguments(StringInfo buf, HeapTuple proctup, bool prin return argsprinted; } 
+static void print_parallel_enable(StringInfo buf, HeapTuple procTup, int2 parallelCursorSeq, Oid funcid) +{ + bool isNull = false; + /* Get argument names, if available */ + Datum proargnames = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_proargnames, &isNull); + Datum* elems = NULL; + int nelems; + + Assert(!isNull); + deconstruct_array(DatumGetArrayTypeP(proargnames), TEXTOID, -1, false, 'i', &elems, NULL, &nelems); + + appendStringInfo(buf, " PARALLEL_ENABLE (PARTITION %s BY ", TextDatumGetCString(elems[parallelCursorSeq])); + + FunctionPartitionStrategy strategy; + List* partkey = NIL; + strategy = GetParallelStrategyAndKey(funcid, &partkey); + + if (strategy == FUNC_PARTITION_ANY) { + appendStringInfoString(buf, "ANY)"); + } else if (strategy == FUNC_PARTITION_HASH) { + appendStringInfoString(buf, "HASH("); + ListCell* lc = NULL; + foreach (lc, partkey) { + char* keyName = (char*)lfirst(lc); + if (lnext(lc) != NULL) { + appendStringInfo(buf, "%s,", keyName); + } else { + appendStringInfo(buf, "%s))", keyName); + } + } + } +} + /* * deparse_expression - General utility for deparsing expressions * diff --git a/src/common/backend/utils/cache/relcache.cpp b/src/common/backend/utils/cache/relcache.cpp index 52d36ffe95..f40a3db4fc 100755 --- a/src/common/backend/utils/cache/relcache.cpp +++ b/src/common/backend/utils/cache/relcache.cpp @@ -94,6 +94,7 @@ #include "catalog/pg_partition.h" #include "catalog/pg_pltemplate.h" #include "catalog/pg_proc.h" +#include "catalog/pg_proc_ext.h" #include "catalog/gs_package.h" #include "catalog/pg_publication.h" #include "catalog/pg_publication_rel.h" @@ -349,6 +350,7 @@ static const FormData_pg_attribute Desc_pg_replication_origin[Natts_pg_replicati }; static const FormData_pg_attribute Desc_pg_subscription_rel[Natts_pg_subscription_rel] = {Schema_pg_subscription_rel}; static const FormData_pg_attribute Desc_gs_sql_patch_origin[Natts_gs_sql_patch] = {Schema_gs_sql_patch}; +static const FormData_pg_attribute 
Desc_pg_proc_ext[Natts_pg_proc_ext] = {Schema_pg_proc_ext}; /* Please add to the array in ascending order of oid value */ static struct CatalogRelationBuildParam catalogBuildParam[CATALOG_NUM] = {{DefaultAclRelationId, @@ -751,6 +753,15 @@ static struct CatalogRelationBuildParam catalogBuildParam[CATALOG_NUM] = {{Defau Desc_pg_app_workloadgroup_mapping, false, true}, + {ProcedureExtensionRelationId, + "pg_proc_ext", + ProcedureExtensionRelationId_Rowtype_Id, + false, + false, + Natts_pg_proc_ext, + Desc_pg_proc_ext, + false, + true}, {EnumRelationId, "pg_enum", EnumRelation_Rowtype_Id, false, true, Natts_pg_enum, Desc_pg_enum, false, true}, {SetRelationId, "pg_set", SetRelation_Rowtype_Id, false, true, Natts_pg_set, Desc_pg_set, false, true}, {RangeRelationId, "pg_range", RangeRelation_Rowtype_Id, false, false, Natts_pg_range, Desc_pg_range, false, true}, diff --git a/src/common/backend/utils/cache/syscache.cpp b/src/common/backend/utils/cache/syscache.cpp index fa1271bb6f..a3873cb470 100644 --- a/src/common/backend/utils/cache/syscache.cpp +++ b/src/common/backend/utils/cache/syscache.cpp @@ -70,6 +70,7 @@ #include "catalog/pg_partition_fn.h" #include "catalog/pg_hashbucket.h" #include "catalog/pg_proc.h" +#include "catalog/pg_proc_ext.h" #include "catalog/gs_package.h" #include "catalog/pg_range.h" #include "catalog/pg_rewrite.h" @@ -645,12 +646,12 @@ const cachedesc cacheinfo[] = { 1, {Anum_streaming_cont_query_matrelid, 0, 0, 0}, STREAMING_CONT_QUERY_MATRELID_INDEX_ID_NBUCKETS}, +#ifdef ENABLE_MULTIPLE_NODES {StreamingContQueryRelationId, /* STREAMCQOID */ StreamingContQueryOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, STREAMING_CONT_QUERY_OID_INDEX_ID_NBUCKETS}, -#ifdef ENABLE_MULTIPLE_NODES {StreamingContQueryRelationId, /* STREAMCQRELID */ StreamingContQueryRelidIndexId, 1, @@ -663,6 +664,12 @@ const cachedesc cacheinfo[] = { STREAMING_CONT_QUERY_SCHEMA_CHANGE_INDEX_ID_NBUCKETS}, #endif #ifndef ENABLE_MULTIPLE_NODES + {ProcedureExtensionRelationId, /* 
PROCEDUREEXTENSIONOID */ + ProcExtProcOidIndexId, + 1, + {Anum_pg_proc_ext_proc_oid, 0, 0}, + 8 + }, {EventTriggerRelationId, /* EVENTTRIGGERNAME */ EventTriggerNameIndexId, 1, diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index c7208a2866..1bce896181 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -75,13 +75,13 @@ bool will_shutdown = false; * NEXT | 92899 | ? | ? * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92940; +const uint32 GRAND_VERSION_NUM = 92941; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ - +const uint32 PARALLEL_ENABLE_VERSION_NUM = 92941; const uint32 NAN_INFINITE_TEST_EXPR = 92940; const uint32 STRAIGHT_JOIN_VERSION_NUMBER = 92939; const uint32 INSERT_INTO_SELECT_VERSION_NUM = 92938; diff --git a/src/common/interfaces/libpq/frontend_parser/gram.y b/src/common/interfaces/libpq/frontend_parser/gram.y index faafce763d..8d8fa45252 100755 --- a/src/common/interfaces/libpq/frontend_parser/gram.y +++ b/src/common/interfaces/libpq/frontend_parser/gram.y @@ -565,7 +565,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER OUTFILE - PACKAGE PACKAGES PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION + PACKAGE PACKAGES PARALLEL_ENABLE PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION PIPELINED /* PGXC_BEGIN */ POOL PRECEDING PRECISION @@ -11895,6 +11895,7 @@ unreserved_keyword: | OWNER | OUTFILE | PACKAGE + | PARALLEL_ENABLE | PARSER | PARTIAL %prec PARTIAL_EMPTY_PREC | PARTITION diff --git 
a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 53550cd02b..0abb099cff 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -271,6 +271,7 @@ static void check_record_nest_tableof_index(PLpgSQL_datum* datum); static void check_tableofindex_args(int tableof_var_dno, Oid argtype); static bool need_build_row_for_func_arg(PLpgSQL_rec **rec, PLpgSQL_row **row, int out_arg_num, int all_arg, int *varnos, char *p_argmodes); static void processFunctionRecordOutParam(int varno, Oid funcoid, int* outparam); +static void CheckParallelCursorOpr(PLpgSQL_stmt_fetch* fetch); %} %expect 0 @@ -5870,6 +5871,8 @@ stmt_fetch : K_FETCH opt_fetch_direction cursor_variable K_INTO fetch->bulk_collect = false; fetch->sqlString = plpgsql_get_curline_query(); + CheckParallelCursorOpr(fetch); + $$ = (PLpgSQL_stmt *)fetch; } | K_FETCH opt_fetch_direction cursor_variable K_BULK K_COLLECT K_INTO fetch_into_target fetch_limit_expr @@ -5904,6 +5907,8 @@ stmt_fetch : K_FETCH opt_fetch_direction cursor_variable K_INTO fetch->bulk_collect = true; fetch->sqlString = plpgsql_get_curline_query(); + CheckParallelCursorOpr(fetch); + $$ = (PLpgSQL_stmt *)fetch; } ; @@ -5918,6 +5923,8 @@ stmt_move : K_MOVE opt_fetch_direction cursor_variable ';' fetch->bulk_collect = false; fetch->sqlString = plpgsql_get_curline_query(); + CheckParallelCursorOpr(fetch); + $$ = (PLpgSQL_stmt *)fetch; } ; @@ -14336,3 +14343,28 @@ static void processFunctionRecordOutParam(int varno, Oid funcoid, int* outparam) } } } + +/* + * If the cursor is specified by PARALLEL_ENABLE PARTITION BY, + * only FETCH CURSOR support in function body. 
+ */ +static void CheckParallelCursorOpr(PLpgSQL_stmt_fetch* fetch) +{ + AssertEreport(u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[fetch->curvar]->dtype == PLPGSQL_DTYPE_VAR, + MOD_PLSQL, + "Cursor which would be fetched should be var"); + + PLpgSQL_var* var = (PLpgSQL_var*)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[fetch->curvar]; + if (u_sess->plsql_cxt.parallel_cursor_arg_name == NULL || + strcmp(var->varname, u_sess->plsql_cxt.parallel_cursor_arg_name) != 0) { + return; + } + + if (fetch->bulk_collect) { + return; + } + + if (fetch->direction != FETCH_FORWARD || fetch->expr != NULL || fetch->is_move) { + ereport(ERROR, (errmsg("only support FETCH CURSOR for parallel cursor \"%s\"", var->varname))); + } +} diff --git a/src/gausskernel/optimizer/commands/functioncmds.cpp b/src/gausskernel/optimizer/commands/functioncmds.cpp index 571dedb611..779cbacf36 100644 --- a/src/gausskernel/optimizer/commands/functioncmds.cpp +++ b/src/gausskernel/optimizer/commands/functioncmds.cpp @@ -49,6 +49,7 @@ #include "catalog/pg_namespace.h" #include "catalog/pg_object.h" #include "catalog/pg_proc.h" +#include "catalog/pg_proc_ext.h" #include "catalog/gs_package.h" #include "catalog/pg_proc_fn.h" #include "catalog/pg_synonym.h" @@ -594,7 +595,8 @@ void examine_parameter_list(List* parameters, Oid languageOid, const char* query */ static bool compute_common_attribute(DefElem* defel, DefElem** volatility_item, DefElem** strict_item, DefElem** security_item, DefElem** leakproof_item, List** set_items, DefElem** cost_item, DefElem** rows_item, - DefElem** fencedItem, DefElem** shippable_item, DefElem** package_item, DefElem** pipelined_item) + DefElem** fencedItem, DefElem** shippable_item, DefElem** package_item, DefElem** pipelined_item, + DefElem** parallel_enable_item) { if (strcmp(defel->defname, "volatility") == 0) { if (*volatility_item) @@ -648,6 +650,11 @@ static bool compute_common_attribute(DefElem* defel, DefElem** volatility_item, goto 
duplicate_error; *pipelined_item = defel; + } else if (strcmp(defel->defname, "parallel_enable") == 0) { + if (*parallel_enable_item) + goto duplicate_error; + + *parallel_enable_item = defel; } else return false; @@ -717,7 +724,8 @@ static bool compute_b_attribute(DefElem* defel) */ static List* compute_attributes_sql_style(const List* options, List** as, char** language, bool* windowfunc_p, char* volatility_p, bool* strict_p, bool* security_definer, bool* leakproof_p, ArrayType** proconfig, - float4* procost, float4* prorows, bool* fenced, bool* shippable, bool* package, bool* is_pipelined) + float4* procost, float4* prorows, bool* fenced, bool* shippable, bool* package, bool* is_pipelined, + FunctionPartitionInfo** partInfo) { ListCell* option = NULL; DefElem* as_item = NULL; @@ -734,6 +742,7 @@ static List* compute_attributes_sql_style(const List* options, List** as, char** DefElem* shippable_item = NULL; DefElem* package_item = NULL; DefElem* pipelined_item = NULL; + DefElem* parallel_enable_item = NULL; List* bCompatibilities = NIL; foreach (option, options) { DefElem* defel = (DefElem*)lfirst(option); @@ -761,7 +770,8 @@ static List* compute_attributes_sql_style(const List* options, List** as, char** &fencedItem, &shippable_item, &package_item, - &pipelined_item)) { + &pipelined_item, + ¶llel_enable_item)) { /* recognized common option */ continue; } else if (compute_b_attribute(defel)) { @@ -843,6 +853,18 @@ static List* compute_attributes_sql_style(const List* options, List** as, char** if (package_item != NULL) { *package = intVal(package_item->arg); } + + if (parallel_enable_item != NULL) { + if (volatility_item == NULL) { + *volatility_p = PROVOLATILE_IMMUTABLE; + ereport(NOTICE, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("immutable would be set if parallel_enable specified"))); + } else if (*volatility_p != PROVOLATILE_IMMUTABLE) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("only immutable can be set if 
parallel_enable specified"))); + } + *partInfo = (FunctionPartitionInfo*)parallel_enable_item->arg; + } list_free(set_items); return bCompatibilities; } @@ -1053,6 +1075,7 @@ ObjectAddress CreateFunction(CreateFunctionStmt* stmt, const char* queryString, bool fenced = IS_SINGLE_NODE ? false : true; bool shippable = false; bool package = false; + FunctionPartitionInfo* partInfo = NULL; bool proIsProcedure = stmt->isProcedure; if (!OidIsValid(pkg_oid)) { u_sess->plsql_cxt.debug_query_string = pstrdup(queryString); @@ -1156,7 +1179,7 @@ ObjectAddress CreateFunction(CreateFunctionStmt* stmt, const char* queryString, List *functionOptions = compute_attributes_sql_style((const List *)stmt->options, &as_clause, &language, &isWindowFunc, &volatility, &isStrict, &security, &isLeakProof, &proconfig, &procost, &prorows, &fenced, &shippable, &package, - &isPipelined); + &isPipelined, &partInfo); pipelined_function_sanity_check(stmt, isPipelined); @@ -1320,6 +1343,28 @@ ObjectAddress CreateFunction(CreateFunctionStmt* stmt, const char* queryString, CheckWindowFuncValid(languageOid, prosrc_str); } + if (partInfo != NULL) { + int numargs; + Datum* argnames = NULL; + int i = 0; + deconstruct_array(parameterNames, TEXTOID, -1, false, 'i', &argnames, NULL, &numargs); + for (i = 0; i < numargs; i++) { + char* pname = TextDatumGetCString(argnames[i]); + + if (strcmp(partInfo->partitionCursor, pname) == 0) { + partInfo->partitionCursorIndex = i; + break; + } + } + + if (i == numargs || parameterTypes->values[i] != REFCURSOROID) { + ereport(ERROR, (errmsg("partition expr must be cursor-type parameter"))); + } + + u_sess->plsql_cxt.parallel_cursor_arg_name = MemoryContextStrdup( + SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR), partInfo->partitionCursor); + } + /* * And now that we have all the parameters, and know we're permitted to do * so, go ahead and create the function. 
@@ -1354,7 +1399,8 @@ ObjectAddress CreateFunction(CreateFunctionStmt* stmt, const char* queryString, param_type_depend_ext, ret_type_depend_ext, stmt, - isPipelined); + isPipelined, + partInfo); CreateFunctionComment(address.objectId, functionOptions); pfree_ext(param_type_depend_ext); @@ -1719,6 +1765,9 @@ void RemoveFunctionById(Oid funcOid) heap_close(relation, RowExclusiveLock); } + /* delete pg_proc_ext tuple */ + DeletePgProcExt(funcOid); + /* Recode time of delete function. */ if (funcOid != InvalidOid) { DeletePgObject(funcOid, OBJECT_TYPE_PROC); @@ -2605,7 +2654,8 @@ ObjectAddress AlterFunction(AlterFunctionStmt* stmt) &fencedItem, &shippable_item, &package_item, - &pipelined_item)) { + &pipelined_item, + NULL)) { continue; } else if (compute_b_attribute(defel)) { /* recognized b compatibility options */ @@ -2718,6 +2768,11 @@ ObjectAddress AlterFunction(AlterFunctionStmt* stmt) CreateFunctionComment(funcOid, alterOptions, true); + /* if non-immutable is specified, clear parallel_enable info */ + if (procForm->provolatile != PROVOLATILE_IMMUTABLE) { + DeletePgProcExt(funcOid); + } + /* Recode time of alter funciton. 
*/ if (OidIsValid(funcOid)) { UpdatePgObjectMtime(funcOid, OBJECT_TYPE_PROC); diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp index 1e5dd03e16..d39559fc98 100755 --- a/src/gausskernel/optimizer/plan/createplan.cpp +++ b/src/gausskernel/optimizer/plan/createplan.cpp @@ -30,6 +30,7 @@ #include "catalog/pg_namespace.h" #include "catalog/pg_opfamily.h" #include "catalog/pgxc_group.h" +#include "catalog/pg_proc_ext.h" #include "foreign/fdwapi.h" #include "foreign/foreign.h" #include "miscadmin.h" @@ -3270,7 +3271,7 @@ static FunctionScan* create_functionscan_plan(PlannerInfo* root, Path* best_path ng_convert_to_exec_nodes(&best_path->distribution, best_path->locator_type, RELATION_ACCESS_READ); } #else - scan_plan->scan.plan.exec_type = EXEC_ON_ALL_NODES; + scan_plan->scan.plan.exec_type = EXEC_ON_DATANODES; scan_plan->scan.plan.exec_nodes = ng_convert_to_exec_nodes(&best_path->distribution, best_path->locator_type, RELATION_ACCESS_READ); #endif @@ -6210,6 +6211,40 @@ static FunctionScan* make_functionscan(List* qptlist, List* qpqual, Index scanre node->funccoltypmods = funccoltypmods; node->funccolcollations = funccolcollations; + if (IS_STREAM_PLAN && u_sess->opt_cxt.query_dop > 1) { + FunctionPartitionStrategy strategy; + List* partkey = NIL; + strategy = GetParallelStrategyAndKey(((FuncExpr*)funcexpr)->funcid, &partkey); + + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)funcexpr); + if (cursorPstmt != NULL) { + Plan* cursorPlan = cursorPstmt->planTree; + Stream* stream = (Stream*)cursorPlan; + + /* set plan->dop according to cursorplan */ + inherit_plan_locator_info(plan, cursorPlan->lefttree); + stream->smpDesc.consumerDop = plan->dop; + + /* if FUNC_PARTITION_HASH is specified, set distributed_keys and distriType */ + if (strategy == FUNC_PARTITION_HASH && partkey != NIL) { + ListCell* lc1 = NULL; + foreach (lc1, cursorPlan->targetlist) { + TargetEntry* entry = (TargetEntry*)lfirst(lc1); 
+ ListCell* lc2 = NULL; + foreach (lc2, partkey) { + if (strcmp(entry->resname, (char*)lfirst(lc2)) == 0) { + stream->distribute_keys = lappend(stream->distribute_keys, entry->expr); + break; + } + } + } + plan->distributed_keys = stream->distribute_keys; + stream->smpDesc.distriType = list_length(plan->distributed_keys) > 0 ? + LOCAL_DISTRIBUTE : stream->smpDesc.distriType; + } + } + } + return node; } diff --git a/src/gausskernel/optimizer/plan/streamplan_utils.cpp b/src/gausskernel/optimizer/plan/streamplan_utils.cpp index 8e2499a434..fa2f3fa70d 100755 --- a/src/gausskernel/optimizer/plan/streamplan_utils.cpp +++ b/src/gausskernel/optimizer/plan/streamplan_utils.cpp @@ -27,6 +27,7 @@ #include "parser/parse_clause.h" #include "parser/parse_oper.h" #include "parser/parse_relation.h" +#include "parser/parse_expr.h" #include "pgxc/groupmgr.h" #include "pgxc/poolmgr.h" #include "pgxc/pruningslice.h" @@ -1854,6 +1855,13 @@ void finalize_node_id(Plan* result_plan, int* plan_node_id, int* parent_node_id, *parent_node_id = save_parent_id; } } break; + case T_FunctionScan: { + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)((FunctionScan*)result_plan)->funcexpr); + if (cursorPstmt != NULL) { + Stream* stream = (Stream*)cursorPstmt->planTree; + stream->cursor_owner_node_id = result_plan->plan_node_id; + } + } break; default: if (result_plan->lefttree) { diff --git a/src/gausskernel/optimizer/util/pathnode.cpp b/src/gausskernel/optimizer/util/pathnode.cpp index 54e2891c56..969bdcd74c 100755 --- a/src/gausskernel/optimizer/util/pathnode.cpp +++ b/src/gausskernel/optimizer/util/pathnode.cpp @@ -3713,6 +3713,7 @@ Path* create_functionscan_path(PlannerInfo* root, RelOptInfo* rel, Relids requir pathnode->param_info = get_baserel_parampathinfo(root, rel, required_outer); pathnode->pathkeys = NIL; /* for now, assume unordered result */ pathnode->exec_type = SetBasePathExectype(root, rel); + pathnode->dop = rel->cursorDop; #ifdef STREAMPLAN 
pathnode->distribute_keys = rel->distribute_keys; diff --git a/src/gausskernel/optimizer/util/pgxcship.cpp b/src/gausskernel/optimizer/util/pgxcship.cpp index a877a2cdec..f347eee247 100644 --- a/src/gausskernel/optimizer/util/pgxcship.cpp +++ b/src/gausskernel/optimizer/util/pgxcship.cpp @@ -2522,7 +2522,8 @@ bool pgxc_is_func_shippable(Oid funcid, shipping_context* context) return true; } #endif - if (proshippable) { + /* centralized scenarios do not use proshippable to determine */ + if (IS_PGXC_COORDINATOR && proshippable) { return true; } } else if (PROVOLATILE_VOLATILE == provolatile && context != NULL && !context->disallow_volatile_func_shippable) { @@ -2553,7 +2554,8 @@ bool pgxc_is_func_shippable(Oid funcid, shipping_context* context) break; } - if (proshippable) { + /* centralized scenarios does not use proshippable to determine */ + if (IS_PGXC_COORDINATOR && proshippable) { return true; } } diff --git a/src/gausskernel/optimizer/util/relnode.cpp b/src/gausskernel/optimizer/util/relnode.cpp index 4ce26b2121..3dd1e2bd2f 100755 --- a/src/gausskernel/optimizer/util/relnode.cpp +++ b/src/gausskernel/optimizer/util/relnode.cpp @@ -363,8 +363,11 @@ RelOptInfo* build_simple_rel(PlannerInfo* root, int relid, RelOptKind reloptkind /* Save the finished struct in the query's simple_rel_array */ root->simple_rel_array[relid] = rel; - if (rel->rtekind == RTE_RELATION) + if (rel->rtekind == RTE_RELATION) { set_local_rel_size(root, rel); + } else if (rel->rtekind == RTE_FUNCTION) { + rel->cursorDop = rte->cursorDop; + } /* * This is a convenient spot at which to note whether rels participating diff --git a/src/gausskernel/process/stream/execStream.cpp b/src/gausskernel/process/stream/execStream.cpp index d6654253d0..ec9d7a31ee 100755 --- a/src/gausskernel/process/stream/execStream.cpp +++ b/src/gausskernel/process/stream/execStream.cpp @@ -41,6 +41,7 @@ #include "pgxc/pgxcnode.h" #include "parser/parse_type.h" #include "parser/parsetree.h" +#include 
"parser/parse_expr.h" #include "utils/memutils.h" #include "commands/dbcommands.h" #include "miscadmin.h" @@ -619,7 +620,8 @@ static void InitStream(StreamFlowCtl* ctl, StreamTransType transType) key.queryId = pstmt->queryId; key.planNodeId = plan->plan_node_id; - + key.cursorExprLevel = streamNode->cursor_expr_level; + key.cursorParentNodeId = streamNode->cursor_owner_node_id; /* * MPPDB with-recursive support */ @@ -769,7 +771,8 @@ static void InitStream(StreamFlowCtl* ctl, StreamTransType transType) /* Set smp identifier. */ key.smpIdentifier = i; producer = New(u_sess->stream_cxt.stream_runtime_mem_cxt) StreamProducer( - key, pstmt, streamNode, u_sess->stream_cxt.stream_runtime_mem_cxt, producerConnNum, transType); + key, ctl->cursorPstmt != NULL ? ctl->cursorPstmt : pstmt, streamNode, + u_sess->stream_cxt.stream_runtime_mem_cxt, producerConnNum, transType); producer->setSharedContext(sharedContext); producer->setUniqueSQLKey(u_sess->unique_sql_cxt.unique_sql_id, u_sess->unique_sql_cxt.unique_sql_user_id, u_sess->unique_sql_cxt.unique_sql_cn_id); @@ -967,6 +970,17 @@ static void InitStreamFlow(StreamFlowCtl* ctl) ctl->plan = oldPlan->righttree; InitStreamFlow(ctl); } break; + case T_FunctionScan: { + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)((FunctionScan*)oldPlan)->funcexpr); + if (cursorPstmt != NULL) { + Stream* cursorPlan = (Stream*)(cursorPstmt->planTree); + ctl->plan = (Plan*)cursorPlan; + ctl->cursorPstmt = cursorPstmt; + + InitStreamFlow(ctl); + break; + } + } break; default: if (oldPlan->lefttree) { ctl->plan = oldPlan->lefttree; @@ -1085,6 +1099,7 @@ void BuildStreamFlow(PlannedStmt* plan) ctl.subConsumerList = &topConsumerList; ctl.threadNum = &threadNum; ctl.dummyThread = ThreadIsDummy(plan->planTree); + ctl.cursorPstmt = NULL; /* Init check info. 
*/ SetCheckInfo(&ctl.checkInfo, plan->planTree); @@ -1176,6 +1191,8 @@ void SetupStreamRuntime(StreamState* node) key.queryId = node->ss.ps.state->es_plannedstmt->queryId; key.planNodeId = streamNode->scan.plan.plan_node_id; + key.cursorExprLevel = streamNode->cursor_expr_level; + key.cursorParentNodeId = streamNode->cursor_owner_node_id; Assert(u_sess->stream_cxt.global_obj != NULL); pair = u_sess->stream_cxt.global_obj->popStreamPair(key); @@ -1214,6 +1231,8 @@ static void StartupStreamThread(StreamState* node) key.queryId = node->ss.ps.state->es_plannedstmt->queryId; key.planNodeId = node->ss.ps.plan->plan_node_id; + key.cursorExprLevel = ((Stream*)node->ss.ps.plan)->cursor_expr_level; + key.cursorParentNodeId = ((Stream*)node->ss.ps.plan)->cursor_owner_node_id; Assert(u_sess->stream_cxt.global_obj != NULL); pair = u_sess->stream_cxt.global_obj->popStreamPair(key); Assert(pair->producerList != NULL); diff --git a/src/gausskernel/process/stream/streamCore.cpp b/src/gausskernel/process/stream/streamCore.cpp index c5f33f79f3..375a888d88 100755 --- a/src/gausskernel/process/stream/streamCore.cpp +++ b/src/gausskernel/process/stream/streamCore.cpp @@ -751,7 +751,9 @@ StreamPair* StreamNodeGroup::popStreamPair(StreamKey key) foreach (cell, m_streamPairList) { pair = (StreamPair*)lfirst(cell); - if (pair->key.queryId == key.queryId && pair->key.planNodeId == key.planNodeId) + if (pair->key.queryId == key.queryId && pair->key.planNodeId == key.planNodeId && + pair->key.cursorExprLevel == key.cursorExprLevel && + pair->key.cursorParentNodeId == key.cursorParentNodeId) return pair; } diff --git a/src/gausskernel/process/stream/streamMain.cpp b/src/gausskernel/process/stream/streamMain.cpp index 312027c263..2a41e54ba7 100755 --- a/src/gausskernel/process/stream/streamMain.cpp +++ b/src/gausskernel/process/stream/streamMain.cpp @@ -296,7 +296,7 @@ void ExtractProduerInfo() u_sess->proc_cxt.MyProcPort->user_name = u_sess->stream_cxt.producer_obj->getUserName(); /* 
runtimethreadinstr */ - if (u_sess->instr_cxt.global_instr) { + if (u_sess->instr_cxt.global_instr && u_sess->stream_cxt.producer_obj->getCursorExprLevel() == 0) { Assert(u_sess->instr_cxt.thread_instr == NULL); int segmentId = u_sess->stream_cxt.producer_obj->getPlan()->planTree->plan_node_id; u_sess->instr_cxt.thread_instr = u_sess->instr_cxt.global_instr->allocThreadInstrumentation(segmentId); @@ -779,8 +779,9 @@ void StreamExit() closeAllVfds(); - AtProcExit_Buffers(0, 0); + /* ShutdownPostgres would release buffer under AbortOutOfAnyTransaction */ ShutdownPostgres(0, 0); + AtProcExit_Buffers(0, 0); if(!EnableLocalSysCache()) { AtProcExit_Files(0, 0); } diff --git a/src/gausskernel/process/stream/streamProducer.cpp b/src/gausskernel/process/stream/streamProducer.cpp index 9ad1c0a361..4034f040e8 100755 --- a/src/gausskernel/process/stream/streamProducer.cpp +++ b/src/gausskernel/process/stream/streamProducer.cpp @@ -1562,6 +1562,11 @@ WLMGeneralParam StreamProducer::getWlmParams() return m_wlmParams; } +int StreamProducer::getCursorExprLevel() +{ + return m_key.cursorExprLevel; +} + uint32 StreamProducer::getExplainThreadid() { return m_explain_thread_id; diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp index f49e558cf0..1a8ea3221d 100755 --- a/src/gausskernel/process/tcop/postgres.cpp +++ b/src/gausskernel/process/tcop/postgres.cpp @@ -8404,6 +8404,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam u_sess->parser_cxt.has_set_uservar = false; u_sess->parser_cxt.has_equal_uservar = false; u_sess->parser_cxt.stmt = NULL; + u_sess->parser_cxt.cursor_expr_level = 0; OpFusion::tearDown(u_sess->exec_cxt.CurrentOpFusionObj); /* init pbe execute status when long jump */ u_sess->xact_cxt.pbe_execute_complete = true; diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp index 96d8a737ea..42362a9387 100755 --- 
a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -882,6 +882,7 @@ static void knl_u_plpgsql_init(knl_u_plpgsql_context* plsql_cxt) plsql_cxt->isCreatePkgFunction = false; plsql_cxt->currCompilingObjStatus = true; plsql_cxt->need_init = true; + plsql_cxt->parallel_cursor_arg_name = NULL; } static void knl_u_stat_init(knl_u_stat_context* stat_cxt) diff --git a/src/gausskernel/storage/ipc/procsignal.cpp b/src/gausskernel/storage/ipc/procsignal.cpp index 1879d85dcc..6fe1e33342 100755 --- a/src/gausskernel/storage/ipc/procsignal.cpp +++ b/src/gausskernel/storage/ipc/procsignal.cpp @@ -286,8 +286,8 @@ void procsignal_sigusr1_handler(SIGNAL_ARGS) WLMCheckSigRecvData(); if (CheckProcSignal(PROCSIG_SPACE_LIMIT)) WLMCheckSpaceLimit(); -#if (!defined ENABLE_MULTIPLE_NODES) && (!defined USE_SPQ) - if (CheckProcSignal(PROCSIG_STREAM_STOP_CHECK)) +#ifndef ENABLE_MULTIPLE_NODES + if (CheckProcSignal(PROCSIG_STREAM_STOP_CHECK) && !IS_SPQ_RUNNING) StreamMarkStop(); #endif #endif diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 59ba1b2158..8397776528 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -62,6 +62,6 @@ #define NAILED_IN_CATALOG_NUM 8 -#define CATALOG_NUM 109 +#define CATALOG_NUM 110 #endif diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h index 679453fb01..519552d8cb 100644 --- a/src/include/catalog/indexing.h +++ b/src/include/catalog/indexing.h @@ -690,6 +690,9 @@ DECLARE_UNIQUE_INDEX(pg_event_trigger_evtname_index, 3486, on pg_event_trigger u DECLARE_UNIQUE_INDEX(pg_event_trigger_oid_index, 3487, on pg_event_trigger using btree(oid oid_ops)); #define EventTriggerOidIndexId 3487 +DECLARE_UNIQUE_INDEX(pg_proc_ext_proc_oid_index, 3488, on pg_proc_ext using btree(proc_oid oid_ops)); +#define ProcExtProcOidIndexId 3488 + /* last step of initialization script: build the indexes declared above */ BUILD_INDICES 
diff --git a/src/include/catalog/pg_proc_ext.h b/src/include/catalog/pg_proc_ext.h new file mode 100644 index 0000000000..46c8d52f88 --- /dev/null +++ b/src/include/catalog/pg_proc_ext.h @@ -0,0 +1,65 @@ +/*------------------------------------------------------------------------- + * + * pg_proc_ext.h + * extension of pg_proc + * + * + * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/catalog/pg_proc_ext.h + * + * NOTES + * the genbki.pl script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_PROC_EXT_H +#define PG_PROC_EXT_H + +#include "catalog/genbki.h" +#include "nodes/parsenodes_common.h" + +/* ---------------- + * pg_proc_ext definition. cpp turns this into + * typedef struct FormData_pg_proc_ext + * ---------------- + */ +#define ProcedureExtensionRelationId 3483 +#define ProcedureExtensionRelationId_Rowtype_Id 11663 + +CATALOG(pg_proc_ext,3483) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO +{ + Oid proc_oid; /* procedure oid */ + int2 parallel_cursor_seq; /* specify which cursor arg to be parallel for function */ + int2 parallel_cursor_strategy; /* specify what strategy to partition for parallel cursor */ +#ifdef CATALOG_VARLEN + text parallel_cursor_partkey[1]; /* specify what keys to partition for parallel cursor */ +#endif +} FormData_pg_proc_ext; + +/* ---------------- + * Form_pg_proc_ext corresponds to a pointer to a tuple with + * the format of pg_proc_ext relation. 
+ * ---------------- + */ +typedef FormData_pg_proc_ext *Form_pg_proc_ext; + +/* ---------------- + * compiler constants for pg_proc_ext + * ---------------- + */ +#define Natts_pg_proc_ext 4 +#define Anum_pg_proc_ext_proc_oid 1 +#define Anum_pg_proc_ext_parallel_cursor_seq 2 +#define Anum_pg_proc_ext_parallel_cursor_strategy 3 +#define Anum_pg_proc_ext_parallel_cursor_partkey 4 + +extern void InsertPgProcExt(Oid oid, FunctionPartitionInfo* partInfo); +extern int2 GetParallelCursorSeq(Oid oid); +extern FunctionPartitionStrategy GetParallelStrategyAndKey(Oid oid, List** partkey); +extern void DeletePgProcExt(Oid oid); + +#endif /* PG_PROC_EXT_H */ + diff --git a/src/include/catalog/pg_proc_fn.h b/src/include/catalog/pg_proc_fn.h index 39fc69d458..6e77805fd1 100644 --- a/src/include/catalog/pg_proc_fn.h +++ b/src/include/catalog/pg_proc_fn.h @@ -53,7 +53,8 @@ extern ObjectAddress ProcedureCreate(const char *procedureName, TypeDependExtend* paramTypDependExt = NULL, TypeDependExtend* retTypDependExt = NULL, CreateFunctionStmt* stmt = NULL, - bool isPipelined = false + bool isPipelined = false, + FunctionPartitionInfo* partInfo = NULL ); extern bool function_parse_error_transpose(const char *prosrc); diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_941.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_941.sql new file mode 100644 index 0000000000..b8838c3a0e --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_941.sql @@ -0,0 +1,4 @@ +--rollback TABLE +DROP INDEX IF EXISTS pg_proc_ext_proc_oid_index; +DROP TYPE IF EXISTS pg_catalog.pg_proc_ext; +DROP TABLE IF EXISTS pg_catalog.pg_proc_ext; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_941.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_941.sql new file mode 100644 
index 0000000000..b8838c3a0e --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_941.sql @@ -0,0 +1,4 @@ +--rollback TABLE +DROP INDEX IF EXISTS pg_proc_ext_proc_oid_index; +DROP TYPE IF EXISTS pg_catalog.pg_proc_ext; +DROP TABLE IF EXISTS pg_catalog.pg_proc_ext; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql new file mode 100644 index 0000000000..3f4ca4a4e9 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql @@ -0,0 +1,15 @@ +--upgrade TABLE +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 3483, 0, 0, 0; +CREATE TABLE IF NOT EXISTS pg_catalog.pg_proc_ext +( + proc_oid oid not null, + parallel_cursor_seq int2 not null, + parallel_cursor_strategy int2 not null, + parallel_cursor_partkey text[] not null +)WITH(oids=false) TABLESPACE pg_default; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3488; +CREATE INDEX pg_proc_ext_proc_oid_index ON pg_catalog.pg_proc_ext USING BTREE(proc_oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +GRANT SELECT ON pg_catalog.pg_proc_ext TO PUBLIC; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql new file mode 100644 index 0000000000..c9fb55f47c --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql @@ -0,0 +1,15 @@ +--upgrade TABLE +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 3483, 0, 0, 0; +CREATE TABLE IF NOT EXISTS pg_catalog.pg_proc_ext +( + proc_oid oid not null, + parallel_cursor_seq int2 
not null, + parallel_cursor_strategy int2 not null, + parallel_cursor_partkey text[] not null +)WITH(oids=false) TABLESPACE pg_default; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3488; +CREATE INDEX pg_proc_ext_proc_oid_index ON pg_catalog.pg_proc_ext USING BTREE(proc_oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +GRANT SELECT ON pg_catalog.pg_proc_ext TO PUBLIC; diff --git a/src/include/distributelayer/streamCore.h b/src/include/distributelayer/streamCore.h index 76b455c353..a117a30326 100755 --- a/src/include/distributelayer/streamCore.h +++ b/src/include/distributelayer/streamCore.h @@ -72,6 +72,8 @@ typedef struct { uint64 queryId; /* u_sess->debug_query_id */ unsigned int planNodeId; unsigned int smpIdentifier; + int cursorExprLevel; + unsigned int cursorParentNodeId; } StreamKey; typedef union { diff --git a/src/include/distributelayer/streamProducer.h b/src/include/distributelayer/streamProducer.h index de2babd33c..5f5983b61e 100644 --- a/src/include/distributelayer/streamProducer.h +++ b/src/include/distributelayer/streamProducer.h @@ -198,6 +198,8 @@ public: /* Get the wlm params. */ WLMGeneralParam getWlmParams(); + int getCursorExprLevel(); + uint32 getExplainThreadid(); unsigned char getExplainTrack(); diff --git a/src/include/executor/exec/execStream.h b/src/include/executor/exec/execStream.h index 09f049dacd..e7ff763b47 100644 --- a/src/include/executor/exec/execStream.h +++ b/src/include/executor/exec/execStream.h @@ -70,6 +70,7 @@ typedef struct StreamFlowCtl { int* threadNum; /* The number of stream thread. */ bool dummyThread; /* If it's a dummy stream node? */ StreamFlowCheckInfo checkInfo; /* Check if consumer match producer between stream node.
*/ + PlannedStmt* cursorPstmt; } StreamFlowCtl; typedef union { diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 8dae1a5667..e0782ea9dc 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -462,6 +462,7 @@ typedef struct knl_u_parser_context { bool has_set_uservar; bool has_equal_uservar; bool is_straight_join; + int cursor_expr_level; } knl_u_parser_context; typedef struct knl_u_trigger_context { @@ -1734,6 +1735,7 @@ typedef struct knl_u_plpgsql_context { HTAB* plpgsql_lock_objects; bool need_init; + char* parallel_cursor_arg_name; } knl_u_plpgsql_context; //this is used to define functions in package diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index ce8b3908eb..6e07ede951 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -149,6 +149,7 @@ extern const uint32 CURSOR_EXPRESSION_VERSION_NUMBER; extern const uint32 ROTATE_UNROTATE_VERSION_NUM; extern const uint32 FLOAT_VERSION_NUMBER; extern const uint32 STRAIGHT_JOIN_VERSION_NUMBER; +extern const uint32 PARALLEL_ENABLE_VERSION_NUM; extern void register_backend_version(uint32 backend_version); extern bool contain_backend_version(uint32 version_number); diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 12819cea10..447e758ba0 100755 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -661,6 +661,7 @@ typedef enum NodeTag { T_SqlLoadColExpr, T_AutoIncrement, T_RenameCell, + T_FunctionPartitionInfo, /* * TAGS FOR REPLICATION GRAMMAR PARSE NODES (replnodes.h) */ diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index fd2099a58a..daea0a285f 100755 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -382,6 +382,7 @@ typedef struct RangeTblEntry { * Select * from table_name subpartition (subpartition_name); * or delete from table_name partition (partition_name, ...) 
*/ + int cursorDop; /* for functionscan with cursor param */ } RangeTblEntry; /* diff --git a/src/include/nodes/parsenodes_common.h b/src/include/nodes/parsenodes_common.h index 6ddb03ab9b..41c909360d 100644 --- a/src/include/nodes/parsenodes_common.h +++ b/src/include/nodes/parsenodes_common.h @@ -328,6 +328,19 @@ typedef struct SortBy { int location; /* operator location, or -1 if none/unknown */ } SortBy; +typedef enum FunctionPartitionStrategy { + FUNC_PARTITION_ANY = 0, + FUNC_PARTITION_HASH +} FunctionPartitionStrategy; + +typedef struct FunctionPartitionInfo { + NodeTag type; + FunctionPartitionStrategy strategy; + char* partitionCursor; + int partitionCursorIndex; + List* partitionCols; +} FunctionPartitionInfo; + /* * WindowDef - raw representation of WINDOW and OVER clauses * diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index 53190e5288..855c207da8 100755 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -867,6 +867,7 @@ typedef struct RelOptInfo { unsigned int num_data_nodes = 0; //number of distributing data nodes List* partial_pathlist; /* partial Paths */ + int cursorDop; } RelOptInfo; /* diff --git a/src/include/optimizer/stream_cost.h b/src/include/optimizer/stream_cost.h index 4316d40fcb..cee93c4aa8 100644 --- a/src/include/optimizer/stream_cost.h +++ b/src/include/optimizer/stream_cost.h @@ -89,6 +89,8 @@ typedef struct Stream { #ifdef USE_SPQ int streamID; #endif + int cursor_expr_level; + int cursor_owner_node_id; } Stream; extern void compute_stream_cost(StreamType type, char locator_type, double subrows, double subgblrows, diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h index 25d2f41eb3..eccacfd971 100644 --- a/src/include/parser/kwlist.h +++ b/src/include/parser/kwlist.h @@ -464,6 +464,7 @@ PG_KEYWORD("owned", OWNED, UNRESERVED_KEYWORD) PG_KEYWORD("owner", OWNER, UNRESERVED_KEYWORD) PG_KEYWORD("package", PACKAGE, UNRESERVED_KEYWORD) PG_KEYWORD("packages", PACKAGES, 
UNRESERVED_KEYWORD) +PG_KEYWORD("parallel_enable", PARALLEL_ENABLE, UNRESERVED_KEYWORD) PG_KEYWORD("parser", PARSER, UNRESERVED_KEYWORD) PG_KEYWORD("partial", PARTIAL, UNRESERVED_KEYWORD) PG_KEYWORD("partition", PARTITION, UNRESERVED_KEYWORD) diff --git a/src/include/parser/parse_expr.h b/src/include/parser/parse_expr.h index 1285460d35..763ae33979 100644 --- a/src/include/parser/parse_expr.h +++ b/src/include/parser/parse_expr.h @@ -39,5 +39,6 @@ extern bool IsQuerySWCBRewrite(Query *query); extern bool IsSWCBRewriteRTE(RangeTblEntry *rte); extern Datum GetTypeZeroValue(Form_pg_attribute att_tup); typedef Datum (*getTypeZeroValueFunc)(Form_pg_attribute att_tup); +extern PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr); #endif /* PARSE_EXPR_H */ diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h index 854013c35c..e0e626776c 100644 --- a/src/include/utils/syscache.h +++ b/src/include/utils/syscache.h @@ -128,12 +128,13 @@ enum SysCacheIdentifier { STREAMCQID, STREAMCQLOOKUPID, STREAMCQMATRELID, - STREAMCQOID, #ifdef ENABLE_MULTIPLE_NODES + STREAMCQOID, STREAMCQRELID, STREAMCQSCHEMACHANGE, #endif #ifndef ENABLE_MULTIPLE_NODES + PROCEDUREEXTENSIONOID, EVENTTRIGGERNAME, EVENTTRIGGEROID, #endif -- Gitee From 584d420639f0fa594fd4a34d2dbbb38120378360 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 15 Jul 2024 15:14:46 +0800 Subject: [PATCH 058/347] add testcase smp_cursor&¶llel_enable_function --- .../expected/parallel_enable_function.out | 670 ++++++++++++++++++ src/test/regress/expected/smp_cursor.out | 523 ++++++++++++++ src/test/regress/input/gs_dump_package.source | 42 ++ .../regress/output/gs_dump_package.source | 69 ++ src/test/regress/parallel_schedule0 | 2 +- src/test/regress/parallel_schedule0A | 2 +- .../regress/sql/parallel_enable_function.sql | 362 ++++++++++ src/test/regress/sql/smp_cursor.sql | 82 +++ 8 files changed, 1750 insertions(+), 2 deletions(-) create mode 100644 
src/test/regress/expected/parallel_enable_function.out create mode 100644 src/test/regress/expected/smp_cursor.out create mode 100644 src/test/regress/sql/parallel_enable_function.sql create mode 100644 src/test/regress/sql/smp_cursor.sql diff --git a/src/test/regress/expected/parallel_enable_function.out b/src/test/regress/expected/parallel_enable_function.out new file mode 100644 index 0000000000..36f755b031 --- /dev/null +++ b/src/test/regress/expected/parallel_enable_function.out @@ -0,0 +1,670 @@ +create schema parallel_enable_function; +set search_path=parallel_enable_function; +create table employees (employee_id number(6), department_id NUMBER, first_name varchar2(30), last_name varchar2(30), email varchar2(30), phone_number varchar2(30)); +BEGIN + FOR i IN 1..100 LOOP + INSERT INTO employees VALUES (i, 60, 'abc', 'def', '123', '123'); + END LOOP; + COMMIT; +END; +/ +CREATE TYPE my_outrec_typ AS ( + employee_id numeric(6,0), + department_id numeric, + first_name character varying(30), + last_name character varying(30), + email character varying(30), + phone_number character varying(30) +); +-- create srf function with parallel_enable +CREATE OR REPLACE FUNCTION hash_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END hash_srf; +/ +NOTICE: immutable would be set if parallel_enable specified +CREATE OR REPLACE FUNCTION any_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, 
out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END any_srf; +/ +NOTICE: immutable would be set if parallel_enable specified +-- create function with multi-partkey +CREATE OR REPLACE FUNCTION multi_partkey_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id, department_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END multi_partkey_srf; +/ +NOTICE: immutable would be set if parallel_enable specified +-- create pipelined function +create type table_my_outrec_typ is table of my_outrec_typ; +CREATE OR REPLACE FUNCTION pipelined_table_f (p SYS_REFCURSOR) RETURN table_my_outrec_typ pipelined parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + pipe row(out_rec); + END LOOP; +END pipelined_table_f; +/ +NOTICE: immutable would be set if parallel_enable specified +CREATE OR REPLACE FUNCTION pipelined_array_f (p SYS_REFCURSOR) RETURN _employees PIPELINED parallel_enable (partition p by any) + IS + in_rec my_outrec_typ; + BEGIN +LOOP + FETCH p INTO in_rec; + EXIT WHEN p%NOTFOUND; + PIPE ROW (in_rec); + END LOOP; +END pipelined_array_f; +/ +NOTICE: immutable would be set if parallel_enable specified +-- without partition by +CREATE OR REPLACE FUNCTION no_partition_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO 
out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END no_partition_srf; +/ +NOTICE: immutable would be set if parallel_enable specified +-- call function +set query_dop = 1002; +explain (costs off) select * from hash_srf(cursor (select * from employees)) limit 10; + QUERY PLAN +---------------------------------------------- + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Function Scan on hash_srf +(4 rows) + +select * from hash_srf(cursor (select * from employees)) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +(10 rows) + +explain (costs off) select * from any_srf(cursor (select * from employees)) limit 10; + QUERY PLAN +---------------------------------------------- + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Function Scan on any_srf +(4 rows) + +select * from any_srf(cursor (select * from employees)) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +(10 rows) + +explain (costs off) select * from pipelined_table_f(cursor (select * from employees)) limit 10; + QUERY PLAN +------------------------------------------------------ + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Function Scan on pipelined_table_f +(4 rows) + +select * from pipelined_table_f(cursor (select * from employees)) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* 
+--?* +--?* +(10 rows) + +explain (costs off) select * from multi_partkey_srf(cursor (select * from employees)) limit 10; + QUERY PLAN +------------------------------------------------------ + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Function Scan on multi_partkey_srf +(4 rows) + +select * from multi_partkey_srf(cursor (select * from employees)) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +(10 rows) + +explain (costs off) select * from pipelined_array_f(cursor (select * from employees)) limit 10; + QUERY PLAN +------------------------------------------------------ + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Function Scan on pipelined_array_f +(4 rows) + +select * from pipelined_array_f(cursor (select * from employees)) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +(10 rows) + +explain (costs off) select * from no_partition_srf(cursor (select * from employees)) limit 10; + QUERY PLAN +----------------------------------------- + Limit + -> Function Scan on no_partition_srf +(2 rows) + +select * from no_partition_srf(cursor (select * from employees)) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- + 1 | 60 | abc | def | 123 | 123 + 2 | 60 | abc | def | 123 | 123 + 3 | 60 | abc | def | 123 | 123 + 4 | 60 | abc | def | 123 | 123 + 5 | 60 | abc | def | 123 | 123 + 6 | 60 | abc | def | 123 | 123 + 7 | 60 | abc | def | 123 | 123 + 8 | 60 | abc | def | 123 | 123 + 9 | 60 | abc | def | 123 | 123 + 10 | 60 | abc | def | 123 | 123 +(10 rows) + +-- test 
count(*) +explain (costs off) select count(*) from hash_srf(cursor (select * from employees)); + QUERY PLAN +---------------------------------------------- + Aggregate + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Aggregate + -> Function Scan on hash_srf +(4 rows) + +select count(*) from hash_srf(cursor (select * from employees)); + count +------- + 100 +(1 row) + +-- test multi cursor args +CREATE OR REPLACE FUNCTION multi_cursor_srf (p1 SYS_REFCURSOR, p2 SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p1 by hash(employee_id)) IS + out_rec_1 my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); + out_rec_2 my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p1 INTO out_rec_1.employee_id, out_rec_1.department_id, out_rec_1.first_name, out_rec_1.last_name, out_rec_1.email, out_rec_1.phone_number; -- input row + EXIT WHEN p1%NOTFOUND; + FETCH p2 INTO out_rec_2.employee_id, out_rec_2.department_id, out_rec_2.first_name, out_rec_2.last_name, out_rec_2.email, out_rec_2.phone_number; -- input row + EXIT WHEN p2%NOTFOUND; + return next out_rec_1; + END LOOP; + RETURN; +END multi_cursor_srf; +/ +NOTICE: immutable would be set if parallel_enable specified +explain (costs off) select * from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)) limit 10; + QUERY PLAN +----------------------------------------------------- + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Function Scan on multi_cursor_srf +(4 rows) + +select * from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +(10 rows) + +explain (costs off) select count(*) from multi_cursor_srf(cursor (select * from employees), 
cursor (select * from employees)); + QUERY PLAN +----------------------------------------------------- + Aggregate + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Aggregate + -> Function Scan on multi_cursor_srf +(4 rows) + +select count(*) from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)); + count +------- + 100 +(1 row) + +-- nested function call +explain (costs off) select * from hash_srf(cursor (select * from hash_srf(cursor (select * from employees)))) limit 10; + QUERY PLAN +---------------------------------------------- + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Function Scan on hash_srf +(4 rows) + +select * from hash_srf(cursor (select * from hash_srf(cursor (select * from employees)))) limit 10; + employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+-------------- +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +(10 rows) + +-- functionscan join +explain (costs off) select * from hash_srf(cursor (select * from employees)) a, hash_srf(cursor (select * from employees)) b limit 10; + QUERY PLAN +------------------------------------------------------------- + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Nested Loop + -> Streaming(type: BROADCAST dop: 2/2) + -> Function Scan on hash_srf a + -> Function Scan on hash_srf b +(7 rows) + +select * from hash_srf(cursor (select * from employees)) a, hash_srf(cursor (select * from employees)) b limit 10; + employee_id | department_id | first_name | last_name | email | phone_number | employee_id | department_id | first_name | last_name | email | phone_number +-------------+---------------+------------+-----------+-------+--------------+-------------+---------------+------------+-----------+-------+-------------- +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +--?* +(10 rows) + +-- targetlist +explain (costs off) select 
hash_srf(cursor (select * from employees)) limit 10; + QUERY PLAN +-------------- + Limit + -> Result +(2 rows) + +select hash_srf(cursor (select * from employees)) limit 10; + hash_srf +------------------------- + (1,60,abc,def,123,123) + (2,60,abc,def,123,123) + (3,60,abc,def,123,123) + (4,60,abc,def,123,123) + (5,60,abc,def,123,123) + (6,60,abc,def,123,123) + (7,60,abc,def,123,123) + (8,60,abc,def,123,123) + (9,60,abc,def,123,123) + (10,60,abc,def,123,123) +(10 rows) + +-- subquery cannot smp +explain (costs off) select 1, (select count(*) from hash_srf(cursor (select * from employees))); + QUERY PLAN +----------------------------------------- + Result + InitPlan 1 (returns $0) + -> Aggregate + -> Function Scan on hash_srf +(4 rows) + +select 1, (select count(*) from hash_srf(cursor (select * from employees))); + ?column? | count +----------+------- + 1 | 100 +(1 row) + +-- test create or replace +CREATE OR REPLACE FUNCTION any_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END any_srf; +/ +select parallel_cursor_seq, parallel_cursor_strategy, parallel_cursor_partkey from pg_proc_ext where proc_oid = 'any_srf'::regproc; + parallel_cursor_seq | parallel_cursor_strategy | parallel_cursor_partkey +---------------------+--------------------------+------------------------- +(0 rows) + +CREATE OR REPLACE FUNCTION any_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN 
p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END any_srf; +/ +NOTICE: immutable would be set if parallel_enable specified +select parallel_cursor_seq, parallel_cursor_strategy, parallel_cursor_partkey from pg_proc_ext where proc_oid = 'any_srf'::regproc; + parallel_cursor_seq | parallel_cursor_strategy | parallel_cursor_partkey +---------------------+--------------------------+------------------------- + 0 | 0 | {} +(1 row) + +-- set provolatile. stable/volatile with parallel_enable would throw error +CREATE OR REPLACE FUNCTION stable_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ stable parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END stable_f; +/ +ERROR: only immutable can be set if parallel_enable specified +CREATE OR REPLACE FUNCTION volatile_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ volatile parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END volatile_f; +/ +ERROR: only immutable can be set if parallel_enable specified +CREATE OR REPLACE FUNCTION immutable_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ immutable parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + 
RETURN; +END immutable_f; +/ +-- Alter Function set volatile/stable would clear parallel_cursor info +alter function immutable_f(p SYS_REFCURSOR) volatile; +select parallel_cursor_seq, parallel_cursor_strategy, parallel_cursor_partkey from pg_proc_ext where proc_oid = 'immutable_f'::regproc; + parallel_cursor_seq | parallel_cursor_strategy | parallel_cursor_partkey +---------------------+--------------------------+------------------------- +(0 rows) + +alter function immutable_f(p SYS_REFCURSOR) stable; +alter function immutable_f(p SYS_REFCURSOR) immutable; +-- throw error when the operation of parallel cursor is not FETCH CURSOR +CREATE OR REPLACE FUNCTION invalid_opr_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH absolute 5 from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_opr_f; +/ +NOTICE: immutable would be set if parallel_enable specified +ERROR: only support FETCH CURSOR for parallel cursor "p" +CONTEXT: compilation of PL/pgSQL function "invalid_opr_f" near line 4 +CREATE OR REPLACE FUNCTION invalid_opr_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH backward from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_opr_f; +/ +NOTICE: immutable would be set if parallel_enable specified +ERROR: only support FETCH CURSOR for parallel cursor "p" +CONTEXT: compilation of PL/pgSQL function "invalid_opr_f" near line 4 
+CREATE OR REPLACE FUNCTION invalid_opr_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH prior from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_opr_f; +/ +NOTICE: immutable would be set if parallel_enable specified +ERROR: only support FETCH CURSOR for parallel cursor "p" +CONTEXT: compilation of PL/pgSQL function "invalid_opr_f" near line 4 +-- test specified non refcursor type +CREATE OR REPLACE FUNCTION invalid_type_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition a by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_type_f; +/ +NOTICE: immutable would be set if parallel_enable specified +ERROR: partition expr must be cursor-type parameter +CREATE OR REPLACE FUNCTION invalid_type_f (p SYS_REFCURSOR, a int) RETURN setof my_outrec_typ parallel_enable (partition a by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_type_f; +/ +NOTICE: immutable would be set if parallel_enable specified +ERROR: partition expr must be cursor-type parameter +-- create non-SRF/pipelined function +CREATE OR REPLACE FUNCTION return_int_f (p SYS_REFCURSOR) 
RETURN int parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); + res int := 0; +BEGIN + LOOP + FETCH from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + res := res + 1; + END LOOP; + RETURN res; +END return_int_f; +/ +NOTICE: immutable would be set if parallel_enable specified +explain (costs off) select * from return_int_f(cursor (select * from employees)); + QUERY PLAN +------------------------------- + Function Scan on return_int_f +(1 row) + +select * from return_int_f(cursor (select * from employees)); + return_int_f +-------------- + 100 +(1 row) + +-- declare cursor +begin; +declare xc no scroll cursor for select * from employees; +explain select * from hash_srf('xc'); + QUERY PLAN +------------------------------------------------------------------- + Function Scan on hash_srf (cost=0.25..10.25 rows=1000 width=358) +(1 row) + +end; +-- test bulk collect +CREATE OR REPLACE FUNCTION bulk_collect_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); + emp_tab table_my_outrec_typ; +BEGIN + LOOP + FETCH p bulk collect INTO emp_tab limit 5; -- input row + EXIT WHEN p%NOTFOUND; + out_rec := emp_tab(emp_tab.first); + return next out_rec; + END LOOP; + RETURN; +END bulk_collect_f; +/ +NOTICE: immutable would be set if parallel_enable specified +explain (costs off) select count(*) from bulk_collect_f(cursor (select * from employees)); + QUERY PLAN +--------------------------------------------------- + Aggregate + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Aggregate + -> Function Scan on bulk_collect_f +(4 rows) + +select count(*) from bulk_collect_f(cursor (select * from employees)); + count +------- + 20 +(1 row) + +-- create package +create or 
replace package my_pkg as + FUNCTION pkg_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any); +end my_pkg; +/ +NOTICE: immutable would be set if parallel_enable specified +create or replace package body my_pkg as + FUNCTION pkg_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END pkg_f; +end my_pkg; +/ +NOTICE: immutable would be set if parallel_enable specified +NOTICE: immutable would be set if parallel_enable specified +explain (costs off) select count(*) from my_pkg.pkg_f(cursor (select * from employees)); + QUERY PLAN +---------------------------------------------- + Aggregate + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Aggregate + -> Function Scan on pkg_f +(4 rows) + +select count(*) from my_pkg.pkg_f(cursor (select * from employees)); + count +------- + 100 +(1 row) + +drop schema parallel_enable_function cascade; +NOTICE: drop cascades to 15 other objects +DETAIL: drop cascades to table employees +drop cascades to type my_outrec_typ +drop cascades to function hash_srf(refcursor) +drop cascades to function multi_partkey_srf(refcursor) +drop cascades to type table_my_outrec_typ +drop cascades to function pipelined_table_f(refcursor) +drop cascades to function pipelined_array_f(refcursor) +drop cascades to function no_partition_srf(refcursor) +drop cascades to function multi_cursor_srf(refcursor,refcursor) +drop cascades to function any_srf(refcursor) +drop cascades to function immutable_f(refcursor) +drop cascades to function return_int_f(refcursor) +drop cascades to function bulk_collect_f(refcursor) +--?drop cascades to package.* +drop cascades to function 
parallel_enable_function.pkg_f(refcursor) diff --git a/src/test/regress/expected/smp_cursor.out b/src/test/regress/expected/smp_cursor.out new file mode 100644 index 0000000000..96b492e4cf --- /dev/null +++ b/src/test/regress/expected/smp_cursor.out @@ -0,0 +1,523 @@ +create schema smp_cursor; +set search_path=smp_cursor; +create table t1(a int, b int, c int, d bigint); +insert into t1 values(generate_series(1, 100), generate_series(1, 10), generate_series(1, 2), generate_series(1, 50)); +analyze t1; +set query_dop=1002; +explain (costs off) select * from t1; + QUERY PLAN +---------------------------------------- + Streaming(type: LOCAL GATHER dop: 1/2) + -> Seq Scan on t1 +(2 rows) + +set enable_auto_explain = on; +set auto_explain_level = notice; +-- test cursor smp +begin; +declare xc no scroll cursor for select * from t1; +fetch xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? 
-> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration:.* + + a | b | c | d +---+---+---+--- +--?.* +(1 row) + +end; +-- test plan hint +begin; +declare xc no scroll cursor for select /*+ set(query_dop 1) */ * from t1; +fetch xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select /*+ set(query_dop 1) */ * from t1; +Name: datanode1 +--?Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +---+---+---+--- +--?.* +(1 row) + +end; +set query_dop = 1; +begin; +declare xc no scroll cursor for select /*+ set(query_dop 1002) */ * from t1; +fetch xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select /*+ set(query_dop 1002) */ * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? 
-> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration:.* + + a | b | c | d +---+---+---+--- +--?.* +(1 row) + +end; +-- scroll cursor can not smp +set query_dop = 1002; +begin; +declare xc cursor for select /*+ set(query_dop 1002) */ * from t1; +fetch xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc cursor for select /*+ set(query_dop 1002) */ * from t1; +Name: datanode1 +--?Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +---+---+---+--- + 1 | 1 | 1 | 1 +(1 row) + +end; +-- cursor declared with plpgsql can not smp +declare + cursor xc no scroll is select * from t1; + tmp t1%ROWTYPE; +begin + open xc; + fetch xc into tmp; + close xc; +end; +/ +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: select * from t1 +Name: datanode1 +--?Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +CONTEXT: PL/pgSQL function inline_code_block line 5 at FETCH +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +CONTEXT: PL/pgSQL function inline_code_block line 5 at FETCH +-- test resource conflict checking +begin; +declare xc no scroll cursor for select * from t1; +drop table t1; +ERROR: cannot DROP TABLE "t1" because it is being used by active queries in this session +end; +-- test cursor with hold +begin; +declare xc no scroll cursor with hold for select * from t1; +fetch xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor with hold for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? 
-> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration:.* + + a | b | c | d +---+---+---+--- +--?.* +(1 row) + +end; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor with hold for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration:.* + +fetch absolute 10 xc; + a | b | c | d +----+----+---+---- +--?.* +(1 row) + +close xc; +-- test cursor backward error +begin; +declare xc no scroll cursor for select * from t1; +fetch absolute 10 xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +----+----+---+---- +--?.* +(1 row) + +fetch absolute 9 xc; +ERROR: cursor with stream plan do not support scan backward. 
+end; +-- test cursor other operate +begin; +declare xc no scroll cursor for select * from t1; +fetch first xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +---+---+---+--- +--?.* +(1 row) + +fetch forward xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +---+---+---+--- +--?.* +(1 row) + +fetch absolute 5 xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? 
-> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +---+---+---+--- +--?.* +(1 row) + +fetch relative 5 xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? -> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +----+----+---+---- +--?.* +(1 row) + +fetch all xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? 
-> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + + a | b | c | d +-----+----+---+---- +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +(90 rows) + +move xc; +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: declare xc no scroll cursor for select * from t1; +Name: datanode1 +--?Streaming(type: LOCAL GATHER dop: 1/2).* + Output: a, b, c, d + Spawn on: All datanodes + Consumer Nodes: All datanodes +--? 
-> Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +end; +drop schema smp_cursor cascade; +NOTICE: drop cascades to table t1 diff --git a/src/test/regress/input/gs_dump_package.source b/src/test/regress/input/gs_dump_package.source index ec7da74b5e..76f55e960d 100644 --- a/src/test/regress/input/gs_dump_package.source +++ b/src/test/regress/input/gs_dump_package.source @@ -105,12 +105,54 @@ va pck9.r1; end pck6; / +-- test parallel_enable +CREATE TYPE my_outrec_typ AS ( + employee_id numeric(6,0), + department_id numeric, + first_name character varying(30), + last_name character varying(30), + email character varying(30), + phone_number character varying(30) +); +create or replace package my_pkg as + FUNCTION pkg_f_1 (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any); + FUNCTION pkg_f_2 (p1 SYS_REFCURSOR, p2 SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p1 by hash(a,b)); +end my_pkg; +/ + +create or replace package body my_pkg as + FUNCTION pkg_f_1 (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END pkg_f_1; + FUNCTION pkg_f_2 (p1 SYS_REFCURSOR, p2 SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p1 by hash(a,b)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p1 INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p1%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END pkg_f_2; +end my_pkg; +/ + 
\! @abs_bindir@/gs_dump dump_package_db -p @portstring@ -f @abs_bindir@/dump_package.tar -F t >/dev/null 2>&1; echo $? \! @abs_bindir@/gs_restore -d restore_package_db -p @portstring@ @abs_bindir@/dump_package.tar >/dev/null 2>&1; echo $? \c restore_package_db call rowtype_pckg.rowtype_func(); +\sf my_pkg.pkg_f_1 +\sf my_pkg.pkg_f_2 + \c regression drop database if exists restore_subpartition_db; drop database if exists dump_subpartition_db; diff --git a/src/test/regress/output/gs_dump_package.source b/src/test/regress/output/gs_dump_package.source index 6dcaa12130..e18fab6ec2 100644 --- a/src/test/regress/output/gs_dump_package.source +++ b/src/test/regress/output/gs_dump_package.source @@ -100,6 +100,49 @@ create or replace package pck6 is va pck9.r1; end pck6; / +-- test parallel_enable +CREATE TYPE my_outrec_typ AS ( + employee_id numeric(6,0), + department_id numeric, + first_name character varying(30), + last_name character varying(30), + email character varying(30), + phone_number character varying(30) +); +create or replace package my_pkg as + FUNCTION pkg_f_1 (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any); + FUNCTION pkg_f_2 (p1 SYS_REFCURSOR, p2 SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p1 by hash(a,b)); +end my_pkg; +/ +NOTICE: immutable would be set if parallel_enable specified +NOTICE: immutable would be set if parallel_enable specified +create or replace package body my_pkg as + FUNCTION pkg_f_1 (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END pkg_f_1; + FUNCTION pkg_f_2 (p1 SYS_REFCURSOR, p2 SYS_REFCURSOR) RETURN setof my_outrec_typ 
parallel_enable (partition p1 by hash(a,b)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p1 INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p1%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END pkg_f_2; +end my_pkg; +/ +NOTICE: immutable would be set if parallel_enable specified +NOTICE: immutable would be set if parallel_enable specified +NOTICE: immutable would be set if parallel_enable specified +NOTICE: immutable would be set if parallel_enable specified \! @abs_bindir@/gs_dump dump_package_db -p @portstring@ -f @abs_bindir@/dump_package.tar -F t >/dev/null 2>&1; echo $? 0 \! @abs_bindir@/gs_restore -d restore_package_db -p @portstring@ @abs_bindir@/dump_package.tar >/dev/null 2>&1; echo $? @@ -112,6 +155,32 @@ call rowtype_pckg.rowtype_func(); 2 | b (2 rows) +\sf my_pkg.pkg_f_1 +CREATE OR REPLACE FUNCTION public.my_pkg.pkg_f_1(p SYS_REFCURSOR) + RETURN SETOF my_outrec_typ IMMUTABLE NOT FENCED NOT SHIPPABLE PARALLEL_ENABLE (PARTITION p BY ANY) PACKAGE +AS DECLARE out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END ; +/ +\sf my_pkg.pkg_f_2 +CREATE OR REPLACE FUNCTION public.my_pkg.pkg_f_2(p1 SYS_REFCURSOR, p2 SYS_REFCURSOR) + RETURN SETOF my_outrec_typ IMMUTABLE NOT FENCED NOT SHIPPABLE PARALLEL_ENABLE (PARTITION p1 BY HASH(a,b)) PACKAGE +AS DECLARE out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p1 INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p1%NOTFOUND; + return next out_rec; + END 
LOOP; + RETURN; +END ; +/ \c regression drop database if exists restore_subpartition_db; NOTICE: database "restore_subpartition_db" does not exist, skipping diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 7684bd9149..133fa781cf 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -42,7 +42,7 @@ test: extract_pushdown_or_clause test: workload_manager test: spm_adaptive_gplan -test: smp +test: smp smp_cursor parallel_enable_function test: alter_hw_package test: hw_grant_package gsc_func gsc_db test: uppercase_attribute_name decode_compatible_with_o outerjoin_bugfix chr_gbk diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 38984f3784..1f9aadadf2 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -33,7 +33,7 @@ test: extract_pushdown_or_clause test: workload_manager test: spm_adaptive_gplan -test: smp +test: smp smp_cursor parallel_enable_function test: alter_hw_package test: hw_grant_package gsc_func gsc_db test: uppercase_attribute_name decode_compatible_with_o outerjoin_bugfix diff --git a/src/test/regress/sql/parallel_enable_function.sql b/src/test/regress/sql/parallel_enable_function.sql new file mode 100644 index 0000000000..21ceed5821 --- /dev/null +++ b/src/test/regress/sql/parallel_enable_function.sql @@ -0,0 +1,362 @@ +create schema parallel_enable_function; +set search_path=parallel_enable_function; + +create table employees (employee_id number(6), department_id NUMBER, first_name varchar2(30), last_name varchar2(30), email varchar2(30), phone_number varchar2(30)); + +BEGIN + FOR i IN 1..100 LOOP + INSERT INTO employees VALUES (i, 60, 'abc', 'def', '123', '123'); + END LOOP; + COMMIT; +END; +/ + +CREATE TYPE my_outrec_typ AS ( + employee_id numeric(6,0), + department_id numeric, + first_name character varying(30), + last_name character varying(30), + email character varying(30), + 
phone_number character varying(30) +); + + +-- create srf function with parallel_enable +CREATE OR REPLACE FUNCTION hash_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END hash_srf; +/ + +CREATE OR REPLACE FUNCTION any_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END any_srf; +/ + +-- create function with multi-partkey +CREATE OR REPLACE FUNCTION multi_partkey_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id, department_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END multi_partkey_srf; +/ + +-- create pipelined function +create type table_my_outrec_typ is table of my_outrec_typ; + +CREATE OR REPLACE FUNCTION pipelined_table_f (p SYS_REFCURSOR) RETURN table_my_outrec_typ pipelined parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; 
-- input row + EXIT WHEN p%NOTFOUND; + pipe row(out_rec); + END LOOP; +END pipelined_table_f; +/ + +CREATE OR REPLACE FUNCTION pipelined_array_f (p SYS_REFCURSOR) RETURN _employees PIPELINED parallel_enable (partition p by any) + IS + in_rec my_outrec_typ; + BEGIN +LOOP + FETCH p INTO in_rec; + EXIT WHEN p%NOTFOUND; + PIPE ROW (in_rec); + END LOOP; +END pipelined_array_f; +/ + +-- without partition by +CREATE OR REPLACE FUNCTION no_partition_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END no_partition_srf; +/ + +-- call function +set query_dop = 1002; +explain (costs off) select * from hash_srf(cursor (select * from employees)) limit 10; +select * from hash_srf(cursor (select * from employees)) limit 10; + +explain (costs off) select * from any_srf(cursor (select * from employees)) limit 10; +select * from any_srf(cursor (select * from employees)) limit 10; + +explain (costs off) select * from pipelined_table_f(cursor (select * from employees)) limit 10; +select * from pipelined_table_f(cursor (select * from employees)) limit 10; + +explain (costs off) select * from multi_partkey_srf(cursor (select * from employees)) limit 10; +select * from multi_partkey_srf(cursor (select * from employees)) limit 10; + +explain (costs off) select * from pipelined_array_f(cursor (select * from employees)) limit 10; +select * from pipelined_array_f(cursor (select * from employees)) limit 10; + +explain (costs off) select * from no_partition_srf(cursor (select * from employees)) limit 10; +select * from no_partition_srf(cursor (select * from employees)) limit 10; + +-- test count(*) +explain (costs off) select count(*) from hash_srf(cursor (select * from 
employees)); +select count(*) from hash_srf(cursor (select * from employees)); + +-- test multi cursor args +CREATE OR REPLACE FUNCTION multi_cursor_srf (p1 SYS_REFCURSOR, p2 SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p1 by hash(employee_id)) IS + out_rec_1 my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); + out_rec_2 my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p1 INTO out_rec_1.employee_id, out_rec_1.department_id, out_rec_1.first_name, out_rec_1.last_name, out_rec_1.email, out_rec_1.phone_number; -- input row + EXIT WHEN p1%NOTFOUND; + FETCH p2 INTO out_rec_2.employee_id, out_rec_2.department_id, out_rec_2.first_name, out_rec_2.last_name, out_rec_2.email, out_rec_2.phone_number; -- input row + EXIT WHEN p2%NOTFOUND; + return next out_rec_1; + END LOOP; + RETURN; +END multi_cursor_srf; +/ + +explain (costs off) select * from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)) limit 10; +select * from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)) limit 10; + +explain (costs off) select count(*) from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)); +select count(*) from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)); + +-- nested function call +explain (costs off) select * from hash_srf(cursor (select * from hash_srf(cursor (select * from employees)))) limit 10; +select * from hash_srf(cursor (select * from hash_srf(cursor (select * from employees)))) limit 10; + +-- functionscan join +explain (costs off) select * from hash_srf(cursor (select * from employees)) a, hash_srf(cursor (select * from employees)) b limit 10; +select * from hash_srf(cursor (select * from employees)) a, hash_srf(cursor (select * from employees)) b limit 10; + +-- targetlist +explain (costs off) select hash_srf(cursor (select * from employees)) limit 10; 
+select hash_srf(cursor (select * from employees)) limit 10; + +-- subquery cannot smp +explain (costs off) select 1, (select count(*) from hash_srf(cursor (select * from employees))); +select 1, (select count(*) from hash_srf(cursor (select * from employees))); + +-- test create or replace +CREATE OR REPLACE FUNCTION any_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END any_srf; +/ + +select parallel_cursor_seq, parallel_cursor_strategy, parallel_cursor_partkey from pg_proc_ext where proc_oid = 'any_srf'::regproc; + +CREATE OR REPLACE FUNCTION any_srf (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END any_srf; +/ + +select parallel_cursor_seq, parallel_cursor_strategy, parallel_cursor_partkey from pg_proc_ext where proc_oid = 'any_srf'::regproc; + +-- set provolatile. 
stable/volatile with parallel_enable would throw error +CREATE OR REPLACE FUNCTION stable_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ stable parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END stable_f; +/ + +CREATE OR REPLACE FUNCTION volatile_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ volatile parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END volatile_f; +/ + +CREATE OR REPLACE FUNCTION immutable_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ immutable parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END immutable_f; +/ + +-- Alter Function set volatile/stable would clear parallel_cursor info +alter function immutable_f(p SYS_REFCURSOR) volatile; +select parallel_cursor_seq, parallel_cursor_strategy, parallel_cursor_partkey from pg_proc_ext where proc_oid = 'immutable_f'::regproc; + +alter function immutable_f(p SYS_REFCURSOR) stable; +alter function immutable_f(p SYS_REFCURSOR) immutable; + +-- throw error when the operation of parallel cursor is not FETCH CURSOR +CREATE OR REPLACE FUNCTION invalid_opr_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable 
(partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH absolute 5 from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_opr_f; +/ + +CREATE OR REPLACE FUNCTION invalid_opr_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH backward from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_opr_f; +/ + +CREATE OR REPLACE FUNCTION invalid_opr_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH prior from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_opr_f; +/ + +-- test specified non refcursor type +CREATE OR REPLACE FUNCTION invalid_type_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition a by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_type_f; +/ + +CREATE OR REPLACE FUNCTION invalid_type_f (p SYS_REFCURSOR, a int) RETURN setof my_outrec_typ parallel_enable (partition 
a by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END invalid_type_f; +/ + +-- create non-SRF/pipelined function +CREATE OR REPLACE FUNCTION return_int_f (p SYS_REFCURSOR) RETURN int parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); + res int := 0; +BEGIN + LOOP + FETCH from p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + res := res + 1; + END LOOP; + RETURN res; +END return_int_f; +/ + +explain (costs off) select * from return_int_f(cursor (select * from employees)); +select * from return_int_f(cursor (select * from employees)); + +-- declare cursor +begin; +declare xc no scroll cursor for select * from employees; +explain select * from hash_srf('xc'); +end; + +-- test bulk collect +CREATE OR REPLACE FUNCTION bulk_collect_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by hash(employee_id)) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); + emp_tab table_my_outrec_typ; +BEGIN + LOOP + FETCH p bulk collect INTO emp_tab limit 5; -- input row + EXIT WHEN p%NOTFOUND; + out_rec := emp_tab(emp_tab.first); + return next out_rec; + END LOOP; + RETURN; +END bulk_collect_f; +/ + +explain (costs off) select count(*) from bulk_collect_f(cursor (select * from employees)); +select count(*) from bulk_collect_f(cursor (select * from employees)); + +-- create package +create or replace package my_pkg as + FUNCTION pkg_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any); +end my_pkg; +/ + +create or replace 
package body my_pkg as + FUNCTION pkg_f (p SYS_REFCURSOR) RETURN setof my_outrec_typ parallel_enable (partition p by any) IS + out_rec my_outrec_typ := my_outrec_typ(NULL, NULL, NULL, NULL, NULL, NULL); +BEGIN + LOOP + FETCH p INTO out_rec.employee_id, out_rec.department_id, out_rec.first_name, out_rec.last_name, out_rec.email, out_rec.phone_number; -- input row + EXIT WHEN p%NOTFOUND; + return next out_rec; + END LOOP; + RETURN; +END pkg_f; +end my_pkg; +/ + +explain (costs off) select count(*) from my_pkg.pkg_f(cursor (select * from employees)); +select count(*) from my_pkg.pkg_f(cursor (select * from employees)); + +drop schema parallel_enable_function cascade; diff --git a/src/test/regress/sql/smp_cursor.sql b/src/test/regress/sql/smp_cursor.sql new file mode 100644 index 0000000000..8334505c59 --- /dev/null +++ b/src/test/regress/sql/smp_cursor.sql @@ -0,0 +1,82 @@ +create schema smp_cursor; +set search_path=smp_cursor; + +create table t1(a int, b int, c int, d bigint); +insert into t1 values(generate_series(1, 100), generate_series(1, 10), generate_series(1, 2), generate_series(1, 50)); +analyze t1; + +set query_dop=1002; + +explain (costs off) select * from t1; + +set enable_auto_explain = on; +set auto_explain_level = notice; +-- test cursor smp +begin; +declare xc no scroll cursor for select * from t1; +fetch xc; +end; + +-- test plan hint +begin; +declare xc no scroll cursor for select /*+ set(query_dop 1) */ * from t1; +fetch xc; +end; + +set query_dop = 1; +begin; +declare xc no scroll cursor for select /*+ set(query_dop 1002) */ * from t1; +fetch xc; +end; + +-- scroll cursor can not smp +set query_dop = 1002; +begin; +declare xc cursor for select /*+ set(query_dop 1002) */ * from t1; +fetch xc; +end; + +-- cursor declared with plpgsql can not smp +declare + cursor xc no scroll is select * from t1; + tmp t1%ROWTYPE; +begin + open xc; + fetch xc into tmp; + close xc; +end; +/ + +-- test resource conflict checking +begin; +declare xc no scroll cursor for 
select * from t1; +drop table t1; +end; + +-- test cursor with hold +begin; +declare xc no scroll cursor with hold for select * from t1; +fetch xc; +end; +fetch absolute 10 xc; +close xc; + +-- test cursor backward error +begin; +declare xc no scroll cursor for select * from t1; +fetch absolute 10 xc; +fetch absolute 9 xc; +end; + +-- test cursor other operate +begin; +declare xc no scroll cursor for select * from t1; +fetch first xc; +fetch forward xc; +fetch absolute 5 xc; +fetch relative 5 xc; +fetch all xc; +move xc; +end; + +drop schema smp_cursor cascade; \ No newline at end of file -- Gitee From 45a52830dd3a21a68797086f0a1f9226b5a5fc55 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 15 Jul 2024 20:02:39 +0800 Subject: [PATCH 059/347] =?UTF-8?q?=E5=A4=84=E7=90=86=E6=A3=80=E8=A7=86?= =?UTF-8?q?=E6=84=8F=E8=A7=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/pg_proc_ext.cpp | 13 +++++++++++-- src/include/catalog/pg_proc_ext.h | 14 ++++++++++++-- .../upgrade_catalog_maindb_92_941.sql | 2 +- .../upgrade_catalog_otherdb_92_941.sql | 2 +- 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/common/backend/catalog/pg_proc_ext.cpp b/src/common/backend/catalog/pg_proc_ext.cpp index 98c91538cb..2fb1d247f6 100644 --- a/src/common/backend/catalog/pg_proc_ext.cpp +++ b/src/common/backend/catalog/pg_proc_ext.cpp @@ -3,10 +3,19 @@ * pg_proc_ext.cpp * routines to support manipulation of the pg_proc_ext relation * - * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2021, openGauss Contributors * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- * * IDENTIFICATION * src/common/backend/catalog/pg_proc_ext.cpp diff --git a/src/include/catalog/pg_proc_ext.h b/src/include/catalog/pg_proc_ext.h index 46c8d52f88..bc43c6d0c6 100644 --- a/src/include/catalog/pg_proc_ext.h +++ b/src/include/catalog/pg_proc_ext.h @@ -3,9 +3,19 @@ * pg_proc_ext.h * extension of pg_proc * + * Portions Copyright (c) 2021, openGauss Contributors * - * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- * * src/include/catalog/pg_proc_ext.h * diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql index 3f4ca4a4e9..5c5ed1b90c 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql @@ -9,7 +9,7 @@ CREATE TABLE IF NOT EXISTS pg_catalog.pg_proc_ext )WITH(oids=false) TABLESPACE pg_default; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3488; -CREATE INDEX pg_proc_ext_proc_oid_index ON pg_catalog.pg_proc_ext USING BTREE(proc_oid OID_OPS); +CREATE UNIQUE INDEX pg_proc_ext_proc_oid_index ON pg_catalog.pg_proc_ext USING BTREE(proc_oid OID_OPS); SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; GRANT SELECT ON pg_catalog.pg_proc_ext TO PUBLIC; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql index c9fb55f47c..85d451427c 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql @@ -9,7 +9,7 @@ CREATE TABLE IF NOT EXISTS pg_catalog.pg_proc_ext )WITH(oids=false) TABLESPACE pg_default; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3488; -CREATE INDEX pg_proc_ext_proc_oid_index ON pg_catalog.pg_proc_ext USING BTREE(proc_oid OID_OPS); +CREATE UNIQUE INDEX pg_proc_ext_proc_oid_index ON pg_catalog.pg_proc_ext USING BTREE(proc_oid OID_OPS); SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; 
GRANT SELECT ON pg_catalog.pg_event_trigger TO PUBLIC; -- Gitee From c436544b524e162f7877b2c512f7a38c9e7834ac Mon Sep 17 00:00:00 2001 From: lukeman Date: Wed, 17 Jul 2024 16:32:22 +0800 Subject: [PATCH 060/347] =?UTF-8?q?=E6=8B=B7=E8=B4=9Dso=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/CMakeLists.txt | 1 + src/gausskernel/Makefile | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2634d52819..d9a73aee6e 100755 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -223,6 +223,7 @@ if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF) endif() endif() install(DIRECTORY ${LIBCURL_LIB_PATH} DESTINATION .) +install(DIRECTORY ${AWSSDK_LIB_PATH} DESTINATION .) if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") install(DIRECTORY ${KERBEROS_SBIN_PATH}/ DESTINATION bin) install(DIRECTORY ${KERBEROS_BIN_PATH} DESTINATION .) diff --git a/src/gausskernel/Makefile b/src/gausskernel/Makefile index ac5791f433..e788a35227 100755 --- a/src/gausskernel/Makefile +++ b/src/gausskernel/Makefile @@ -697,6 +697,8 @@ endif cp '$(ZSTD_LIB_PATH)'/libzstd.so* '$(DESTDIR)$(libdir)/' cp '$(LIBODBC_LIB_PATH)'/libodbc* '$(DESTDIR)$(libdir)/' cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' + cp -d '$(AWSSDK_LIB_PATH)'/libaws-cpp-sdk-core.so* '$(DESTDIR)$(libdir)/' + cp -d '$(AWSSDK_LIB_PATH)'/libaws-cpp-sdk-s3.so* '$(DESTDIR)$(libdir)/' endif rm -rf '$(DESTDIR)$(libdir)/../temp' -- Gitee From a32719b4cddc4eb34c3c21ee8b32f5ca93dc30fc Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Wed, 17 Jul 2024 20:08:31 +0800 Subject: [PATCH 061/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dgs=5Fdump=E7=9A=84cor?= =?UTF-8?q?e=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/pg_dump.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 080441d1bc..884a4059fb 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -13441,8 +13441,8 @@ static void dumpFunc(Archive* fout, FuncInfo* finfo) "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname, " "%s, " "(SELECT 1 FROM pg_depend WHERE objid = p.oid AND objid = refobjid AND refclassid = 1255 LIMIT 1) AS selfloop, " - "proargnames[o.parallel_cursor_seq + 1] AS parallelCursorName, o.parallel_cursor_strategy AS parallelCursorStrategy, " - "pg_catalog.array_to_string(o.parallel_cursor_partkey, ', ') AS parallelCursorPartKey " + "proargnames[o.parallel_cursor_seq + 1] AS parallelcursorname, o.parallel_cursor_strategy AS parallelcursorstrategy, " + "pg_catalog.array_to_string(o.parallel_cursor_partkey, ', ') AS parallelcursorpartkey " "FROM pg_catalog.pg_proc p left join pg_catalog.pg_proc_ext o on p.oid = o.proc_oid " "WHERE p.oid = '%u'::pg_catalog.oid", isHasFencedmode ? 
"fencedmode" : "NULL AS fencedmode", @@ -13475,9 +13475,9 @@ static void dumpFunc(Archive* fout, FuncInfo* finfo) proshippable = PQgetvalue(res, 0, PQfnumber(res, "proshippable")); propackage = PQgetvalue(res, 0, PQfnumber(res, "propackage")); propackageid = PQgetvalue(res, 0, PQfnumber(res, "propackageid")); - parallelCursorName = PQgetvalue(res, 0, PQfnumber(res, "parallelCursorName")); - parallelCursorStrategy = PQgetvalue(res, 0, PQfnumber(res, "parallelCursorStrategy")); - parallelCursorPartKey = PQgetvalue(res, 0, PQfnumber(res, "parallelCursorPartKey")); + parallelCursorName = PQgetvalue(res, 0, PQfnumber(res, "parallelcursorname")); + parallelCursorStrategy = PQgetvalue(res, 0, PQfnumber(res, "parallelcursorstrategy")); + parallelCursorPartKey = PQgetvalue(res, 0, PQfnumber(res, "parallelcursorpartkey")); if ((gdatcompatibility != NULL) && strcmp(gdatcompatibility, B_FORMAT) == 0) { /* get definer user name */ -- Gitee From da517259e98a0f30826f9a8650f3f7741adbc7eb Mon Sep 17 00:00:00 2001 From: gentle_hu Date: Thu, 18 Jul 2024 15:53:40 +0800 Subject: [PATCH 062/347] add version mode into version.cfg --- build/script/cmake_package_mini.sh | 6 +++++- build/script/utils/cmake_compile.sh | 14 +++++++++++++- build/script/utils/make_compile.sh | 10 ++++++++++ docker/upgrade/upgrade_common.sh | 8 ++++---- liteom/upgrade_common.sh | 8 ++++---- 5 files changed, 36 insertions(+), 10 deletions(-) diff --git a/build/script/cmake_package_mini.sh b/build/script/cmake_package_mini.sh index a1b83182e9..7e918aed00 100644 --- a/build/script/cmake_package_mini.sh +++ b/build/script/cmake_package_mini.sh @@ -529,6 +529,10 @@ function install_gaussdb() commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | cut -d ")" -f 1 | awk '{print $NF}') echo "${commitid}" >>${SCRIPT_DIR}/version.cfg echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1 + + #insert the version mode to version.cfg + echo "$version_mode" >> ${SCRIPT_DIR}/version.cfg + echo "End insert version mode 
into version cfg" >> "$LOG_FILE" 2>&1 } ####################################################################### @@ -811,4 +815,4 @@ mkdir ${ROOT_DIR}/output mv ${ROOT_DIR}/build/script/*.tar.gz ${ROOT_DIR}/output/ test -e ${ROOT_DIR}/build/script/gaussdb.map && mv ${ROOT_DIR}/build/script/gaussdb.map ${ROOT_DIR}/output/ echo "now, all packages has finished!" -exit 0 \ No newline at end of file +exit 0 diff --git a/build/script/utils/cmake_compile.sh b/build/script/utils/cmake_compile.sh index 3a5c680740..0caa1fa316 100644 --- a/build/script/utils/cmake_compile.sh +++ b/build/script/utils/cmake_compile.sh @@ -63,6 +63,15 @@ function get_kernel_commitid() echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1 } +####################################################################### +##insert the version mode to version.cfg +####################################################################### +function get_version_mode() +{ + echo "$version_mode" >> ${SCRIPT_DIR}/version.cfg + echo "End insert version mode into version cfg" >> "$LOG_FILE" 2>&1 +} + ####################################################################### ## generate the version file. 
####################################################################### @@ -157,6 +166,9 @@ function install_gaussdb() #insert the commitid to version.cfg as the upgrade app path specification get_kernel_commitid + + #insert the version mode to version.cfg + get_version_mode } ####################################################################### @@ -177,4 +189,4 @@ function gaussdb_build() echo "please input right paramenter values server or libpq " exit 1 esac -} \ No newline at end of file +} diff --git a/build/script/utils/make_compile.sh b/build/script/utils/make_compile.sh index c4513f54fe..d5d7f83d4f 100644 --- a/build/script/utils/make_compile.sh +++ b/build/script/utils/make_compile.sh @@ -70,6 +70,15 @@ function get_kernel_commitid() echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1 } +####################################################################### +##insert the version mode to version.cfg +####################################################################### +function get_version_mode() +{ + echo "$version_mode" >> ${SCRIPT_DIR}/version.cfg + echo "End insert version mode into version cfg" >> "$LOG_FILE" 2>&1 +} + ####################################################################### ## generate the version file. ####################################################################### @@ -320,6 +329,7 @@ function install_gaussdb() chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1 get_kernel_commitid + get_version_mode } ####################################################################### diff --git a/docker/upgrade/upgrade_common.sh b/docker/upgrade/upgrade_common.sh index f995511163..fb63888add 100644 --- a/docker/upgrade/upgrade_common.sh +++ b/docker/upgrade/upgrade_common.sh @@ -298,14 +298,14 @@ function check_version() { if [[ ! -f ${GAUSSHOME}/version.cfg ]]; then die "Cannot find current version.cfg!" 
${err_upgrade_pre} fi - old_version=$(tail -n 1 $GAUSSHOME/version.cfg) + old_version=$(sed -n 3p $GAUSSHOME/version.cfg) old_cfg=$(sed -n 2p "$GAUSSHOME"/version.cfg | sed 's/\.//g') # get new version if [[ ! -f $UPGRADE_NEW_PKG_PATH/version.cfg ]]; then die "Cannot find new version.cfg!" ${err_upgrade_pre} fi - new_version=$(tail -n 1 $UPGRADE_NEW_PKG_PATH/version.cfg) + new_version=$(sed -n 3p $UPGRADE_NEW_PKG_PATH/version.cfg) new_cfg=$(sed -n 2p $UPGRADE_NEW_PKG_PATH/version.cfg | sed 's/\.//g') if [[ X"$old_version" == X || X"$old_cfg" == X || X"$new_version" == X || X"$new_cfg" == X ]]; then @@ -510,7 +510,7 @@ function cp_pkg() { new_bin_path="$GAUSS_TMP_PATH"/install_bin_"$new_version" fi #check pkg's version.cfg is equal to version_flag - temppkg_version=$(tail -n 1 "$new_bin_path"/version.cfg) + temppkg_version=$(sed -n 3p "$new_bin_path"/version.cfg) if [[ "$new_version" != "$temppkg_version" ]]; then die "pkg's version.cfg is not correct!" ${err_upgrade_bin} fi @@ -1076,7 +1076,7 @@ function check_real_gaussdb_version() { die "Get real gaussdb version failed" ${err_upgrade_bin} fi local new_version="" - new_version=`tail -n 1 $UPGRADE_NEW_PKG_PATH/version.cfg` + new_version=`sed -n 3p $UPGRADE_NEW_PKG_PATH/version.cfg` debug "new_version: $new_version" debug "real_gaussdb_version: $real_gaussdb_version" if ! echo "$real_gaussdb_version" | grep "$new_version" > /dev/null; then diff --git a/liteom/upgrade_common.sh b/liteom/upgrade_common.sh index 1e92fc31aa..e0fe9057bd 100644 --- a/liteom/upgrade_common.sh +++ b/liteom/upgrade_common.sh @@ -275,7 +275,7 @@ function check_version() { if [[ ! -f "${GAUSSHOME}/version.cfg" ]]; then die "Cannot find current version.cfg!" 
${err_upgrade_pre} else - old_version=$(tail -n 1 "$GAUSSHOME"/version.cfg) + old_version=$(sed -n 3p "$GAUSSHOME"/version.cfg) old_cfg=$(sed -n 2p "$GAUSSHOME"/version.cfg | sed 's/\.//g') fi if [[ -f "$GAUSS_UPGRADE_BIN_PATH"/version.cfg ]]; then @@ -286,7 +286,7 @@ function check_version() { die "Cannot find new version.cfg!" ${err_upgrade_pre} fi - new_version=$(tail -n 1 "$new_version_cfg_path") + new_version=$(sed -n 3p "$new_version_cfg_path") new_cfg=$(sed -n 2p "$new_version_cfg_path" | sed 's/\.//g') if [[ X"$old_version" == X || X"$old_cfg" == X || X"$new_version" == X || X"$new_cfg" == X ]]; then @@ -503,7 +503,7 @@ function cp_pkg() { die "chmod files or dirs failed" ${err_upgrade_bin} fi #check pkg's version.cfg is equal to version_flag - temppkg_version=$(tail -n 1 "$new_bin_path"/version.cfg) + temppkg_version=$(sed -n 3p "$new_bin_path"/version.cfg) if [[ "$new_version" != "$temppkg_version" ]]; then die "pkg's version.cfg is not correct!" ${err_upgrade_bin} fi @@ -1253,4 +1253,4 @@ function delete_tmp_files() { fi rm -f "$GAUSS_TMP_PATH"/version_flag rm -f "$GAUSS_TMP_PATH"/record_step.txt -} \ No newline at end of file +} -- Gitee From 50eef0ac3aa4a0ee52a9ba47591787722970a294 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=82=AE=E5=82=A8-=E7=8E=8B=E5=BB=BA=E8=BE=BE?= Date: Thu, 18 Jul 2024 08:14:53 +0000 Subject: [PATCH 063/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=BB=E6=9C=BA?= =?UTF-8?q?=E6=9C=8D=E5=8A=A1=E5=90=AF=E5=8A=A8=E5=AE=A1=E8=AE=A1=E6=97=A5?= =?UTF-8?q?=E5=BF=97=E4=B8=A2=E5=A4=B1=20=E5=9C=A8=20startup=20=E7=BA=BF?= =?UTF-8?q?=E7=A8=8B=E5=90=AF=E5=8A=A8=E5=89=8D,=20=E5=88=9D=E5=A7=8B?= =?UTF-8?q?=E5=8C=96=20pgaudit=20=E7=BA=BF=E7=A8=8B.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 邮储-王建达 --- .../process/postmaster/postmaster.cpp | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/gausskernel/process/postmaster/postmaster.cpp 
b/src/gausskernel/process/postmaster/postmaster.cpp index 8a081a8967..4cac1013f2 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -1,4 +1,4 @@ -/* ------------------------------------------------------------------------- +/* ------------------------------------------------------------------------- * * postmaster.cpp * This program acts as a clearing house for requests to the @@ -2958,6 +2958,13 @@ int PostmasterMain(int argc, char* argv[]) INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT), sizeof(LogControlData)); g_instance.pid_cxt.SysLoggerPID = SysLogger_Start(); + /* Database Security: Support database audit */ + /* start auditor process */ + /* start the audit collector as needed. */ + if (u_sess->attr.attr_security.Audit_enabled && !dummyStandbyMode) { + pgaudit_start_all(); + } + if (IS_PGXC_DATANODE && !dummyStandbyMode && !isRestoreMode) { StreamObj::startUp(); StreamNodeGroup::StartUp(); @@ -7256,16 +7263,8 @@ static void reaper(SIGNAL_ARGS) && !SS_STANDBY_MODE && !SS_PERFORMING_SWITCHOVER && !SS_STANDBY_FAILOVER && !SS_IN_REFORM) g_instance.pid_cxt.StatementPID = initialize_util_thread(TRACK_STMT_WORKER); - /* Database Security: Support database audit */ - /* start auditor process */ - /* start the audit collector as needed. 
*/ - if (u_sess->attr.attr_security.Audit_enabled && !dummyStandbyMode) { - pgaudit_start_all(); - } - if (t_thrd.postmaster_cxt.audit_primary_start && !t_thrd.postmaster_cxt.audit_primary_failover && !t_thrd.postmaster_cxt.audit_standby_switchover) { - pg_usleep(100000L); pgaudit_system_start_ok(g_instance.attr.attr_network.PostPortNumber); t_thrd.postmaster_cxt.audit_primary_start = false; } -- Gitee From 6fa297b2c44e5e00adeb0fa9d8033b7fca9de135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Thu, 18 Jul 2024 08:34:12 +0000 Subject: [PATCH 064/347] modify error message when create global partition index --- src/bin/gsqlerr/errmsg.txt | 2 +- src/common/backend/parser/parse_utilcmd.cpp | 2 +- src/gausskernel/po/zh_CN.po | 4 ++-- src/test/regress/expected/cstore_unique_index.out | 2 +- src/test/regress/expected/gpi_bitmapscan.out | 4 ++-- src/test/regress/expected/gpi_build_index.out | 4 ++-- src/test/regress/expected/gpi_create_constraint.out | 2 +- src/test/regress/expected/hash_index_001.out | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/bin/gsqlerr/errmsg.txt b/src/bin/gsqlerr/errmsg.txt index f9a4360ad0..ce38477573 100644 --- a/src/bin/gsqlerr/errmsg.txt +++ b/src/bin/gsqlerr/errmsg.txt @@ -25349,7 +25349,7 @@ CAUSE: "invalid" ACTION: "invalid" -GAUSS-05160: "Global partition index only support btree." +GAUSS-05160: "Global partition index only support btree and ubtree." 
SQLSTATE: 0A000 CAUSE: "invalid" ACTION: "invalid" diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp index e30d305e19..6f0f821eb8 100644 --- a/src/common/backend/parser/parse_utilcmd.cpp +++ b/src/common/backend/parser/parse_utilcmd.cpp @@ -4296,7 +4296,7 @@ IndexStmt* transformIndexStmt(Oid relid, IndexStmt* stmt, const char* queryStrin if (pg_strcasecmp(stmt->accessMethod, DEFAULT_INDEX_TYPE) != 0 && pg_strcasecmp(stmt->accessMethod, DEFAULT_USTORE_INDEX_TYPE) != 0) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Global partition index only support btree."))); + errmsg("Global partition index only support btree and ubtree."))); } if (isColStore) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), diff --git a/src/gausskernel/po/zh_CN.po b/src/gausskernel/po/zh_CN.po index 2f061646ad..40bbdfd582 100644 --- a/src/gausskernel/po/zh_CN.po +++ b/src/gausskernel/po/zh_CN.po @@ -14751,8 +14751,8 @@ msgstr "跨桶索引不支持列存储" #: ../common/backend/parser/parse_utilcmd.cpp:4287 #, c-format -msgid "Global partition index only support btree." -msgstr "全局分区索引只支持btree." +msgid "Global partition index only support btree and ubtree." +msgstr "全局分区索引只支持btree和ubtree." #: ../common/backend/parser/parse_utilcmd.cpp:4297 #, c-format diff --git a/src/test/regress/expected/cstore_unique_index.out b/src/test/regress/expected/cstore_unique_index.out index fdaa96f4ac..d75f8951e7 100644 --- a/src/test/regress/expected/cstore_unique_index.out +++ b/src/test/regress/expected/cstore_unique_index.out @@ -94,7 +94,7 @@ create unique index on part_t1 (b, c); ERROR: Global partition index does not support column store. -- Fail. Unique index on cstore_table can only be local index. create unique index on part_t1 using cbtree (a, c); -ERROR: Global partition index only support btree. +ERROR: Global partition index only support btree and ubtree. 
create unique index on part_t1 using cbtree (a, c) local; \d part_t1 Table "public.part_t1" diff --git a/src/test/regress/expected/gpi_bitmapscan.out b/src/test/regress/expected/gpi_bitmapscan.out index 0dee3501a8..72393a0514 100644 --- a/src/test/regress/expected/gpi_bitmapscan.out +++ b/src/test/regress/expected/gpi_bitmapscan.out @@ -243,10 +243,10 @@ UPDATE test_part_bitmapand_ginst_btree SET sample = to_tsquery('english', txtsam CREATE UNIQUE INDEX ON test_part_bitmapand_ginst_btree (a) local; -- failed CREATE INDEX qq ON test_part_bitmapand_ginst_btree USING gist (keyword tsquery_ops); -ERROR: Global partition index only support btree. +ERROR: Global partition index only support btree and ubtree. CREATE INDEX qq ON test_part_bitmapand_ginst_btree USING gist (keyword tsquery_ops) local; CREATE INDEX ON test_part_bitmapand_ginst_btree USING gist (keyword tsquery_ops); -ERROR: Global partition index only support btree. +ERROR: Global partition index only support btree and ubtree. explain (costs off) SELECT keyword FROM test_part_bitmapand_ginst_btree WHERE keyword @> 'new' and a = 10; QUERY PLAN -------------------------------------------------------------------- diff --git a/src/test/regress/expected/gpi_build_index.out b/src/test/regress/expected/gpi_build_index.out index 1f4d7662cd..ce46cc65c9 100644 --- a/src/test/regress/expected/gpi_build_index.out +++ b/src/test/regress/expected/gpi_build_index.out @@ -39,10 +39,10 @@ create index rp_index_global15 on hw_global_index_rp ((c1-c2),c1,c2) global; ERROR: Global partition index does not support EXPRESSION index --succeed create index rp_index_global16 on hw_global_index_rp using hash (c1) global; -ERROR: Global partition index only support btree. +ERROR: Global partition index only support btree and ubtree. --fail ERROR: access method "HASH" does not support global indexes create index rp_index_global17 on hw_global_index_rp using gin ((c1-c2),c1,c2) global; -ERROR: Global partition index only support btree. 
+ERROR: Global partition index only support btree and ubtree. --fail ERROR: data type INTEGER has no default operator class for access method "GIN" ??? create unique index CONCURRENTLY rp_index_global18 on hw_global_index_rp (c1) global; ERROR: cannot create concurrent partitioned indexes diff --git a/src/test/regress/expected/gpi_create_constraint.out b/src/test/regress/expected/gpi_create_constraint.out index 36d0bb68cd..f3029c25e6 100644 --- a/src/test/regress/expected/gpi_create_constraint.out +++ b/src/test/regress/expected/gpi_create_constraint.out @@ -41,7 +41,7 @@ CREATE INDEX idx8_gpi_table1 ON gpi_table1 ((c1+10), c2) GLOBAL; ERROR: Global partition index does not support EXPRESSION index --error CREATE INDEX idx9_gpi_table1 ON gpi_table1 USING hash (c1, c2) GLOBAL; -ERROR: Global partition index only support btree. +ERROR: Global partition index only support btree and ubtree. --error CREATE INDEX idx10_gpi_table1 ON gpi_table1 (c1); --ok diff --git a/src/test/regress/expected/hash_index_001.out b/src/test/regress/expected/hash_index_001.out index b0f7c85b55..b7d59c9ce7 100644 --- a/src/test/regress/expected/hash_index_001.out +++ b/src/test/regress/expected/hash_index_001.out @@ -161,7 +161,7 @@ NOTICE: index "hash_t4_id2" does not exist, skipping drop index if exists hash_t4_id2_new; NOTICE: index "hash_t4_id2_new" does not exist, skipping create index hash_t4_id1 on hash_table_4 using hash(id) global; -ERROR: Global partition index only support btree. +ERROR: Global partition index only support btree and ubtree. 
create index hash_t4_id2 on hash_table_4 using hash(id) local ( partition index_t4_p1, -- Gitee From 009166862502f6bce4d9165b4f983874b3becd14 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Fri, 19 Jul 2024 09:37:59 +0800 Subject: [PATCH 065/347] xlog vg name guc --- contrib/ndpplugin/ndpplugin.cpp | 2 +- src/bin/gs_guc/cluster_guc.conf | 3 ++- src/bin/initdb/initdb.cpp | 10 ++++++-- src/common/backend/catalog/catalog.cpp | 8 +++---- src/common/backend/utils/adt/dbsize.cpp | 8 +++---- src/common/backend/utils/cache/relmapper.cpp | 2 +- src/common/backend/utils/init/miscinit.cpp | 8 +++---- .../backend/utils/misc/guc/guc_storage.cpp | 17 ++++++++++--- .../utils/misc/postgresql_single.conf.sample | 3 ++- src/common/port/tool_common.cpp | 4 ++-- .../ddes/adapter/ss_dms_recovery.cpp | 2 +- .../ddes/adapter/ss_reform_common.cpp | 10 ++++---- .../storage/access/transam/cbmparsexlog.cpp | 2 +- .../storage/access/transam/xlog.cpp | 2 +- src/gausskernel/storage/file/fd.cpp | 2 +- .../storage/replication/basebackup.cpp | 6 ++--- src/gausskernel/storage/replication/slot.cpp | 2 +- .../knl/knl_guc/knl_instance_attr_storage.h | 3 ++- .../regress/output/recovery_2pc_tools.source | 3 ++- src/test/regress/pg_regress.cpp | 3 +-- src/test/ss/build_ss_database.sh | 4 ++-- src/test/ss/build_ss_database_common.sh | 8 +++---- src/test/ss/conf_start_dss_inst.sh | 24 ++++++------------- src/test/ss/copy_xlog_to_private_vg.sh | 18 +++++++------- src/test/ss/ss_database_build_env.sh | 4 ++-- 25 files changed, 84 insertions(+), 74 deletions(-) diff --git a/contrib/ndpplugin/ndpplugin.cpp b/contrib/ndpplugin/ndpplugin.cpp index d0662d060f..43e8957751 100644 --- a/contrib/ndpplugin/ndpplugin.cpp +++ b/contrib/ndpplugin/ndpplugin.cpp @@ -1473,7 +1473,7 @@ void _PG_init(void) #ifdef GlobalCache long long au_size; - const char *vg_name = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name + 1; + const char *vg_name = g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name + 
1; int ret = dss_compare_size(vg_name, &au_size); if (ret != 0 || au_size != DSS_DEFAULT_AU_SIZE) { pthread_mutex_unlock(&g_ndp_instance.mutex); diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index e800cc4995..50a1be2451 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -755,7 +755,8 @@ var_eq_const_selectivity|bool|0,0|NULL|NULL| enable_save_confirmed_lsn|bool|0,0|NULL|NULL| ss_enable_dss|bool|0,0|NULL|NULL| enable_segment|bool|0,0|NULL|NULL| -ss_dss_vg_name|string|0,0|NULL|NULL| +ss_dss_data_vg_name|string|0,0|NULL|NULL| +ss_dss_xlog_vg_name|string|0,0|NULL|NULL| ss_dss_conn_path|string|0,0|NULL|NULL| ss_enable_dms|bool|0,0|NULL|NULL| ss_enable_catalog_centralized|bool|0,0|NULL|NULL| diff --git a/src/bin/initdb/initdb.cpp b/src/bin/initdb/initdb.cpp index fc9ce3f7fb..02361f49b8 100644 --- a/src/bin/initdb/initdb.cpp +++ b/src/bin/initdb/initdb.cpp @@ -1575,9 +1575,15 @@ static void setup_config(void) } if (strlen(vgdata) != 0) { - nRet = sprintf_s(repltok, sizeof(repltok), "ss_dss_vg_name = '%s'", vgdata); + nRet = sprintf_s(repltok, sizeof(repltok), "ss_dss_data_vg_name = '%s'", vgdata); securec_check_ss_c(nRet, "\0", "\0"); - conflines = replace_token(conflines, "#ss_dss_vg_name = ''", repltok); + conflines = replace_token(conflines, "#ss_dss_data_vg_name = ''", repltok); + } + + if (strlen(vglog) != 0) { + nRet = sprintf_s(repltok, sizeof(repltok), "ss_dss_xlog_vg_name = '%s'", vglog); + securec_check_ss_c(nRet, "\0", "\0"); + conflines = replace_token(conflines, "#ss_dss_xlog_vg_name = ''", repltok); } if (socketpath != NULL) { diff --git a/src/common/backend/catalog/catalog.cpp b/src/common/backend/catalog/catalog.cpp index fe2185d4e7..dff643ff37 100644 --- a/src/common/backend/catalog/catalog.cpp +++ b/src/common/backend/catalog/catalog.cpp @@ -237,7 +237,7 @@ char* relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum) if (ENABLE_DSS) { errno_t rc = snprintf_s(datadir, 
MAXPGPATH, MAXPGPATH - 1, "%s/", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name); + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name); securec_check_ss(rc, "\0", "\0"); } else { datadir[0] = '\0'; @@ -595,9 +595,9 @@ RelFileNodeForkNum relpath_to_filenode(char* path) char *tmppath = NULL; char *newpath = parsepath; uint32 pathsize = (uint32)strlen(parsepath); - uint32 homesize = (uint32)strlen(g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name); + uint32 homesize = (uint32)strlen(g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name); if ((pathsize <= homesize + 1) || - (strncmp(path, g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, homesize) != 0)) { + (strncmp(path, g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name, homesize) != 0)) { pfree(parsepath); ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid relation file path %s.", path))); } @@ -717,7 +717,7 @@ char* GetDatabasePath(Oid dbNode, Oid spcNode) if (ENABLE_DSS) { rc = snprintf_s(datadir, MAXPGPATH, MAXPGPATH - 1, "%s/", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name); + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name); securec_check_ss(rc, "\0", "\0"); } else { datadir[0] = '\0'; diff --git a/src/common/backend/utils/adt/dbsize.cpp b/src/common/backend/utils/adt/dbsize.cpp index 4b70ebe455..cc89ec2a5d 100644 --- a/src/common/backend/utils/adt/dbsize.cpp +++ b/src/common/backend/utils/adt/dbsize.cpp @@ -174,7 +174,7 @@ static int64 calculate_database_size(Oid dbOid) /* Get the vgname in DSS mode */ if (ENABLE_DSS) - dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name; /* Shared storage in pg_global is not counted */ @@ -633,14 +633,14 @@ static int64 calculate_tablespace_size(Oid tblspcOid) if (tblspcOid == DEFAULTTABLESPACE_OID) { if (ENABLE_DSS) { rc = snprintf_s(tblspcPath, MAXPGPATH, MAXPGPATH - 1, "%s/base", - 
g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name); + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name); } else { rc = snprintf_s(tblspcPath, MAXPGPATH, MAXPGPATH - 1, "base"); } } else if (tblspcOid == GLOBALTABLESPACE_OID) { if (ENABLE_DSS) { rc = snprintf_s(tblspcPath, MAXPGPATH, MAXPGPATH - 1, "%s/global", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name); + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name); } else { rc = snprintf_s(tblspcPath, MAXPGPATH, MAXPGPATH - 1, "global"); } @@ -650,7 +650,7 @@ static int64 calculate_tablespace_size(Oid tblspcOid) if (ENABLE_DSS) { rc = snprintf_s( tblspcPath, MAXPGPATH, MAXPGPATH - 1, "%s/pg_tblspc/%u/%s", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name, tblspcOid, TABLESPACE_VERSION_DIRECTORY); } else { rc = snprintf_s(tblspcPath, diff --git a/src/common/backend/utils/cache/relmapper.cpp b/src/common/backend/utils/cache/relmapper.cpp index f8e4823704..74b5b7cc34 100644 --- a/src/common/backend/utils/cache/relmapper.cpp +++ b/src/common/backend/utils/cache/relmapper.cpp @@ -491,7 +491,7 @@ void RelationMapFinishBootstrap(void) if (ENABLE_DSS) { char map_file_name[MAXPGPATH]; int rc = snprintf_s(map_file_name, sizeof(map_file_name), sizeof(map_file_name) - 1, "%s/global/%s", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, RELMAPPER_FILENAME); + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name, RELMAPPER_FILENAME); securec_check_ss_c(rc, "\0", "\0"); struct stat st; diff --git a/src/common/backend/utils/init/miscinit.cpp b/src/common/backend/utils/init/miscinit.cpp index bd2e1763a9..9830191be1 100644 --- a/src/common/backend/utils/init/miscinit.cpp +++ b/src/common/backend/utils/init/miscinit.cpp @@ -2124,7 +2124,7 @@ void initDssPath(char *dssdir) rc = snprintf_s(g_instance.datadir_cxt.multixactDir, MAXPGPATH, MAXPGPATH - 1, "%s/pg_multixact", dssdir); securec_check_ss(rc, "", ""); - rc = 
snprintf_s(g_instance.datadir_cxt.xlogDir, MAXPGPATH, MAXPGPATH - 1, "%s/pg_xlog%d", dssdir, + rc = snprintf_s(g_instance.datadir_cxt.xlogDir, MAXPGPATH, MAXPGPATH - 1, "%s/pg_xlog%d", g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name, g_instance.attr.attr_storage.dms_attr.instance_id); securec_check_ss(rc, "", ""); @@ -2150,14 +2150,14 @@ void initDSSConf(void) // check whether dss connect is successful. struct stat st; - if (stat(g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, &st) != 0 || !S_ISDIR(st.st_mode)) { + if (stat(g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name, &st) != 0 || !S_ISDIR(st.st_mode)) { ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("Could not connect dssserver, vgname: \"%s\", socketpath: \"%s\"", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name, g_instance.attr.attr_storage.dss_attr.ss_dss_conn_path), errhint("Check vgname and socketpath and restart later."))); } else { - char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name; // do not overwrite if (strncmp(g_instance.datadir_cxt.baseDir, dssdir, strlen(dssdir)) != 0) { diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index 06f573f0c1..14ae306e04 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -4926,13 +4926,24 @@ static void InitStorageConfigureNamesString() check_logical_decode_options_default, assign_logical_decode_options_default, NULL}, - {{"ss_dss_vg_name", + {{"ss_dss_data_vg_name", PGC_POSTMASTER, NODE_SINGLENODE, SHARED_STORAGE_OPTIONS, - gettext_noop("Sets the vg name of DSS node."), + gettext_noop("the data vg name of DSS node."), NULL}, - &g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, + 
&g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name, + "", + check_ss_dss_vg_name, + NULL, + NULL}, + {{"ss_dss_xlog_vg_name", + PGC_POSTMASTER, + NODE_SINGLENODE, + SHARED_STORAGE_OPTIONS, + gettext_noop("the xlog vg name of DSS node."), + NULL}, + &g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name, "", check_ss_dss_vg_name, NULL, diff --git a/src/common/backend/utils/misc/postgresql_single.conf.sample b/src/common/backend/utils/misc/postgresql_single.conf.sample index f23618c82c..100e853c1b 100644 --- a/src/common/backend/utils/misc/postgresql_single.conf.sample +++ b/src/common/backend/utils/misc/postgresql_single.conf.sample @@ -833,7 +833,8 @@ job_queue_processes = 10 # Number of concurrent jobs, optional: [0..1000] #ss_enable_aio = on #ss_enable_catalog_centralized = on #ss_instance_id = 0 -#ss_dss_vg_name = '' +#ss_dss_data_vg_name = '' +#ss_dss_xlog_vg_name = '' #ss_dss_conn_path = '' #ss_interconnect_channel_count = 16 #ss_work_thread_count = 32 diff --git a/src/common/port/tool_common.cpp b/src/common/port/tool_common.cpp index 84dc0bfbb7..00d28b3871 100644 --- a/src/common/port/tool_common.cpp +++ b/src/common/port/tool_common.cpp @@ -470,7 +470,7 @@ void freefile(char** lines) /* * read ss config, return enable_dss -* we will get ss_enable_dss, ss_dss_conn_path and ss_dss_vg_name. +* we will get ss_enable_dss, ss_dss_conn_path and ss_dss_data_vg_name. 
*/ bool ss_read_config(const char* pg_data) { @@ -511,7 +511,7 @@ bool ss_read_config(const char* pg_data) ss_instance_config.dss.socketpath = (char*)malloc(sizeof(char) * MAXPGPATH); ss_instance_config.dss.vgname = (char*)malloc(sizeof(char) * MAXPGPATH); (void)find_guc_optval((const char**)optlines, "ss_dss_conn_path", ss_instance_config.dss.socketpath); - (void)find_guc_optval((const char**)optlines, "ss_dss_vg_name", ss_instance_config.dss.vgname); + (void)find_guc_optval((const char**)optlines, "ss_dss_data_vg_name", ss_instance_config.dss.vgname); (void)find_guc_optval((const char**)optlines, "ss_instance_id", inst_id); ss_instance_config.dss.instance_id = atoi(inst_id); diff --git a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp index d33fa41eef..ca8f7df0b4 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp @@ -245,7 +245,7 @@ void ss_failover_dw_init_internal() * step 3: rebuild dw file and init self dw */ - char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name; int old_primary_id = g_instance.dms_cxt.SSReformerControl.primaryInstId; int self_id = g_instance.attr.attr_storage.dms_attr.instance_id; if (!g_instance.dms_cxt.SSRecoveryInfo.startup_reform) { diff --git a/src/gausskernel/ddes/adapter/ss_reform_common.cpp b/src/gausskernel/ddes/adapter/ss_reform_common.cpp index 19f22e5935..633588106b 100644 --- a/src/gausskernel/ddes/adapter/ss_reform_common.cpp +++ b/src/gausskernel/ddes/adapter/ss_reform_common.cpp @@ -182,7 +182,7 @@ XLogReaderState *SSXLogReaderAllocate(XLogPageReadCB pagereadfunc, void *private void SSGetRecoveryXlogPath() { errno_t rc = EOK; - char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name; rc = 
snprintf_s(g_instance.dms_cxt.SSRecoveryInfo.recovery_xlog_dir, MAXPGPATH, MAXPGPATH - 1, "%s/pg_xlog%d", dssdir, g_instance.dms_cxt.SSRecoveryInfo.recovery_inst_id); @@ -230,10 +230,10 @@ void SSDisasterGetXlogPathList() } struct dirent *entry; - DIR* dssdir = opendir(g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name); + DIR* dssdir = opendir(g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name); if (dssdir == NULL) { ereport(PANIC, (errcode_for_file_access(), errmsg("[SS] Error opening dssdir %s", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name))); + g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name))); } uint8_t len = strlen("pg_xlog"); @@ -244,7 +244,7 @@ void SSDisasterGetXlogPathList() rc = memmove_s(entry->d_name, MAX_PATH, entry->d_name + len, strlen(entry->d_name) - len + 1); securec_check_c(rc, "\0", "\0"); rc = snprintf_s(g_instance.dms_cxt.SSRecoveryInfo.xlog_list[index++], MAXPGPATH, MAXPGPATH - 1, - "%s/%s%d", g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, "pg_xlog", atoi(entry->d_name)); + "%s/%s%d", g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name, "pg_xlog", atoi(entry->d_name)); securec_check_ss(rc, "", ""); } } else { @@ -524,7 +524,7 @@ void SSGrantDSSWritePermission(void) pg_usleep(REFORM_WAIT_LONG); ereport(WARNING, (errmodule(MOD_DMS), errmsg("[SS reform] Failed to set DSS as primary, vgname: \"%s\", socketpath: \"%s\"", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name, + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name, g_instance.attr.attr_storage.dss_attr.ss_dss_conn_path), errhint("Check vgname and socketpath and restart later."))); } diff --git a/src/gausskernel/storage/access/transam/cbmparsexlog.cpp b/src/gausskernel/storage/access/transam/cbmparsexlog.cpp index 3c5739dab8..1946d7e0b7 100644 --- a/src/gausskernel/storage/access/transam/cbmparsexlog.cpp +++ b/src/gausskernel/storage/access/transam/cbmparsexlog.cpp @@ -935,7 +935,7 @@ static int CBMXLogPageRead(XLogReaderState 
*xlogreader, XLogRecPtr targetPagePtr targetPageOff = targetPagePtr % XLogSegSize; if (ENABLE_DSS) - dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name; /* * See if we need to switch to a new segment because the requested record diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 2c2be96c3d..0824609e05 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -14618,7 +14618,7 @@ static void CollectTableSpace(DIR *tblspcdir, List **tablespaces, StringInfo tbl const char *dataDir = NULL; int datadirpathlen = -1; - dataDir = is_dss_file(TBLSPCDIR) ? g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name : t_thrd.proc_cxt.DataDir; + dataDir = is_dss_file(TBLSPCDIR) ? g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name : t_thrd.proc_cxt.DataDir; datadirpathlen = strlen(dataDir); /* Collect information about all tablespaces */ while ((de = ReadDir(tblspcdir, "pg_tblspc")) != NULL) { diff --git a/src/gausskernel/storage/file/fd.cpp b/src/gausskernel/storage/file/fd.cpp index a15498bdff..4d67c105c8 100755 --- a/src/gausskernel/storage/file/fd.cpp +++ b/src/gausskernel/storage/file/fd.cpp @@ -878,7 +878,7 @@ int SSErgodicOpenXlogFile(XLogSegNo segno, int fileFlags, int fileMode) { char xlog_file_name[MAXPGPATH]; char xlog_file_full_path[MAXPGPATH]; - char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name; DIR* dir; int fd; struct dirent *entry; diff --git a/src/gausskernel/storage/replication/basebackup.cpp b/src/gausskernel/storage/replication/basebackup.cpp index 5c1d105ff4..395bc08d9b 100755 --- a/src/gausskernel/storage/replication/basebackup.cpp +++ b/src/gausskernel/storage/replication/basebackup.cpp @@ -192,7 +192,7 @@ static void send_xlog_location() int rc = 0; if 
(ENABLE_DSS) { - char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name; rc = snprintf_s(fullpath, sizeof(fullpath), sizeof(fullpath) - 1, "%s/pg_xlog%d", dssdir, g_instance.attr.attr_storage.dms_attr.instance_id); } else { @@ -862,7 +862,7 @@ void SendBaseBackup(BaseBackupCmd *cmd) if (ENABLE_DSS) { int rc = 0; char fullpath[MAXPGPATH] = {0}; - char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name; rc = snprintf_s(fullpath, MAXPGPATH, MAXPGPATH - 1, "%s/pg_tblspc", dssdir); securec_check_ss(rc, "", ""); @@ -1918,7 +1918,7 @@ static void SendTableSpaceForBackup(basebackup_options* opt, List* tablespaces, { ListCell *lc = NULL; int64 asize = 0; - char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name; + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name; if (ENABLE_DSS) { /* Add a node for all directory in dss*/ diff --git a/src/gausskernel/storage/replication/slot.cpp b/src/gausskernel/storage/replication/slot.cpp index 36a9ec896d..4ff70df04e 100644 --- a/src/gausskernel/storage/replication/slot.cpp +++ b/src/gausskernel/storage/replication/slot.cpp @@ -2630,7 +2630,7 @@ void GetReplslotPath(char *path) { if (ENABLE_DSS) { errno_t rc = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, "%s/pg_replslot", - g_instance.attr.attr_storage.dss_attr.ss_dss_vg_name); + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name); securec_check_ss(rc, "\0", "\0"); } else { errno_t rc = strcpy_s(path, MAXPGPATH, "pg_replslot"); diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h index b7e0d80d14..06e8db311b 100755 --- a/src/include/knl/knl_guc/knl_instance_attr_storage.h +++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h @@ -91,7 +91,8 @@ typedef struct knl_instance_attr_nvm { typedef struct 
knl_instance_attr_dss { bool ss_enable_dss; - char* ss_dss_vg_name; + char* ss_dss_data_vg_name; + char* ss_dss_xlog_vg_name; char* ss_dss_conn_path; } knl_instance_attr_dss; diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source index d43f31df06..4ecb4f132e 100644 --- a/src/test/regress/output/recovery_2pc_tools.source +++ b/src/test/regress/output/recovery_2pc_tools.source @@ -644,7 +644,8 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c sql_note | bool | | | sql_use_spacelimit | integer | kB | -1 | 2147483647 ss_dss_conn_path | string | | | - ss_dss_vg_name | string | | | + ss_dss_data_vg_name | string | | | + ss_dss_xlog_vg_name | string | | | ss_enable_aio | bool | | | ss_enable_bcast_snapshot | bool | | | ss_enable_catalog_centralized | bool | | | diff --git a/src/test/regress/pg_regress.cpp b/src/test/regress/pg_regress.cpp index a5d5a6809c..2e06a819a5 100644 --- a/src/test/regress/pg_regress.cpp +++ b/src/test/regress/pg_regress.cpp @@ -1947,9 +1947,8 @@ static void initdb_node_info(bool standby) (void)snprintf(ss_extra_args, sizeof(ss_extra_args), - SYSTEMQUOTE "--vgname=\"+data,+log%i\" --enable-dss --dms_url=\"%s\" -I %d " + SYSTEMQUOTE "--vgname=\"+data,+log\" --enable-dss --dms_url=\"%s\" -I %d " "--socketpath=\"UDS:%s/dss_home%d/.dss_unix_d_socket\"" SYSTEMQUOTE, - i, ss_standby_read ? 
"0:127.0.0.1:1611,1:127.0.0.1:1711" : "0:127.0.0.1:1611", i, temp_install, diff --git a/src/test/ss/build_ss_database.sh b/src/test/ss/build_ss_database.sh index 035c7360a6..25d0ec7d30 100644 --- a/src/test/ss/build_ss_database.sh +++ b/src/test/ss/build_ss_database.sh @@ -59,9 +59,9 @@ init_gaussdb() { inst_id=$1 dss_home=$2 - echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname=\"+data,+log${inst_id}\" --enable-dss --dms_url=\"${dms_url}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\"" + echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname=\"+data,+log\" --enable-dss --dms_url=\"${dms_url}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\"" - ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log${inst_id}" --enable-dss --dms_url="${dms_url}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" + ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log" --enable-dss --dms_url="${dms_url}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" } start_gaussdb() diff --git a/src/test/ss/build_ss_database_common.sh b/src/test/ss/build_ss_database_common.sh index e871040456..701228036b 100644 --- a/src/test/ss/build_ss_database_common.sh +++ b/src/test/ss/build_ss_database_common.sh @@ -91,13 +91,13 @@ init_gaussdb() nodedata_cfg=$4 if [ $# == 5 ]; then dorado_shared_disk=$5 - echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname=\"+data,+log${inst_id}\" --enable-dss --dms_url=\"${nodedata_cfg}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\" -d -n -g ${dorado_shared_disk}" + echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} 
--vgname=\"+data,+log\" --enable-dss --dms_url=\"${nodedata_cfg}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\" -d -n -g ${dorado_shared_disk}" - ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log${inst_id}" --enable-dss --dms_url="${nodedata_cfg}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" -d -n -g ${dorado_shared_disk} + ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log" --enable-dss --dms_url="${nodedata_cfg}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" -d -n -g ${dorado_shared_disk} else - echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname=\"+data,+log${inst_id}\" --enable-dss --dms_url=\"${nodedata_cfg}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\"" + echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname=\"+data,+log\" --enable-dss --dms_url=\"${nodedata_cfg}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\"" - ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log${inst_id}" --enable-dss --dms_url="${nodedata_cfg}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" + ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log" --enable-dss --dms_url="${nodedata_cfg}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" fi } diff --git a/src/test/ss/conf_start_dss_inst.sh b/src/test/ss/conf_start_dss_inst.sh index 57bac8dc60..b0e0b04eea 100644 --- a/src/test/ss/conf_start_dss_inst.sh +++ b/src/test/ss/conf_start_dss_inst.sh @@ -56,11 +56,7 @@ init_dss_conf() mkdir -p ${dss_home}/log echo "data:${simu_path}/dss_data.dmp" > ${dss_home}/cfg/dss_vg_conf.ini - for i 
in `seq 0 $last_id` - do - echo "log${i}:${simu_path}/dss_log${i}.dmp" >> ${dss_home}/cfg/dss_vg_conf.ini - done - + echo "log:${simu_path}/dss_log.dmp" >> ${dss_home}/cfg/dss_vg_conf.ini echo "INST_ID = ${inst_id}" > ${dss_home}/cfg/dss_inst.ini echo "_LOG_LEVEL = 255" >> ${dss_home}/cfg/dss_inst.ini echo "_LOG_BACKUP_FILE_COUNT = 128" >> ${dss_home}/cfg/dss_inst.ini @@ -84,22 +80,16 @@ create_vg() truncate -s `expr ${SIMULATE_SIZE} / 1000`G ${simu_path}/dss_data.dmp chmod 777 ${simu_path}/dss_data.dmp - for i in `seq 0 $last_id` - do - echo " =========== truncate `expr ${LOG_SIZE} / 1000`G =========== " -# dd if=/dev/zero bs=1048576 count=${LOG_SIZE} of=${simu_path}/dss_log${i}.dmp - truncate -s `expr ${SIMULATE_SIZE} / 1000`G ${simu_path}/dss_log${i}.dmp - chmod 777 ${simu_path}/dss_log${i}.dmp - done + echo " =========== truncate `expr ${LOG_SIZE} / 1000`G =========== " +# dd if=/dev/zero bs=1048576 count=${LOG_SIZE} of=${simu_path}/dss_log.dmp + truncate -s `expr ${SIMULATE_SIZE} / 1000`G ${simu_path}/dss_log.dmp + chmod 777 ${simu_path}/dss_log.dmp echo "> creating volume group ${simu_path}/dss_data.dmp" ${GAUSSHOME}/bin/dsscmd cv -g data -v ${simu_path}/dss_data.dmp -s 2048 -D ${dss_home} - for i in `seq 0 $last_id` - do - echo "> creating volume group ${simu_path}/dss_log${i}.dmp" - ${GAUSSHOME}/bin/dsscmd cv -g log${i} -v ${simu_path}/dss_log${i}.dmp -s 2048 -D ${dss_home} - done + echo "> creating volume group ${simu_path}/dss_log.dmp" + ${GAUSSHOME}/bin/dsscmd cv -g log -v ${simu_path}/dss_log.dmp -s 2048 -D ${dss_home} } start_dss() diff --git a/src/test/ss/copy_xlog_to_private_vg.sh b/src/test/ss/copy_xlog_to_private_vg.sh index a107dbe259..2c54cb1907 100644 --- a/src/test/ss/copy_xlog_to_private_vg.sh +++ b/src/test/ss/copy_xlog_to_private_vg.sh @@ -11,21 +11,21 @@ main() for inst_id in `seq 0 $last_id` do - ${dsscmd_bin} mkdir +log${inst_id} pg_xlog${inst_id} UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} mkdir +log${inst_id}/pg_xlog${inst_id} 
archive_status UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} cp +data/pg_xlog${inst_id}/000000010000000000000001 +log${inst_id}/pg_xlog${inst_id}/000000010000000000000001 UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} mkdir +log pg_xlog${inst_id} UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} mkdir +log/pg_xlog${inst_id} archive_status UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} cp +data/pg_xlog${inst_id}/000000010000000000000001 +log/pg_xlog${inst_id}/000000010000000000000001 UDS:${dss_home}/.dss_unix_d_socket ${dsscmd_bin} rm +data/pg_xlog${inst_id}/000000010000000000000001 UDS:${dss_home}/.dss_unix_d_socket ${dsscmd_bin} rmdir +data/pg_xlog${inst_id}/archive_status UDS:${dss_home}/.dss_unix_d_socket ${dsscmd_bin} rmdir +data/pg_xlog${inst_id} UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} ln +log${inst_id}/pg_xlog${inst_id} +data/pg_xlog${inst_id} UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} ln +log/pg_xlog${inst_id} +data/pg_xlog${inst_id} UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} mkdir +log${inst_id} pg_doublewrite${inst_id} UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} cp +data/pg_doublewrite${inst_id}/pg_dw_meta +log${inst_id}/pg_doublewrite${inst_id}/pg_dw_meta UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} cp +data/pg_doublewrite${inst_id}/pg_dw_0 +log${inst_id}/pg_doublewrite${inst_id}/pg_dw_0 UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} cp +data/pg_doublewrite${inst_id}/pg_dw_single +log${inst_id}/pg_doublewrite${inst_id}/pg_dw_single UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} mkdir +log pg_doublewrite${inst_id} UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} cp +data/pg_doublewrite${inst_id}/pg_dw_meta +log/pg_doublewrite${inst_id}/pg_dw_meta UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} cp +data/pg_doublewrite${inst_id}/pg_dw_0 +log/pg_doublewrite${inst_id}/pg_dw_0 UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} cp +data/pg_doublewrite${inst_id}/pg_dw_single 
+log/pg_doublewrite${inst_id}/pg_dw_single UDS:${dss_home}/.dss_unix_d_socket ${dsscmd_bin} rmdir +data/pg_doublewrite${inst_id} -r UDS:${dss_home}/.dss_unix_d_socket - ${dsscmd_bin} ln +log${inst_id}/pg_doublewrite${inst_id} +data/pg_doublewrite${inst_id} UDS:${dss_home}/.dss_unix_d_socket + ${dsscmd_bin} ln +log/pg_doublewrite${inst_id} +data/pg_doublewrite${inst_id} UDS:${dss_home}/.dss_unix_d_socket ${dsscmd_bin} ls +data/pg_xlog${inst_id} UDS:${dss_home}/.dss_unix_d_socket ${dsscmd_bin} ls +data/pg_doublewrite${inst_id} UDS:${dss_home}/.dss_unix_d_socket diff --git a/src/test/ss/ss_database_build_env.sh b/src/test/ss/ss_database_build_env.sh index 4ff2585317..82b795521f 100644 --- a/src/test/ss/ss_database_build_env.sh +++ b/src/test/ss/ss_database_build_env.sh @@ -82,9 +82,9 @@ init_gaussdb() { inst_id=$1 dss_home=$2 - echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname=\"+data,+log${inst_id}\" --enable-dss --dms_url=\"${nodedate_cfg}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\"" + echo "${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname=\"+data,+log\" --enable-dss --dms_url=\"${nodedate_cfg}\" -I ${inst_id} --socketpath=\"UDS:${dss_home}/.dss_unix_d_socket\"" - ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log${inst_id}" --enable-dss --dms_url="${nodedate_cfg}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" + ${GAUSSHOME}/bin/gs_initdb -D ${SS_DATA}/dn${inst_id} --nodename=single_node -w ${SUPER_PASSWORD} --vgname="+data,+log" --enable-dss --dms_url="${nodedate_cfg}" -I ${inst_id} --socketpath="UDS:${dss_home}/.dss_unix_d_socket" } set_gausdb_port() -- Gitee From 5c49a0559f9539e82e423cd769da3447c5a531e6 Mon Sep 17 00:00:00 2001 From: gentle_hu Date: Fri, 19 Jul 2024 09:56:26 +0800 Subject: [PATCH 066/347] 
=?UTF-8?q?=E5=A4=87=E6=9C=BA=E8=AF=BB=E9=A1=B5?= =?UTF-8?q?=E9=9D=A2=E6=97=B6=E5=A6=82=E6=9E=9C=E5=A4=B1=E8=B4=A5=E5=88=99?= =?UTF-8?q?=E6=94=B9=E4=B8=BA=E6=9C=AC=E5=9C=B0=E5=8A=A0X=E9=94=81dms?= =?UTF-8?q?=E5=8A=A0S=E9=94=81=E8=AF=B7=E6=B1=82=EF=BC=8C=E9=98=B2?= =?UTF-8?q?=E6=AD=A2=E5=90=8C=E6=97=B6=E4=B8=BB=E6=9C=BA=E8=AF=B7=E6=B1=82?= =?UTF-8?q?=E5=86=99=E9=A1=B5=E9=9D=A2=E6=97=B6=E6=8B=BF=E4=B8=8D=E5=88=B0?= =?UTF-8?q?=E9=94=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ddes/adapter/ss_dms_bufmgr.cpp | 6 +-- src/gausskernel/storage/buffer/bufmgr.cpp | 40 ++++++++++++++++++- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp index 606fe99073..644751ff6b 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp @@ -1019,7 +1019,7 @@ void SSMarkBufferDirtyForERTO(RedoBufferInfo* bufferinfo) bool SSLWLockAcquireTimeout(LWLock* lock, LWLockMode mode) { bool get_lock = false; - int wait_tickets = 1000; + int wait_tickets = (SS_PRIMARY_MODE) ? 
500 : 5000; int cur_tickets = 0; do { @@ -1036,8 +1036,8 @@ bool SSLWLockAcquireTimeout(LWLock* lock, LWLockMode mode) } while (true); if (!get_lock) { - ereport(WARNING, (errcode(MOD_DMS), (errmsg("[SS lwlock] request LWLock:%p timeout, LWLockMode:%d, timeout:1s", - lock, mode)))); + ereport(WARNING, (errcode(MOD_DMS), (errmsg("[SS lwlock] request LWLock:%p timeout, LWLockMode:%d, timeout:%dms", + lock, mode, wait_tickets)))); } return get_lock; } diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index fadb38ceb0..0933a55a3b 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -6248,6 +6248,8 @@ void LockBuffer(Buffer buffer, int mode) { volatile BufferDesc *buf = NULL; bool need_update_lockid = false; + bool dms_standby_retry_read = false; + int origin_mode = mode; Assert(BufferIsValid(buffer)); if (BufferIsLocal(buffer)) { @@ -6275,7 +6277,7 @@ retry: * need to transfer newest page version by DMS. */ if (ENABLE_DMS && mode != BUFFER_LOCK_UNLOCK) { - LWLockMode lock_mode = (mode == BUFFER_LOCK_SHARE) ? LW_SHARED : LW_EXCLUSIVE; + LWLockMode lock_mode = (origin_mode == BUFFER_LOCK_SHARE) ? LW_SHARED : LW_EXCLUSIVE; Buffer tmp_buffer; dms_buf_ctrl_t *buf_ctrl = GetDmsBufCtrl(buffer - 1); ReadBufferMode read_mode = RBM_NORMAL; @@ -6312,10 +6314,44 @@ retry: g_instance.dms_cxt.SSRecoveryInfo.recovery_trapped_in_page_request = true; } + /* + * If two standby sessions request to read a page at the same time, and the primary session + * requests to write to the page, it is easy to enter a deadlock state. + * + * Because the two sessions on the standby node will hold the content lock at the shared level, + * at the same time, even if one of them fails, release the lock and sleep, the other will hold + * it during this time, and the MES thread from the host will never get the exclusive lock on + * this page. 
+ * + * However, the session on the primary side holds the exclusive lock, which prevents the MES + * for standby from taking the shared lock, which eventually leads to a deadlock. + * + * Therefore, after the standby session failed to get the page from dms for the first time, + * the local content lock is changed to exclusive, in this way, the standby session will not + * hold the content shared lock all the time, give the MES from the primary a chance to get it, + * and the timeout time of the primary and standby servers is modified to open the unlocking + * time window. + */ + if (!dms_standby_retry_read && SS_STANDBY_MODE) { + dms_standby_retry_read = true; + mode = BUFFER_LOCK_EXCLUSIVE; + } pg_usleep(5000L); goto retry; - } + } else if (dms_standby_retry_read) { + /* + * We're on standby, and we have got the page, but we're holding an exclusive lock, + * which isn't good, so release the lock and start over. + * + * A good idea would be to add the ability to lock downgrade for LWLock. 
+ */ + mode = origin_mode; + dms_standby_retry_read = false; + LWLockRelease(buf->content_lock); + goto retry; + } } + } /* -- Gitee From 315df77d0609471e8d3ab51b95c99eafd19e3ecf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Fri, 19 Jul 2024 15:39:14 +0800 Subject: [PATCH 067/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dindex=20include?= =?UTF-8?q?=E9=9D=9E=E9=94=AE=E5=88=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/indexcmds.cpp | 45 ++++++++++++++++++- .../regress/expected/test_index_include.out | 12 +++++ src/test/regress/parallel_schedule0B | 2 +- src/test/regress/sql/test_index_include.sql | 13 ++++++ 4 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 src/test/regress/expected/test_index_include.out create mode 100644 src/test/regress/sql/test_index_include.sql diff --git a/src/gausskernel/optimizer/commands/indexcmds.cpp b/src/gausskernel/optimizer/commands/indexcmds.cpp index 666610e3a6..7d09b42ee4 100644 --- a/src/gausskernel/optimizer/commands/indexcmds.cpp +++ b/src/gausskernel/optimizer/commands/indexcmds.cpp @@ -648,6 +648,48 @@ static bool parseVisibleStateFromOptions(List* options) return visible; } +static bool equalIncludeIndex(IndexElem* index1, IndexElem* index2) +{ + SortByNulls no = index1->nulls_ordering; + // ignore nulls_ordering + index1->nulls_ordering = index2->nulls_ordering; + + bool res = equal(index1, index2); + index1->nulls_ordering = no; + return res; +} + +static bool listMemberIncludeIndex(List* list, IndexElem* index) +{ + ListCell* cell = NULL; + + foreach (cell, list) { + if (equalIncludeIndex((IndexElem *)lfirst(cell), index)) { + return true; + } + } + + return false; +} + +static List* listIntersectionIncludeIndex(List *list1, List *list2) +{ + List *result = NIL; + ListCell *cell = NULL; + + if (list1 == NIL || list2 == NIL) { + return NIL; + } + + foreach (cell, list1) { + if 
(listMemberIncludeIndex(list2, (IndexElem *)lfirst(cell))) { + result = lappend(result, lfirst(cell)); + } + } + + return result; +} + /* * DefineIndex * Creates a new index. @@ -937,7 +979,8 @@ ObjectAddress DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, errmsg("create a index with include columns is only supported in ubtree"))); } - if (list_intersection(stmt->indexParams, stmt->indexIncludingParams) != NIL) { + if ((u_sess->attr.attr_sql.dolphin && listIntersectionIncludeIndex(stmt->indexParams, stmt->indexIncludingParams) != NIL) || + (list_intersection(stmt->indexParams, stmt->indexIncludingParams) != NIL)) { ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("included columns must not intersect with key columns"))); diff --git a/src/test/regress/expected/test_index_include.out b/src/test/regress/expected/test_index_include.out new file mode 100644 index 0000000000..d31b4feaa5 --- /dev/null +++ b/src/test/regress/expected/test_index_include.out @@ -0,0 +1,12 @@ +create schema test_index_include; +set current_schema to test_index_include; +create database dbb dbcompatibility 'b'; +\c dbb +create table t1 (id int not null, name text) with (fillfactor=20, orientation=row, storage_type=ustore) nocompress tablespace pg_default; +insert into t1 values(generate_series(1,9), 'col' || generate_series(1,9)); +create index i1 on t1 using ubtree(id) include(id) with(indexsplit=insertpt) tablespace pg_default where id > 10; +ERROR: included columns must not intersect with key columns +\c regression +drop database if exists dbb; +reset current_schema; +drop schema test_index_include cascade; diff --git a/src/test/regress/parallel_schedule0B b/src/test/regress/parallel_schedule0B index fdbd2e30b6..0d8126a406 100644 --- a/src/test/regress/parallel_schedule0B +++ b/src/test/regress/parallel_schedule0B @@ -457,5 +457,5 @@ test: udf_crem create_c_function # procedure, Function Test #test: create_procedure test: create_function -test: 
pg_compatibility +test: pg_compatibility test_index_include diff --git a/src/test/regress/sql/test_index_include.sql b/src/test/regress/sql/test_index_include.sql new file mode 100644 index 0000000000..1fd769a060 --- /dev/null +++ b/src/test/regress/sql/test_index_include.sql @@ -0,0 +1,13 @@ +create schema test_index_include; +set current_schema to test_index_include; + +create database dbb dbcompatibility 'b'; +\c dbb +create table t1 (id int not null, name text) with (fillfactor=20, orientation=row, storage_type=ustore) nocompress tablespace pg_default; +insert into t1 values(generate_series(1,9), 'col' || generate_series(1,9)); +create index i1 on t1 using ubtree(id) include(id) with(indexsplit=insertpt) tablespace pg_default where id > 10; + +\c regression +drop database if exists dbb; +reset current_schema; +drop schema test_index_include cascade; -- Gitee From c82d9e72ac72c6f549b0b562bf169c1a57ccd32b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Fri, 19 Jul 2024 18:15:26 +0800 Subject: [PATCH 068/347] =?UTF-8?q?guc=E5=8F=82=E6=95=B0disable=5Fkeyword?= =?UTF-8?q?=5Foptions=E9=80=82=E9=85=8D=E6=8F=92=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc/guc_sql.cpp | 23 +++++++++++++------ src/include/knl/knl_session.h | 1 + .../regress/expected/ignore_keyword_list.out | 3 +-- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index 8a989d8201..6c2249f116 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -3693,6 +3693,19 @@ static void assign_behavior_compat_options(const char* newval, void* extra) u_sess->utils_cxt.behavior_compat_flags = result; } +typedef int16 (*getIgnoreKeywordTokenHook)(const char *item); + +static int get_ignore_keyword_token(const char *item) +{ 
+ int token = -1; + if (u_sess->hook_cxt.getIgnoreKeywordTokenHook != NULL) { + token = ((getIgnoreKeywordTokenHook)u_sess->hook_cxt.getIgnoreKeywordTokenHook)(item); + } else { + token = semtc_get_ignore_keyword_token(item); + } + return token; +} + static bool check_disable_keyword_options(char **newval, void **extra, GucSource source) { char *rawstring = NULL; @@ -3714,13 +3727,9 @@ static bool check_disable_keyword_options(char **newval, void **extra, GucSource foreach(cell, elemlist) { const char *item = (const char *)lfirst(cell); - int token = semtc_get_ignore_keyword_token(item); + int token = get_ignore_keyword_token(item); if (token < 0) { - GUC_check_errdetail("invalid disable keyword \"%s\"", item); - MemoryContextSwitchTo(old_context); - pfree(rawstring); - list_free(elemlist); - return false; + ereport(WARNING, (errmsg("invalid disable keyword \"%s\", will be skipped.", item))); } } MemoryContextSwitchTo(old_context); @@ -3747,7 +3756,7 @@ static void assign_disable_keyword_options(const char *newval, void *extra) { const char *item = (const char *)lfirst(cell); - int token = semtc_get_ignore_keyword_token(item); + int token = get_ignore_keyword_token(item); if (token >= 0) { result = lappend_int(result, token); } diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 0eb7b65522..da946348dc 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -2932,6 +2932,7 @@ typedef struct knl_u_hook_context { void *groupingplannerHook; void *replaceNullOrNotHook; void *nullsMinimalPolicyHook; + void *getIgnoreKeywordTokenHook; } knl_u_hook_context; typedef struct knl_u_libsw_context { diff --git a/src/test/regress/expected/ignore_keyword_list.out b/src/test/regress/expected/ignore_keyword_list.out index 3445064cf2..8e06defea3 100755 --- a/src/test/regress/expected/ignore_keyword_list.out +++ b/src/test/regress/expected/ignore_keyword_list.out @@ -12,8 +12,7 @@ select 1 interval; --ok (1 row) set 
disable_keyword_options = 'intervalxx'; -ERROR: invalid value for parameter "disable_keyword_options": "intervalxx" -DETAIL: invalid disable keyword "intervalxx" +WARNING: invalid disable keyword "intervalxx", will be skipped. set disable_keyword_options = 'interval,interval'; select 1 interval; --ok interval -- Gitee From 0c915621d2d08cd3ff66ab361830cfa713c26b39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Fri, 19 Jul 2024 19:04:54 +0800 Subject: [PATCH 069/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dguc=E5=8F=82=E6=95=B0?= =?UTF-8?q?behavior=5Fcompat=5Foptions=E9=87=8D=E5=A4=8D=E8=AE=BE=E7=BD=AE?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc/guc_sql.cpp | 3 ++- src/test/regress/expected/numeric_hide_tailing_zero.out | 7 +++++++ src/test/regress/sql/numeric_hide_tailing_zero.sql | 2 ++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index 8a989d8201..0793210ca7 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -3682,7 +3682,8 @@ static void assign_behavior_compat_options(const char* newval, void* extra) for (start = 0; start < OPT_MAX; start++) { const char* item = (const char*)lfirst(cell); - if (strcmp(item, behavior_compat_options[start].name) == 0) + if (strcmp(item, behavior_compat_options[start].name) == 0 + && (result & behavior_compat_options[start].flag) == 0) result += behavior_compat_options[start].flag; } } diff --git a/src/test/regress/expected/numeric_hide_tailing_zero.out b/src/test/regress/expected/numeric_hide_tailing_zero.out index f621c39704..2b619fad3c 100644 --- a/src/test/regress/expected/numeric_hide_tailing_zero.out +++ b/src/test/regress/expected/numeric_hide_tailing_zero.out @@ -24,6 +24,13 @@ select cast(009.0000 as 
numeric(15,10)); 9 (1 row) +set behavior_compat_options='hide_tailing_zero,hide_tailing_zero'; +select cast(123.123 as numeric(15,10)); + numeric +--------- + 123.123 +(1 row) + set behavior_compat_options=''; set behavior_compat_options='truncate_numeric_tail_zero'; create table test_num_zero (a number,b int); diff --git a/src/test/regress/sql/numeric_hide_tailing_zero.sql b/src/test/regress/sql/numeric_hide_tailing_zero.sql index a683cadfe6..63c16e5ecf 100644 --- a/src/test/regress/sql/numeric_hide_tailing_zero.sql +++ b/src/test/regress/sql/numeric_hide_tailing_zero.sql @@ -4,6 +4,8 @@ set behavior_compat_options='hide_tailing_zero'; select cast(123.123 as numeric(15,10)); select cast(0 as numeric(15,10)); select cast(009.0000 as numeric(15,10)); +set behavior_compat_options='hide_tailing_zero,hide_tailing_zero'; +select cast(123.123 as numeric(15,10)); set behavior_compat_options=''; set behavior_compat_options='truncate_numeric_tail_zero'; -- Gitee From 2255a36d382752188329ff84c6732e039999ff91 Mon Sep 17 00:00:00 2001 From: wangxudong Date: Fri, 19 Jul 2024 15:32:54 +0800 Subject: [PATCH 070/347] =?UTF-8?q?DSS=E6=8E=A8=E7=82=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 4 ++-- src/test/ss/conf_start_dss_inst.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index e0e6760991..0fa5966c29 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ dms_commit_id=1dcd3d829655f517e24ba753c770f1b45cd5edf6 -dss_commit_id=04b09e0414525a7bb78b8baaabfedcc675f91102 -cbb_commit_id=d22f1e92dc9fc75efa8f0f027321faf0fd1c770b +dss_commit_id=b714d964156722f4353840c0f5bb66c92158e3c6 +cbb_commit_id=fff8dd7eb442c6aa9f50cd88cd7f3c2357325a3e diff --git a/src/test/ss/conf_start_dss_inst.sh b/src/test/ss/conf_start_dss_inst.sh index 28e965f922..4a1c58c8be 
100644 --- a/src/test/ss/conf_start_dss_inst.sh +++ b/src/test/ss/conf_start_dss_inst.sh @@ -62,7 +62,7 @@ init_dss_conf() echo "_LOG_BACKUP_FILE_COUNT = 128" >> ${dss_home}/cfg/dss_inst.ini echo "_LOG_MAX_FILE_SIZE = 100M" >> ${dss_home}/cfg/dss_inst.ini echo "LSNR_PATH = ${dss_home}" >> ${dss_home}/cfg/dss_inst.ini - echo "DISK_LOCK_FILE_PATH = ${lock_path}" >> ${dss_home}/cfg/dss_inst.ini + echo "_DISK_LOCK_FILE_PATH = ${lock_path}" >> ${dss_home}/cfg/dss_inst.ini echo "DSS_NODES_LIST = ${mes_cfg}" >> ${dss_home}/cfg/dss_inst.ini } -- Gitee From 636fd721c7672718421d8267cb81ec81653198c1 Mon Sep 17 00:00:00 2001 From: shirley_zhengx Date: Sat, 20 Jul 2024 10:10:27 +0800 Subject: [PATCH 071/347] fix reform stay PM_WAIT_BACKENDS and log style --- .../ddes/adapter/ss_dms_callback.cpp | 39 ++++++++++--------- src/gausskernel/po/zh_CN.po | 4 +- .../process/postmaster/postmaster.cpp | 5 ++- .../storage/access/transam/xlog.cpp | 20 +++++----- 4 files changed, 35 insertions(+), 33 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 253f582c68..20ff559cb9 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -493,31 +493,32 @@ static void CBSwitchoverResult(void *db_handle, int result) static int SetPrimaryIdOnStandby(int primary_id, unsigned long long list_stable) { + char* type_string = NULL; + type_string = SSGetLogHeaderTypeStr(); + for (int ntries = 0;; ntries++) { SSReadControlFile(REFORM_CTRL_PAGE); /* need to double check */ if (g_instance.dms_cxt.SSReformerControl.primaryInstId == primary_id && g_instance.dms_cxt.SSReformerControl.list_stable == list_stable) { ereport(LOG, (errmodule(MOD_DMS), - errmsg("[SS %s] Reform success, this is a standby:%d confirming new primary:%d, list_stable:%llu, " - "confirm ntries=%d.", - SS_PERFORMING_SWITCHOVER ? 
"switchover" : "reform", SS_MY_INST_ID, primary_id, list_stable, - ntries))); + errmsg("%s Reform success, this is a standby:%d confirming new primary:%d, list_stable:%llu, " + "confirm ntries=%d.", type_string, SS_MY_INST_ID, primary_id, list_stable, ntries))); return DMS_SUCCESS; } else { if (dms_reform_failed()) { ereport(ERROR, - (errmodule(MOD_DMS), errmsg("[SS %s] Failed to confirm new primary: %d, list_stable:%llu, " + (errmodule(MOD_DMS), errmsg("%s Failed to confirm new primary: %d, list_stable:%llu, " "control file indicates primary is %d, list_stable%llu; dms reform failed.", - SS_PERFORMING_SWITCHOVER ? "switchover" : "reform", (int)primary_id, list_stable, + type_string, (int)primary_id, list_stable, g_instance.dms_cxt.SSReformerControl.primaryInstId, g_instance.dms_cxt.SSReformerControl.list_stable))); return DMS_ERROR; } if (ntries >= WAIT_REFORM_CTRL_REFRESH_TRIES) { ereport(ERROR, - (errmodule(MOD_DMS), errmsg("[SS %s] Failed to confirm new primary: %d, list_stable:%llu, " + (errmodule(MOD_DMS), errmsg("%s Failed to confirm new primary: %d, list_stable:%llu, " " control file indicates primary is %d, list_stable%llu; wait timeout.", - SS_PERFORMING_SWITCHOVER ? "switchover" : "reform", (int)primary_id, list_stable, + type_string, (int)primary_id, list_stable, g_instance.dms_cxt.SSReformerControl.primaryInstId, g_instance.dms_cxt.SSReformerControl.list_stable))); return DMS_ERROR; @@ -552,8 +553,8 @@ static int CBSaveStableList(void *db_handle, unsigned long long list_stable, uns LWLockRelease(ControlFileLock); Assert(g_instance.dms_cxt.SSReformerControl.primaryInstId == (int)primary_id); - ereport(LOG, (errmodule(MOD_DMS), errmsg("[SS %s] set current instance:%d as primary, list_stable:%llu.", - SS_PERFORMING_SWITCHOVER ? 
"switchover" : "reform", primary_id, list_stable))); + ereport(LOG, (errmodule(MOD_DMS), errmsg("[SS reform] set current instance:%d as primary, list_stable:%llu.", + primary_id, list_stable))); ret = DMS_SUCCESS; } else { /* we are on standby */ LWLockRelease(ControlFileLock); @@ -1999,6 +2000,15 @@ static int CBReformDoneNotify(void *db_handle) g_instance.dms_cxt.SSReformInfo.reform_end_time = GetCurrentTimestamp(); g_instance.dms_cxt.SSReformInfo.reform_success = true; + /* + * Only two kind of condition: + * 1.Primary or standby restart in single node mode, other nodes in cluster is set PM_WAIT_REFORM. + * 2.In failover, standby no promoting as priamey is set PM_WAIT_BACKENDS. + */ + if (pmState == PM_WAIT_REFORM || + (SS_PERFORMING_FAILOVER && SS_STANDBY_MODE && pmState == PM_WAIT_BACKENDS)) { + pmState = PM_RUN; + } ereport(LOG, (errmodule(MOD_DMS), errmsg("[SS reform] Reform success, instance:%d is running.", @@ -2011,15 +2021,6 @@ static int CBReformDoneNotify(void *db_handle) g_instance.dms_cxt.SSRecoveryInfo.realtime_build_in_reform = false; g_instance.dms_cxt.SSReformInfo.in_reform = false; - /* - * Only two kand of condition: - * 1.Primary or standby restart in single node mode, other nodes in cluster is set PM_WAIT_REFORM. - * 2.In failover, standby no promoting as priamey is set PM_WAIT_BACKENDS. - */ - if (pmState == PM_WAIT_REFORM || - (SS_PERFORMING_FAILOVER && SS_STANDBY_MODE && pmState == PM_WAIT_BACKENDS)) { - pmState = PM_RUN; - } ereport(LOG, (errmodule(MOD_DMS), errmsg("[SS reform] reform done: pmState=%d, SSClusterState=%d, demotion=%d-%d, " "rec=%d, dmsStatus=%d.", pmState, g_instance.dms_cxt.SSClusterState, g_instance.demotion, t_thrd.walsender_cxt.WalSndCtl->demotion, diff --git a/src/gausskernel/po/zh_CN.po b/src/gausskernel/po/zh_CN.po index 40bbdfd582..0a06f104d8 100644 --- a/src/gausskernel/po/zh_CN.po +++ b/src/gausskernel/po/zh_CN.po @@ -89547,12 +89547,12 @@ msgstr "[SS reform] EndOfLog:%lu." 
#: storage/access/transam/xlog.cpp:11006 #, c-format -msgid "[SS switchover/SS failover/SS normal reform] start full checkpoint." +msgid "[SS reform][SS failover][SS switchover] start full checkpoint." msgstr "[SS切换/SS故障切换/SS正常reform]启动完整检查点。" #: storage/access/transam/xlog.cpp:11015 #, c-format -msgid "[SS switchover/SS failover/SS normal reform] finished full checkpointand update control file" +msgid "[SS reform][SS failover][SS switchover] finished full checkpointand update control file" msgstr "[SS切换/SS故障切换/SS正常reform]完成完整检查点和更新控制文件" #: storage/access/transam/xlog.cpp:11047 diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index 8a081a8967..ab328fa98f 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -3967,7 +3967,8 @@ static int ServerLoop(void) g_instance.dms_cxt.SSRecoveryInfo.ondemand_realtime_build_status = READY_TO_BUILD; g_instance.pid_cxt.StartupPID = initialize_util_thread(STARTUP); Assert(g_instance.pid_cxt.StartupPID != 0); - ereport(LOG, (errmsg("[On-demand] Node:%d ondemand realtime build start, set status to READY_TO_BUILD.", SS_MY_INST_ID))); + ereport(LOG, (errmsg("[SS][On-demand] Node:%d ondemand realtime build start, " + "set status to READY_TO_BUILD.", SS_MY_INST_ID))); } } @@ -15425,7 +15426,7 @@ void SSOndemandProcExitIfStayWaitBackends() } if (pmState == PM_WAIT_BACKENDS) { ereport(WARNING, (errmodule(MOD_DMS), - errmsg("[On-demand] Proc exit because pmState stay PM_WAIT_BACKENDS for %d times, " + errmsg("[SS reform][On-demand] Proc exit because pmState stay PM_WAIT_BACKENDS for %d times, " "when reform failed and in ondemand recovery, to avoid pmState being stuck in PM_WAIT_BACKENDS.", WAIT_PMSTATE_UPDATE_TRIES))); _exit(0); diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 71004524b9..dd83fd1dbb 100755 --- 
a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -9324,11 +9324,11 @@ void StartupXLOG(void) SSReadControlFile(REFORM_CTRL_PAGE); if (SS_CLUSTER_ONDEMAND_NOT_NORAML && SS_PRIMARY_MODE) { if (SS_STANDBY_PROMOTING) { - ereport(FATAL, (errmsg("[On-demand] Do not allow switchover if ondemand recovery is not finish"))); + ereport(FATAL, (errmsg("[SS reform][On-demand] Do not allow switchover if ondemand recovery is not finish"))); } Assert(g_instance.dms_cxt.SSReformerControl.recoveryInstId != INVALID_INSTANCEID); src_id = g_instance.dms_cxt.SSReformerControl.recoveryInstId; - ereport(LOG, (errmsg("[On-demand]: Ondemand recovery do not finish in last reform, " + ereport(LOG, (errmsg("[SS reform][On-demand]: Ondemand recovery do not finish in last reform, " "reading control file of original primary:%d", src_id))); SSOndemandRecoveryExitNormal = false; } else if (SS_DORADO_CLUSTER) { @@ -10013,7 +10013,7 @@ void StartupXLOG(void) if (SS_STANDBY_MODE && t_thrd.xlog_cxt.InRecovery == true && SS_ONDEMAND_REALTIME_BUILD_DISABLED) { /* do not need replay anything in SS standby mode */ - ereport(LOG, (errmsg("[SS] Skip redo replay in standby mode"))); + ereport(LOG, (errmsg("[SS reform] Skip redo replay in standby mode"))); t_thrd.xlog_cxt.InRecovery = false; } @@ -10023,9 +10023,9 @@ void StartupXLOG(void) t_thrd.xlog_cxt.InRecovery == true) { if (SSOndemandRecoveryExitNormal) { StartupOndemandRecovery(); - ereport(LOG, (errmsg("[On-demand] replayed in extreme rto ondemand recovery mode"))); + ereport(LOG, (errmsg("[SS reform][On-demand] replayed in extreme rto ondemand recovery mode"))); } else { - ereport(LOG, (errmsg("[On-demand] do not allow replay in ondemand recovery if last ondemand recovery " + ereport(LOG, (errmsg("[SS reform][On-demand] do not allow replay in ondemand recovery if last ondemand recovery " "crash, replayed in extreme rto recovery mode"))); } } @@ -10041,7 +10041,7 @@ void StartupXLOG(void) 
SetOndemandExtremeRtoMode(); g_instance.dms_cxt.SSRecoveryInfo.recovery_pause_flag = false; g_instance.dms_cxt.SSRecoveryInfo.ondemand_realtime_build_status = BUILD_NORMAL; - ereport(LOG, (errmsg("[On-demand] realtime build start finish, set status to BUILD_NORMAL"))); + ereport(LOG, (errmsg("[SS reform][On-demand] realtime build start finish, set status to BUILD_NORMAL"))); } /* refresh recovery parallelism */ @@ -11049,7 +11049,7 @@ void StartupXLOG(void) } if (!SS_IN_ONDEMAND_RECOVERY) { ereport(LOG, (errmodule(MOD_DMS), - errmsg("[SS switchover/SS failover/SS normal reform] start full checkpoint."))); + errmsg("[SS reform][SS failover][SS switchover] start full checkpoint."))); RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT); LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); t_thrd.shemem_ptr_cxt.ControlFile->state = DB_IN_PRODUCTION; @@ -11058,7 +11058,7 @@ void StartupXLOG(void) LWLockRelease(ControlFileLock); SSRecheckBufferPool(); ereport(LOG, (errmodule(MOD_DMS), - errmsg("[SS switchover/SS failover/SS normal reform] finished full checkpoint" + errmsg("[SS reform][SS failover][SS switchover] finished full checkpoint" "and update control file"))); } } @@ -11097,7 +11097,7 @@ void StartupXLOG(void) if (SS_IN_ONDEMAND_RECOVERY) { /* We wait at here */ - ereport(LOG, (errmsg("[On-demand] ondemand redo, nextXid: " XID_FMT ", startupMaxXid: " XID_FMT + ereport(LOG, (errmsg("[SS reform][On-demand] ondemand redo, nextXid: " XID_FMT ", startupMaxXid: " XID_FMT ", recentLocalXmin: " XID_FMT ", recentGlobalXmin: %lu, PendingPreparedXacts: %d" ", NextCommitSeqNo: %lu, cutoff_csn_min: %lu.", NextXidAfterReovery, t_thrd.xact_cxt.ShmemVariableCache->startupMaxXid, @@ -11118,7 +11118,7 @@ void StartupXLOG(void) LWLockRelease(ControlFileLock); SSRecheckBufferPool(); ereport(LOG, (errmodule(MOD_DMS), - errmsg("[On-demand] finished full checkpoint and update control file"))); + errmsg("[SS reform][On-demand] finished full checkpoint and update 
control file"))); NotifyGscRecoveryFinished(); if (ENABLE_INCRE_CKPT) { -- Gitee From c3677de646b3ac27758793ccf2fcc5d69ee37639 Mon Sep 17 00:00:00 2001 From: shijuzheng1997 Date: Sat, 20 Jul 2024 16:27:26 +0800 Subject: [PATCH 072/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=88=9B=E5=BB=BAfun?= =?UTF-8?q?ction=E5=A0=86=E6=A0=88=E5=A4=AA=E6=B7=B1=EF=BC=8C=E8=A7=A3?= =?UTF-8?q?=E6=9E=90=E6=97=B6=E5=80=99=E6=8A=A5=E8=B5=84=E6=BA=90=E4=B8=8D?= =?UTF-8?q?=E8=B6=B3=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/CMakeLists.txt | 6 ++++++ src/common/pl/plpgsql/src/Makefile | 2 ++ 2 files changed, 8 insertions(+) diff --git a/src/common/pl/plpgsql/CMakeLists.txt b/src/common/pl/plpgsql/CMakeLists.txt index 92d4849540..591a39bbf3 100755 --- a/src/common/pl/plpgsql/CMakeLists.txt +++ b/src/common/pl/plpgsql/CMakeLists.txt @@ -13,6 +13,12 @@ execute_process( OUTPUT_VARIABLE PL_GRAM ) +execute_process( + COMMAND sed -i "s/\# define YYINITDEPTH .*/\# define YYINITDEPTH 1000/g" ${CMAKE_CURRENT_SOURCE_DIR}/src/pl_gram.cpp + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE PL_GRAM +) + execute_process( COMMAND perl generate-plerrcodes.pl ${PROJECT_SRC_DIR}/common/backend/utils/errcodes.txt WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src diff --git a/src/common/pl/plpgsql/src/Makefile b/src/common/pl/plpgsql/src/Makefile index 6eeba66297..6ab5726c12 100755 --- a/src/common/pl/plpgsql/src/Makefile +++ b/src/common/pl/plpgsql/src/Makefile @@ -71,8 +71,10 @@ ifdef BISON $(BISON) -d $(BISONFLAGS) -o $@ $< $(PERL) $(top_builddir)/src/mtlocal.pl $(srcdir)/pl_gram.cpp $(PERL) $(top_builddir)/src/mtlocal.pl $(srcdir)/pl_gram.hpp + sed -i 's/\# define YYINITDEPTH .*/\# define YYINITDEPTH 1000/g' $(srcdir)/pl_gram.cpp else @$(missing) bison $< $@ + sed -i 's/\# define YYINITDEPTH .*/\# define YYINITDEPTH 1000/g' $(srcdir)/pl_gram.cpp endif # generate plerrcodes.h from 
src/backend/utils/errcodes.txt -- Gitee From 550d1dfeb2aad37cb41c31d6ee3c48459913e726 Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Sat, 20 Jul 2024 15:40:44 +0800 Subject: [PATCH 073/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=AE=9E=E6=97=B6?= =?UTF-8?q?=E6=9E=84=E5=BB=BA=E5=85=B3=E9=97=AD=E9=98=B6=E6=AE=B5=E9=87=8D?= =?UTF-8?q?=E7=BD=AEhashmap=E5=86=85=E5=AD=98=E8=80=97=E6=97=B6=E8=BF=87?= =?UTF-8?q?=E9=95=BF=E5=AF=BC=E8=87=B4=E7=9A=84=E6=80=A7=E8=83=BD=E5=8F=8A?= =?UTF-8?q?SSWaitStartupExit=E9=80=80=E5=87=BA=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../access/transam/ondemand_extreme_rto/redo_utils.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp index 9e21101144..801f5d65c1 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp @@ -80,6 +80,7 @@ void OndemandXlogFileIdCacheInit(void) void *OndemandXLogMemCtlInit(RedoMemManager *memctl, Size itemsize, int itemnum) { Size dataSize = (itemsize + sizeof(RedoMemSlot)) * itemnum; + ParseBufferDesc *descstate = NULL; Assert(t_thrd.storage_cxt.ondemandXLogMem != NULL); Assert(dataSize <= OndemandRecoveryShmemSize()); @@ -91,6 +92,9 @@ void *OndemandXLogMemCtlInit(RedoMemManager *memctl, Size itemsize, int itemnum) for (int i = memctl->totalblknum; i > 0; --i) { memctl->memslot[i - 1].buf_id = i; /* start from 1 , 0 is invalidbuffer */ memctl->memslot[i - 1].freeNext = i - 1; + // init parsebufferdesc because ondemandXLogMem may not memset 0 + descstate = (ParseBufferDesc *)(t_thrd.storage_cxt.ondemandXLogMem + itemsize * (i - 1)); + descstate->state = 0; } // only used firstreleaseslot of globalmemctl memctl->firstfreeslot = InvalidBuffer; @@ 
-196,10 +200,6 @@ void OndemandXLogParseBufferInit(RedoParseManager *parsemanager, int buffernum, void OndemandXLogParseBufferDestory(RedoParseManager *parsemanager) { g_parseManager = NULL; - // do not free parsebuffers, but memset it to 0, which is managed in shared memory - if (parsemanager->memctl.isInit) { - memset(t_thrd.storage_cxt.ondemandXLogMem, 0, OndemandRecoveryShmemSize()); - } parsemanager->parsebuffers = NULL; parsemanager->memctl.isInit = false; } -- Gitee From 917e617df8de78ff2e02b963c68a90245fa2a40e Mon Sep 17 00:00:00 2001 From: lukeman Date: Sat, 20 Jul 2024 17:45:20 +0800 Subject: [PATCH 074/347] =?UTF-8?q?=E6=94=AF=E6=8C=81gs=5Fprobackup?= =?UTF-8?q?=E4=BD=BF=E7=94=A8=E5=AF=B9=E8=B1=A1=E5=AD=98=E5=82=A8=E5=A4=87?= =?UTF-8?q?=E4=BB=BD=E5=A4=87=E8=8A=82=E7=82=B9=E6=97=B6=E5=8F=AF=E4=BB=A5?= =?UTF-8?q?=E8=B7=A8=E9=9B=86=E7=BE=A4=E6=81=A2=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_probackup/backup.cpp | 8 +++++++ src/bin/pg_probackup/catalog.cpp | 4 ---- src/bin/pg_probackup/dir.cpp | 4 ++++ .../pg_probackup/oss/include/oss_operator.h | 21 +------------------ src/bin/pg_probackup/oss/include/restore.h | 2 +- src/bin/pg_probackup/oss/oss_operator.cpp | 6 ++++-- src/bin/pg_probackup/oss/restore.cpp | 12 ++++++++--- src/bin/pg_probackup/pg_probackup.cpp | 6 +++++- src/bin/pg_probackup/restore.cpp | 3 +++ src/bin/pg_probackup/util.cpp | 3 +++ 10 files changed, 38 insertions(+), 31 deletions(-) diff --git a/src/bin/pg_probackup/backup.cpp b/src/bin/pg_probackup/backup.cpp index 19655066d4..76833d3c0f 100644 --- a/src/bin/pg_probackup/backup.cpp +++ b/src/bin/pg_probackup/backup.cpp @@ -1859,6 +1859,10 @@ void write_table_label_and_tablespace_map(pgBackup *backup, PGresult *res, file->uncompressed_size = file->size; parray_append(backup_files_list, file); } + + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(backup_label, backup_label); + } } if 
(sscanf_s(PQgetvalue(res, 0, 0), XID_FMT, recovery_xid) != 1) @@ -1905,6 +1909,10 @@ void write_table_label_and_tablespace_map(pgBackup *backup, PGresult *res, parray_append(backup_files_list, file); } + + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(tablespace_map, tablespace_map); + } } } diff --git a/src/bin/pg_probackup/catalog.cpp b/src/bin/pg_probackup/catalog.cpp index f5e107ee06..f645710551 100644 --- a/src/bin/pg_probackup/catalog.cpp +++ b/src/bin/pg_probackup/catalog.cpp @@ -460,10 +460,6 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id) goto err_proc; } - if (current.media_type == MEDIA_TYPE_OSS) { - restoreConfigDir(); - } - /* scan the directory and list backups */ backups = parray_new(); for (; (data_ent = fio_readdir(data_dir)) != NULL; errno = 0) diff --git a/src/bin/pg_probackup/dir.cpp b/src/bin/pg_probackup/dir.cpp index e07894a00d..9a2b55a814 100644 --- a/src/bin/pg_probackup/dir.cpp +++ b/src/bin/pg_probackup/dir.cpp @@ -1407,6 +1407,10 @@ read_tablespace_map(parray *files, const char *backup_dir) join_path_components(map_path, db_path, PG_TABLESPACE_MAP_FILE); /* Exit if database/tablespace_map doesn't exist */ + if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigFile(map_path, true); + } + if (!fileExists(map_path, FIO_BACKUP_HOST)) { elog(LOG, "there is no file tablespace_map"); diff --git a/src/bin/pg_probackup/oss/include/oss_operator.h b/src/bin/pg_probackup/oss/include/oss_operator.h index 5fcf0b2757..0b2de2e2d3 100644 --- a/src/bin/pg_probackup/oss/include/oss_operator.h +++ b/src/bin/pg_probackup/oss/include/oss_operator.h @@ -23,25 +23,6 @@ #define OSS_MAX_FILE_PATH 1024 #define OSS_MAX_ETAG_LEN 256 -/* Data Structure Definition*/ - -typedef struct OssFile { - char filePath[OSS_MAX_FILE_PATH]; - /* for write */ - char uploadID[OSS_MAX_UPLOAD_ID_LEN]; - char** eTagList; - int partNum; - /* for read */ - size_t fileSize; - bool oss_eof; - bool oss_error; - void* bufDate; - int 
byteCout; - int actualLen; - size_t offset; - char etag[OSS_MAX_ETAG_LEN]; -} OssFile; - /* API Function */ namespace Oss { @@ -53,7 +34,7 @@ class Oss { public: Oss(const char* endpoint, const char* access_key, const char* secret_key, const char* region = NULL, bool secure = false); ~Oss(); - void GetObject(const char* bucket_name, const char* object_name, const char* file_name); + void GetObject(const char* bucket_name, const char* object_name, const char* file_name, bool errorOk = false); void GetObject(const char* from_bucket, const char* object_key, void* file); void PutObject(const char* bucket_name, const char* file_path, const char* file_name); void RemoveObject(const char* bucket_name, const char* objcet_key); diff --git a/src/bin/pg_probackup/oss/include/restore.h b/src/bin/pg_probackup/oss/include/restore.h index a7498a38a9..e3803b57a6 100644 --- a/src/bin/pg_probackup/oss/include/restore.h +++ b/src/bin/pg_probackup/oss/include/restore.h @@ -39,7 +39,7 @@ extern void closeRestoreFile(FileAppenderSegDescriptor* desc); extern void restoreConfigDir(); -extern void restoreConfigFile(const char* path); +extern void restoreConfigFile(const char* path, bool errorOk = false); extern void uploadConfigFile(const char* local_path, const char* object_name); diff --git a/src/bin/pg_probackup/oss/oss_operator.cpp b/src/bin/pg_probackup/oss/oss_operator.cpp index 90f3cfeea1..cf0e93be83 100644 --- a/src/bin/pg_probackup/oss/oss_operator.cpp +++ b/src/bin/pg_probackup/oss/oss_operator.cpp @@ -99,7 +99,7 @@ void Oss::GetObject(const char* from_bucket, const char* object_key, void* fileP } } -void Oss::GetObject(const char* bucket_name, const char* object_name, const char* file_name) { +void Oss::GetObject(const char* bucket_name, const char* object_name, const char* file_name, bool errorOk) { auto s3_client = reinterpret_cast(s3_client_); Aws::S3::Model::GetObjectRequest request; request.SetBucket(bucket_name); @@ -107,7 +107,9 @@ void Oss::GetObject(const char* 
bucket_name, const char* object_name, const char auto outcome = s3_client->GetObject(request); if (!outcome.IsSuccess()) { auto err = outcome.GetError(); - elog(ERROR, "GetObject: %s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + int elevel = errorOk ? WARNING : ERROR; + elog(elevel, "GetObject: %s, %s", err.GetExceptionName().c_str(), err.GetMessage().c_str()); + return; } char* separator_pos = last_dir_separator(file_name); char* dir_path = strndup(file_name, separator_pos - file_name); diff --git a/src/bin/pg_probackup/oss/restore.cpp b/src/bin/pg_probackup/oss/restore.cpp index 40a4858337..04f71ea581 100644 --- a/src/bin/pg_probackup/oss/restore.cpp +++ b/src/bin/pg_probackup/oss/restore.cpp @@ -296,9 +296,15 @@ void restoreConfigDir() Oss::Oss* oss = getOssClient(); char* bucket_name = getBucketName(); char* prefix_name = backup_instance_path + 1; + char dir_path[MAXPGPATH]; + char arclog_path[MAXPGPATH]; + int nRet = 0; + fio_mkdir(backup_instance_path, DIR_PERMISSION, location); + nRet = snprintf_s(arclog_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", backup_path, "wal", instance_name); + securec_check_ss_c(nRet, "\0", "\0"); + fio_mkdir(arclog_path, DIR_PERMISSION, location); parray *obj_list = parray_new(); oss->ListObjectsWithPrefix(bucket_name, prefix_name, obj_list); - char dir_path[MAXPGPATH]; for (size_t i = 0; i < parray_num(obj_list); i++) { char* object = (char*)parray_get(obj_list, i); char* filename = last_dir_separator(object); @@ -316,14 +322,14 @@ void restoreConfigDir() parray_free(obj_list); } -void restoreConfigFile(const char* path) +void restoreConfigFile(const char* path, bool errorOk) { Oss::Oss* oss = getOssClient(); const char* object_name = NULL; const char* bucket_name = NULL; bucket_name = getBucketName(); object_name = path; - oss->GetObject(bucket_name, object_name, (char*)path); + oss->GetObject(bucket_name, object_name, (char*)path, errorOk); } void uploadConfigFile(const char* path, const char* object_name) 
diff --git a/src/bin/pg_probackup/pg_probackup.cpp b/src/bin/pg_probackup/pg_probackup.cpp index bfd1dc484a..d10d119e22 100644 --- a/src/bin/pg_probackup/pg_probackup.cpp +++ b/src/bin/pg_probackup/pg_probackup.cpp @@ -419,6 +419,10 @@ static void parse_instance_name() { struct stat st; + if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigDir(); + } + if (fio_stat(backup_instance_path, &st, true, FIO_BACKUP_HOST) != 0) { elog(WARNING, "Failed to access directory \"%s\": %s", @@ -458,7 +462,7 @@ static void parse_instance_name() } config_read_opt(path, instance_options, ERROR, true, false); if (current.media_type == MEDIA_TYPE_OSS) { - remove(path); + uploadConfigFile(path, path); } } setMyLocation(); diff --git a/src/bin/pg_probackup/restore.cpp b/src/bin/pg_probackup/restore.cpp index a323e09850..11d109654a 100644 --- a/src/bin/pg_probackup/restore.cpp +++ b/src/bin/pg_probackup/restore.cpp @@ -2268,6 +2268,9 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, { rc = snprintf_s(backup_label, MAXPGPATH, MAXPGPATH - 1, "%s/backup_label", pgdata); securec_check_ss_c(rc, "\0", "\0"); + if (current.media_type == MEDIA_TYPE_OSS) { + restoreConfigFile(backup_label, true); + } if (fio_access(backup_label, F_OK, FIO_DB_HOST) == 0) { elog(WARNING, "Destination directory contains \"backup_control\" file. 
" diff --git a/src/bin/pg_probackup/util.cpp b/src/bin/pg_probackup/util.cpp index 93a30d70cc..d99c92c2f4 100644 --- a/src/bin/pg_probackup/util.cpp +++ b/src/bin/pg_probackup/util.cpp @@ -698,6 +698,9 @@ set_min_recovery_point(pgFile *file, const char *fullpath, /* Update pg_control checksum in backup_list */ file->crc = ControlFile.crc; + if (current.media_type == MEDIA_TYPE_OSS) { + uploadConfigFile(fullpath, fullpath); + } pg_free(buffer); } -- Gitee From 8919fa77c60fe5dd8b5bddecbb7805b1af3b65a0 Mon Sep 17 00:00:00 2001 From: laishenghao Date: Sat, 20 Jul 2024 19:24:21 +0800 Subject: [PATCH 075/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=88=86=E5=8C=BA?= =?UTF-8?q?=E8=A1=A8=E4=B8=8B=E5=B9=B6=E5=8F=91=E6=89=A7=E8=A1=8C=20insert?= =?UTF-8?q?=20=E5=92=8C=20reindex=20concurrently=E6=97=B6coredump=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/index.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/common/backend/catalog/index.cpp b/src/common/backend/catalog/index.cpp index a4c6fc2493..e14d9887b3 100644 --- a/src/common/backend/catalog/index.cpp +++ b/src/common/backend/catalog/index.cpp @@ -2005,14 +2005,14 @@ void index_concurrently_part_swap(Oid newIndexPartId, Oid oldIndexPartId, const Oid oldIndexRelationId = PartIdGetParentId(oldIndexPartId, false); Oid newIndexRelationId = PartIdGetParentId(newIndexPartId, false); - Relation oldIndexRelation = index_open(oldIndexRelationId, ShareUpdateExclusiveLock); - Relation newIndexRelation = index_open(newIndexRelationId, ShareUpdateExclusiveLock); + Relation oldIndexRelation = index_open(oldIndexRelationId, AccessExclusiveLock); + Relation newIndexRelation = index_open(newIndexRelationId, AccessExclusiveLock); /* * Take a necessary lock on the old and new index part before swaping them. 
*/ - oldIndexPartition = partitionOpen(oldIndexRelation, oldIndexPartId, ShareUpdateExclusiveLock); - newIndexPartition = partitionOpen(newIndexRelation, newIndexPartId, ShareUpdateExclusiveLock); + oldIndexPartition = partitionOpen(oldIndexRelation, oldIndexPartId, AccessExclusiveLock); + newIndexPartition = partitionOpen(newIndexRelation, newIndexPartId, AccessExclusiveLock); /* Now swap names of those index parts */ pg_partition = heap_open(PartitionRelationId, RowExclusiveLock); -- Gitee From 07e73f012f9e5eccf54f4336068e774e9645ed90 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Sat, 20 Jul 2024 18:04:48 +0800 Subject: [PATCH 076/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dpg=5Fproc=5Fext?= =?UTF-8?q?=E6=9C=AA=E6=8C=87=E5=AE=9Arowtypeid=E5=92=8C=E7=94=A8=E4=BE=8B?= =?UTF-8?q?=E4=B8=8D=E7=A8=B3=E5=AE=9A=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/include/catalog/pg_proc_ext.h | 4 +- .../upgrade_catalog_maindb_92_941.sql | 2 +- .../upgrade_catalog_otherdb_92_941.sql | 2 +- .../regress/expected/plpgsql_inout_param.out | 84 +++++++++---------- src/test/regress/sql/plpgsql_inout_param.sql | 1 + 5 files changed, 47 insertions(+), 46 deletions(-) diff --git a/src/include/catalog/pg_proc_ext.h b/src/include/catalog/pg_proc_ext.h index bc43c6d0c6..4be6bf80bf 100644 --- a/src/include/catalog/pg_proc_ext.h +++ b/src/include/catalog/pg_proc_ext.h @@ -37,9 +37,9 @@ * ---------------- */ #define ProcedureExtensionRelationId 3483 -#define ProcedureExtensionRelationId_Rowtype_Id 11663 +#define ProcedureExtensionRelationId_Rowtype_Id 3484 -CATALOG(pg_proc_ext,3483) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO +CATALOG(pg_proc_ext,3483) BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(3484) BKI_SCHEMA_MACRO { Oid proc_oid; /* procedure oid */ int2 parallel_cursor_seq; /* specify which cursor arg to be parallel for function */ diff --git 
a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql index 5c5ed1b90c..d42ba73767 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_941.sql @@ -1,5 +1,5 @@ --upgrade TABLE -SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 3483, 0, 0, 0; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 3483, 3484, 0, 0; CREATE TABLE IF NOT EXISTS pg_catalog.pg_proc_ext ( proc_oid oid not null, diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql index 85d451427c..157088833e 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_941.sql @@ -1,5 +1,5 @@ --upgrade TABLE -SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 3483, 0, 0, 0; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 3483, 3484, 0, 0; CREATE TABLE IF NOT EXISTS pg_catalog.pg_proc_ext ( proc_oid oid not null, diff --git a/src/test/regress/expected/plpgsql_inout_param.out b/src/test/regress/expected/plpgsql_inout_param.out index 9dc4bbac7d..a69044afdb 100644 --- a/src/test/regress/expected/plpgsql_inout_param.out +++ b/src/test/regress/expected/plpgsql_inout_param.out @@ -1890,46 +1890,46 @@ drop function test_inout_func2(); drop function test_inout_func(); drop type test_inout_type; -- clean +drop table inttest; drop schema if exists plpgsql_inout cascade; -NOTICE: drop cascades to 41 other objects -DETAIL: drop cascades to function proc2() -drop cascades to function 
proc4() -drop cascades to function proc5(integer[]) -drop cascades to function proc6() -drop cascades to function proc7(integer[],integer) -drop cascades to function proc8() -drop cascades to function proc9() -drop cascades to function proc10() -drop cascades to type info -drop cascades to function proc20(info,integer) -drop cascades to function proc21() -drop cascades to function proc11() -drop cascades to function proc12() -drop cascades to type o1 -drop cascades to type o2 -drop cascades to type o3 -drop cascades to function proc13() -drop cascades to function proc14() -drop cascades to type customer -drop cascades to table customers -drop cascades to function proc15() -drop cascades to function proc16(character varying) -drop cascades to function proc17() -drop cascades to type t -drop cascades to function proc3() -drop cascades to function proc1() -drop cascades to type t1 -drop cascades to function p1(integer) -drop cascades to function p2() -drop cascades to function fun1() -drop cascades to type myint -drop cascades to function myintin(cstring) -drop cascades to function myintout(myint) -drop cascades to function myinthash(myint) -drop cascades to cast from integer to myint -drop cascades to cast from myint to integer -drop cascades to function myinteq(myint,myint) -drop cascades to operator <>(myint,myint) -drop cascades to operator =(myint,myint) -drop cascades to operator family myint_ops for access method hash -drop cascades to table inttest +NOTICE: drop cascades to 40 other objects +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* diff --git a/src/test/regress/sql/plpgsql_inout_param.sql b/src/test/regress/sql/plpgsql_inout_param.sql index cd413aeb3f..0e2901675e 100644 --- a/src/test/regress/sql/plpgsql_inout_param.sql +++ 
b/src/test/regress/sql/plpgsql_inout_param.sql @@ -1543,4 +1543,5 @@ drop function test_inout_func(); drop type test_inout_type; -- clean +drop table inttest; drop schema if exists plpgsql_inout cascade; \ No newline at end of file -- Gitee From 045dfc05c54637774aae2a35b030b27d3be1c3b9 Mon Sep 17 00:00:00 2001 From: yuu Date: Tue, 2 Jul 2024 10:59:15 +0800 Subject: [PATCH 077/347] use cmake to build --- build.sh | 7 ++++++- build/script/build_opengauss.sh | 5 +++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/build.sh b/build.sh index 4de8acaea7..5cd6b384e3 100755 --- a/build.sh +++ b/build.sh @@ -32,6 +32,7 @@ function print_help() -pm|--product_mode this values of paramenter is opengauss or lite or finance, the default value is opengauss. -nls|--enable_nls enable Native Language Support --relocation generate gaussdb.map with relocation(GCC >=10.3). + --cmake use cmake to build openGauss, which is faster than traditional configure/autoconf " } @@ -90,6 +91,10 @@ while [ $# -gt 0 ]; do extra_config_opt="$extra_config_opt --config_opt --enable-relocation " shift 1 ;; + -C|--cmake) + build_by_cmake='--cmake' + shift 1 + ;; *) echo "Internal Error: option processing error: $1" 1>&2 echo "please input right paramtenter, the following command may help you" @@ -103,7 +108,7 @@ ROOT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) echo "ROOT_DIR : $ROOT_DIR" cd build/script chmod a+x build_opengauss.sh -./build_opengauss.sh -m ${build_version_mode} -3rd ${build_binarylib_dir} ${not_optimized} -pkg server ${build_with_tassl} -pm ${product_mode} ${extra_config_opt} +./build_opengauss.sh -m ${build_version_mode} -3rd ${build_binarylib_dir} ${not_optimized} -pkg server ${build_with_tassl} -pm ${product_mode} ${extra_config_opt} ${build_by_cmake} if [ $? -ne 0 ]; then echo "build_opengauss.sh failed, aborting." 
exit 1 diff --git a/build/script/build_opengauss.sh b/build/script/build_opengauss.sh index 7f6fb18a76..f19a0fd7c6 100755 --- a/build/script/build_opengauss.sh +++ b/build/script/build_opengauss.sh @@ -29,6 +29,7 @@ function print_help() -s|--symbol_mode whether separate symbol in debug mode, the default value is on. -co|--cmake_opt more cmake options -T|--tassl build with tassl + --cmake build by cmake " } @@ -124,6 +125,10 @@ while [ $# -gt 0 ]; do build_with_tassl="YES" shift 1 ;; + --cmake) + CMAKE_PKG="Y" + shift 1 + ;; *) echo "Internal Error: option processing error: $1" 1>&2 echo "please input right paramtenter, the following command may help you" -- Gitee From f27f8a7bc32e4aaccbcb35b9cc1ebfc1eaed3eb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Mon, 22 Jul 2024 13:19:00 +0800 Subject: [PATCH 078/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dsequence=E5=A4=A7?= =?UTF-8?q?=E5=B0=8F=E5=86=99=E5=BF=BD=E7=95=A5=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_expr.cpp | 2 +- .../regress/expected/single_node_sequence.out | 30 +++++++++++++++++++ src/test/regress/sql/single_node_sequence.sql | 18 +++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 722db712d0..849b07fb31 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -3186,7 +3186,7 @@ static Node* transformSequenceFuncCall(ParseState* pstate, Node* field1, Node* f if (field1 != NULL) { StringInfoData buf; initStringInfo(&buf); - appendStringInfo(&buf, "%s.%s", strVal(field1), strVal(field2)); + appendStringInfo(&buf, "\"%s\".\"%s\"", strVal(field1), strVal(field2)); arg = makeString(buf.data); } else { arg = (Value*)field2; diff --git a/src/test/regress/expected/single_node_sequence.out 
b/src/test/regress/expected/single_node_sequence.out index 94ab623325..152eda5e83 100644 --- a/src/test/regress/expected/single_node_sequence.out +++ b/src/test/regress/expected/single_node_sequence.out @@ -429,3 +429,33 @@ DROP USER seq_user; DROP SEQUENCE seq; drop sequence "QUOTATION_SEQ"; drop sequence no_quotation_seq; +-- uppercase sequence name +-- public schema +create table "T1" (c1 int, c2 int); +create sequence "SEQ1" increment by 1 maxvalue 9223372036854775807 start with 3 cache 20; +NOTICE: Not advised to use MAXVALUE or MINVALUE together with CACHE. +DETAIL: If CACHE is defined, some sequence values may be wasted, causing available sequence numbers to be less than expected. +insert into "T1" values(128, "SEQ1".nextval); +select * from "T1"; + c1 | c2 +-----+---- + 128 | 3 +(1 row) + +drop sequence "SEQ1"; +drop table "T1"; +-- new schema +create schema if not exists "NEW_SCHEMA"; +create table "NEW_SCHEMA"."T1" (c1 int, c2 int); +create sequence "NEW_SCHEMA"."SEQ1" increment by 1 maxvalue 9223372036854775807 start with 3 cache 20; +NOTICE: Not advised to use MAXVALUE or MINVALUE together with CACHE. +DETAIL: If CACHE is defined, some sequence values may be wasted, causing available sequence numbers to be less than expected. 
+insert into "NEW_SCHEMA"."T1" values (128, "NEW_SCHEMA"."SEQ1".nextval); +select * from "NEW_SCHEMA"."T1"; + c1 | c2 +-----+---- + 128 | 3 +(1 row) + +drop sequence "NEW_SCHEMA"."SEQ1"; +drop table "NEW_SCHEMA"."T1"; diff --git a/src/test/regress/sql/single_node_sequence.sql b/src/test/regress/sql/single_node_sequence.sql index 0c70040bd6..12b3155231 100644 --- a/src/test/regress/sql/single_node_sequence.sql +++ b/src/test/regress/sql/single_node_sequence.sql @@ -192,3 +192,21 @@ DROP USER seq_user; DROP SEQUENCE seq; drop sequence "QUOTATION_SEQ"; drop sequence no_quotation_seq; + +-- uppercase sequence name +-- public schema +create table "T1" (c1 int, c2 int); +create sequence "SEQ1" increment by 1 maxvalue 9223372036854775807 start with 3 cache 20; +insert into "T1" values(128, "SEQ1".nextval); +select * from "T1"; +drop sequence "SEQ1"; +drop table "T1"; + +-- new schema +create schema if not exists "NEW_SCHEMA"; +create table "NEW_SCHEMA"."T1" (c1 int, c2 int); +create sequence "NEW_SCHEMA"."SEQ1" increment by 1 maxvalue 9223372036854775807 start with 3 cache 20; +insert into "NEW_SCHEMA"."T1" values (128, "NEW_SCHEMA"."SEQ1".nextval); +select * from "NEW_SCHEMA"."T1"; +drop sequence "NEW_SCHEMA"."SEQ1"; +drop table "NEW_SCHEMA"."T1"; -- Gitee From f55e86591b97783defb5b389a85840eb12ec2ad2 Mon Sep 17 00:00:00 2001 From: luqichao Date: Mon, 22 Jul 2024 15:09:08 +0800 Subject: [PATCH 079/347] adaptor hccs --- src/gausskernel/storage/gs_uwal/gs_uwal.cpp | 22 +++++++++++++++++---- src/include/storage/gs_uwal/uwal.h | 1 + 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/storage/gs_uwal/gs_uwal.cpp b/src/gausskernel/storage/gs_uwal/gs_uwal.cpp index 6937708a19..c027df8af2 100644 --- a/src/gausskernel/storage/gs_uwal/gs_uwal.cpp +++ b/src/gausskernel/storage/gs_uwal/gs_uwal.cpp @@ -364,11 +364,14 @@ static bool GsUwalParseConfig(cJSON *uwalConfJSON) } else if (!strcasecmp(protocolJSON->valuestring, "rdma")) { rc = 
strcpy_s(g_uwalConfig.protocol, UWAL_PROTOCOL_LEN, "rdma"); securec_check(rc, "\0", "\0"); + } else if (!strcasecmp(protocolJSON->valuestring, "ub")) { + rc = strcpy_s(g_uwalConfig.protocol, UWAL_PROTOCOL_LEN, "ub"); + securec_check(rc, "\0", "\0"); } else if (!strcasecmp(protocolJSON->valuestring, "tcp")) { rc = strcpy_s(g_uwalConfig.protocol, UWAL_PROTOCOL_LEN, "tcp"); securec_check(rc, "\0", "\0"); } else { - ereport(WARNING, (errmsg("uwal_protocol only support tcp and rdma, will use the default protocol tcp"))); + ereport(WARNING, (errmsg("uwal_protocol only support tcp, rdma and ub, will use the default protocol tcp"))); rc = strcpy_s(g_uwalConfig.protocol, UWAL_PROTOCOL_LEN, "tcp"); securec_check(rc, "\0", "\0"); } @@ -404,8 +407,9 @@ static bool GsUwalParseConfig(cJSON *uwalConfJSON) rc = strcpy_s(g_uwalConfig.repliNodes[nodeId], UWAL_PROTOCOL_LEN, "tcp"); securec_check(rc, "\0", "\0"); } else if (strcasecmp(subProtocolJSON->valuestring, "rdma") && + strcasecmp(subProtocolJSON->valuestring, "ub") && strcasecmp(subProtocolJSON->valuestring, "tcp")) { - ereport(WARNING, (errmsg("protocol only support tcp and rdma, use the default protocol tcp"))); + ereport(WARNING, (errmsg("protocol only support tcp, rdma and ub, use the default protocol tcp"))); rc = strcpy_s(g_uwalConfig.repliNodes[nodeId], UWAL_PROTOCOL_LEN, "tcp"); securec_check(rc, "\0", "\0"); } else { @@ -613,6 +617,8 @@ void GetLocalStateInfo(OUT NodeStateInfo *nodeStateInfo) netInfo.protocol = NET_PROTOCOL_TCP; if (!strcasecmp(g_uwalConfig.protocol, "rdma")) { netInfo.protocol = NET_PROTOCOL_RDMA; + } else if (!strcasecmp(g_uwalConfig.protocol, "ub")) { + netInfo.protocol = NET_PROTOCOL_UB; } NetList netList; @@ -802,8 +808,12 @@ int GsUwalWalSenderNotify(bool exceptSelf) netInfo.ipv4Addr = ock_uwal_ipv4_inet_to_int((char *)replConnInfo->remoteuwalhost); netInfo.port = replConnInfo->remoteuwalport; netInfo.protocol = NET_PROTOCOL_TCP; - if (!strcasecmp(g_uwalConfig.protocol, "rdma") && 
!strcasecmp(g_uwalConfig.repliNodes[standbyStateInfo.nodeId], "rdma")) { + if (!strcasecmp(g_uwalConfig.protocol, "rdma") && + !strcasecmp(g_uwalConfig.repliNodes[standbyStateInfo.nodeId], "rdma")) { netInfo.protocol = NET_PROTOCOL_RDMA; + } else if (!strcasecmp(g_uwalConfig.protocol, "ub") && + !strcasecmp(g_uwalConfig.repliNodes[standbyStateInfo.nodeId], "ub")) { + netInfo.protocol = NET_PROTOCOL_UB; } NetList netList; @@ -879,8 +889,12 @@ int GsUwalWalReceiverNotify(bool isConnectedToPrimary) netInfo.ipv4Addr = ock_uwal_ipv4_inet_to_int((char *)replConnInfo->remotehost); netInfo.port = replConnInfo->remoteuwalport; netInfo.protocol = NET_PROTOCOL_TCP; - if (!strcasecmp(g_uwalConfig.protocol, "rdma") && !strcasecmp(g_uwalConfig.repliNodes[primaryStateInfo.nodeId], "rdma")) { + if (!strcasecmp(g_uwalConfig.protocol, "rdma") && + !strcasecmp(g_uwalConfig.repliNodes[primaryStateInfo.nodeId], "rdma")) { netInfo.protocol = NET_PROTOCOL_RDMA; + } else if (!strcasecmp(g_uwalConfig.protocol, "ub") && + !strcasecmp(g_uwalConfig.repliNodes[primaryStateInfo.nodeId], "ub")) { + netInfo.protocol = NET_PROTOCOL_UB; } NetList netList; diff --git a/src/include/storage/gs_uwal/uwal.h b/src/include/storage/gs_uwal/uwal.h index ca2e3bb320..490ac7f942 100644 --- a/src/include/storage/gs_uwal/uwal.h +++ b/src/include/storage/gs_uwal/uwal.h @@ -365,6 +365,7 @@ typedef int (*UwalRegisterCertVerifyFunc)(int32_t (*certVerify)(void* certStoreC typedef enum { NET_PROTOCOL_TCP = 0, NET_PROTOCOL_RDMA = 1, + NET_PROTOCOL_UB = 2, NET_PROTOCOL_BUTT } NetProtocol; -- Gitee From 7e7446b8fd829a80a0f8d79339511af36084e095 Mon Sep 17 00:00:00 2001 From: blig Date: Mon, 1 Jul 2024 11:32:57 +0800 Subject: [PATCH 080/347] =?UTF-8?q?=E3=80=90=E9=9C=80=E6=B1=82=E3=80=91gs?= =?UTF-8?q?=5Fctl=E6=97=A5=E5=BF=97=E6=8C=89=E7=85=A7=E4=B8=AA=E6=95=B0?= =?UTF-8?q?=E8=BD=AE=E8=BD=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/lib/elog/elog.cpp | 214 
+++++++++++++++++- src/test/ha/ha_schedule_multi_single | 1 + .../multi_standby_single/log_remove.sh | 84 +++++++ 3 files changed, 291 insertions(+), 8 deletions(-) create mode 100644 src/test/ha/testcase/multi_standby_single/log_remove.sh diff --git a/src/lib/elog/elog.cpp b/src/lib/elog/elog.cpp index 56dd64621c..0a91d56099 100644 --- a/src/lib/elog/elog.cpp +++ b/src/lib/elog/elog.cpp @@ -30,6 +30,8 @@ #define _WIN32_WINNT 0x0501 #endif +#include +#include #include #include #include @@ -44,6 +46,7 @@ #endif #include "postgres_fe.h" +#include "flock.h" #ifdef WIN32_PG_DUMP #undef PGDLLIMPORT @@ -61,10 +64,26 @@ #undef WIN32 #endif +typedef struct ToolLogInfo { + time_t mTime; + time_t cTime; + char fileName[0]; +} ToolLogInfo; + +#ifndef palloc +#define palloc(sz) malloc(sz) +#endif +#ifndef pfree +#define pfree(ptr) free(ptr) +#endif + +#define LOG_MAX_COUNT 50 +#define GS_LOCKFILE_SIZE 1024 #define curLogFileMark "-current.log" // optimize,to suppose pirnt to file and screen static bool allow_log_store = false; void check_env_value_c(const char* input_env_value); + /* * @@GaussDB@@ * Brief : @@ -195,7 +214,173 @@ static void set_log_filename(char* log_new_name, const char* log_old_name) securec_check_c(rc, "\0", "\0"); } -static int create_log_file(const char* prefix_name, const char* log_path) +static int gs_srvtool_lock(const char *prefix_name, const char *log_dir, FILE **fd) +{ + int ret; + struct stat statbuf; + int fildes = 0; + char lockfile[MAXPGPATH] = {'\0'}; + + ret = snprintf_s(lockfile, sizeof(lockfile), sizeof(lockfile) - 1, "%s/%s.%s", log_dir, prefix_name, "lock"); + securec_check_ss_c(ret, "\0", "\0"); + + canonicalize_path(lockfile); + + /* If lock file dose not exist, create it */ + if (stat(lockfile, &statbuf) != 0) { + char content[GS_LOCKFILE_SIZE] = {0}; + *fd = fopen(lockfile, PG_BINARY_W); + if (*fd == NULL) { + printf(_("%s: can't create lock file \"%s\" : %s\n"), prefix_name, lockfile, gs_strerror(errno)); + exit(1); + } + + fildes = 
fileno(*fd); + if (fchmod(fildes, S_IRUSR | S_IWUSR) == -1) { + printf(_("%s: can't chmod lock file \"%s\" : %s\n"), prefix_name, lockfile, gs_strerror(errno)); + /* Close file and Nullify the pointer for retry */ + fclose(*fd); + *fd = NULL; + exit(1); + } + + if (fwrite(content, GS_LOCKFILE_SIZE, 1, *fd) != 1) { + fclose(*fd); + *fd = NULL; + printf(_("%s: can't write lock file \"%s\" : %s\n"), prefix_name, lockfile, gs_strerror(errno)); + exit(1); + } + fclose(*fd); + *fd = NULL; + } + + if ((*fd = fopen(lockfile, PG_BINARY_W)) == NULL) { + printf(_("%s: can't open lock file \"%s\" : %s\n"), prefix_name, lockfile, gs_strerror(errno)); + exit(1); + } + + ret = flock(fileno(*fd), LOCK_EX | LOCK_NB, 0, START_LOCATION, GS_LOCKFILE_SIZE); + return ret; +} + +static inline int gs_srvtool_unlock(FILE *fd) +{ + int ret = -1; + + if (fd != NULL) { + ret = flock(fileno(fd), LOCK_UN, 0, START_LOCATION, GS_LOCKFILE_SIZE); + fclose(fd); + fd = NULL; + } + + return ret; +} + +static inline int file_time_cmp(const void *v1, const void *v2) +{ + const ToolLogInfo *l1 = *(ToolLogInfo **)v1; + const ToolLogInfo *l2 = *(ToolLogInfo **)v2; + + int result = l1->mTime - l2->mTime; + if (result == 0) { + return l1->cTime - l2->cTime; + } + return result; +} + +static inline void free_file_list(ToolLogInfo **file_list, int count) +{ + for (int i = 0; i < count; i++) { + pfree(file_list[i]); + } + pfree(file_list); +} + +static inline bool str_end_with(const char *str, const char *end) +{ + int slen = strlen(str); + int elen = strlen(end); + if (elen > slen) { + return false; + } else { + return (strcmp(str + slen - elen, end) == 0); + } +} + +static void remove_oldest_log(const char *prefix_name, const char *log_path, int count) +{ + DIR *dir = NULL; + struct dirent *de = NULL; + errno_t rc = EOK; + + int file_len = strlen(prefix_name) + strlen("-yyyy-mm-dd_hhmmss.log"); + size_t info_size = sizeof(ToolLogInfo) + file_len + 1; + ToolLogInfo **file_list = (ToolLogInfo 
**)palloc(sizeof(ToolLogInfo *) * count); + if (file_list == NULL) { + printf(_("%s: palloc memory failed! %s\n"), prefix_name, gs_strerror(errno)); + return; + } + + for (int i = 0; i < count; i++) { + file_list[i] = (ToolLogInfo *)palloc(info_size); + if (file_list[i] == NULL) { + printf(_("%s: palloc memory failed! %s\n"), prefix_name, gs_strerror(errno)); + free_file_list(file_list, i); + return; + } + rc = memset_s(file_list[i], info_size, 0, info_size); + securec_check_c(rc, "\0", "\0"); + } + + if ((dir = opendir(log_path)) == NULL) { + free_file_list(file_list, count); + printf(_("%s: opendir %s failed! %s\n"), prefix_name, log_path, gs_strerror(errno)); + return; + } + + int slot = 0; + struct stat fst; + char pathname[MAXPGPATH] = {'\0'}; + while ((de = readdir(dir)) != NULL) { + if (strncmp(de->d_name, prefix_name, strlen(prefix_name)) == 0 && + !str_end_with(de->d_name, curLogFileMark) && + !str_end_with(de->d_name, ".lock")) { + rc = snprintf_s(pathname, MAXPGPATH, MAXPGPATH - 1, "%s/%s", log_path, de->d_name); + securec_check_ss_c(rc, "\0", "\0"); + if (stat(pathname, &fst) < 0) { + printf(_("%s: could not stat file %s\n"), prefix_name, pathname, gs_strerror(errno)); + continue; + } + + file_list[slot]->mTime = fst.st_mtime; + file_list[slot]->cTime = fst.st_ctime; + rc = strncpy_s(file_list[slot]->fileName, file_len + 1, de->d_name, strlen(de->d_name)); + securec_check_c(rc, "\0", "\0"); + slot++; + } + } + + qsort(file_list, slot, sizeof(ToolLogInfo *), file_time_cmp); + printf(_("%s: log file count %d, exceeds %d, remove the oldest ones\n"), prefix_name, count, LOG_MAX_COUNT); + + int remove_cnt = 0; + while (remove_cnt < count - LOG_MAX_COUNT) { + rc = snprintf_s(pathname, MAXPGPATH, MAXPGPATH - 1, "%s/%s", log_path, file_list[remove_cnt]->fileName); + securec_check_ss_c(rc, "\0", "\0"); + if (remove(pathname) < 0) { + printf(_("%s: remove log file %s failed!\n"), prefix_name, pathname, gs_strerror(errno)); + continue; + } + + remove_cnt++; + 
printf(_("%s: remove log file %s successfully, remain %d files\n"), prefix_name, pathname, count - remove_cnt); + } + + free_file_list(file_list, count); + (void)closedir(dir); +} + +static int create_log_file(const char* prefix_name, const char* log_path, int *count) { #define LOG_MAX_SIZE (16 * 1024 * 1024) #define LOG_MAX_TIMELEN 80 @@ -238,7 +423,8 @@ static int create_log_file(const char* prefix_name, const char* log_path) while (NULL != (de = readdir(dir))) { // exist current log file - if (NULL != strstr(de->d_name, prefix_name)) { + if (NULL != strstr(de->d_name, prefix_name) && !str_end_with(de->d_name, ".lock")) { + *count += 1; name_ptr = strstr(de->d_name, "-current.log"); if (NULL != name_ptr) { name_ptr += strlen("-current.log"); @@ -269,9 +455,8 @@ static int create_log_file(const char* prefix_name, const char* log_path) (void)closedir(dir); return -1; } + is_exist = false; } - (void)closedir(dir); - return 0; } } } @@ -300,15 +485,16 @@ static int create_log_file(const char* prefix_name, const char* log_path) (void)dup2(fd, fileno(stderr)); (void)fprintf(stderr, _("[%s]\n"), current_localtime); // add current time to log close(fd); + *count += 1; } (void)closedir(dir); return 0; } -static void redirect_output(const char* prefix_name, const char* log_dir) +static void redirect_output(const char* prefix_name, const char* log_dir, int *count) { - if (0 != create_log_file(prefix_name, log_dir)) { + if (0 != create_log_file(prefix_name, log_dir, count)) { printf(_("Warning: create_log_file failed!\n")); return; } @@ -343,7 +529,19 @@ void init_log(char* prefix_name) } allow_log_store = is_redirect; // if false print to screen, if true print to file if (true == is_redirect) { - redirect_output(prefix_name, log_dir); + int file_count = 0; + FILE *fd = NULL; + if (gs_srvtool_lock(prefix_name, log_dir, &fd) == -1) { + printf(_("another %s command is running, init_log failed!\n"), prefix_name); + exit(1); + } + + redirect_output(prefix_name, log_dir, 
&file_count); + if (file_count > LOG_MAX_COUNT) { + remove_oldest_log(prefix_name, log_dir, file_count); + } + + gs_srvtool_unlock(fd); } } @@ -403,4 +601,4 @@ void GenerateProgressBar(int percent, char* progressBar) progressBar[barWidth + 1] = ']'; progressBar[barWidth + 2] = '\0'; -} \ No newline at end of file +} diff --git a/src/test/ha/ha_schedule_multi_single b/src/test/ha/ha_schedule_multi_single index 0142b66aa6..686f76f432 100644 --- a/src/test/ha/ha_schedule_multi_single +++ b/src/test/ha/ha_schedule_multi_single @@ -16,3 +16,4 @@ multi_standby_single/hash_index multi_standby_single/consistency.sh multi_standby_single/wal_stat multi_standby_single/xlog_prune +multi_standby_single/log_remove diff --git a/src/test/ha/testcase/multi_standby_single/log_remove.sh b/src/test/ha/testcase/multi_standby_single/log_remove.sh new file mode 100644 index 0000000000..d1971c6a53 --- /dev/null +++ b/src/test/ha/testcase/multi_standby_single/log_remove.sh @@ -0,0 +1,84 @@ +#!/bin/sh + +:< Date: Fri, 28 Jun 2024 12:05:54 +0800 Subject: [PATCH 081/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3timescaledb=E6=8F=92?= =?UTF-8?q?=E4=BB=B6,=20=E6=99=AE=E9=80=9A=E8=A1=A8=E4=BF=AE=E6=94=B9?= =?UTF-8?q?=E5=88=97=E5=AD=97=E6=AE=B5=E5=B1=9E=E6=80=A7=E5=90=8E=E8=BD=AC?= =?UTF-8?q?=E5=8C=96=E4=B8=BA=E8=B6=85=E8=A1=A8=E6=8F=92=E5=85=A5=E6=95=B0?= =?UTF-8?q?=E6=8D=AE=E5=A4=B1=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/plan/setrefs.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/gausskernel/optimizer/plan/setrefs.cpp b/src/gausskernel/optimizer/plan/setrefs.cpp index a88b0333af..ec5db1b956 100644 --- a/src/gausskernel/optimizer/plan/setrefs.cpp +++ b/src/gausskernel/optimizer/plan/setrefs.cpp @@ -120,7 +120,7 @@ static bool fix_scan_expr_walker(Node* node, fix_scan_expr_context* context); static void set_join_references(PlannerInfo* root, Join* join, int rtoffset); static void 
set_upper_references(PlannerInfo* root, Plan* plan, int rtoffset); static void set_dummy_tlist_references(Plan* plan, int rtoffset); -static indexed_tlist* build_tlist_index(List* tlist); +static indexed_tlist* build_tlist_index(List* tlist, bool returnConstForConst = false); static Var* search_indexed_tlist_for_var(Var* var, indexed_tlist* itlist, Index newvarno, int rtoffset); static Var* search_indexed_tlist_for_non_var(Node* node, indexed_tlist* itlist, Index newvarno); static Var* search_indexed_tlist_for_sortgroupref( @@ -1228,7 +1228,7 @@ static void set_tlist_qual_extensible_exprs_of_extensibleplan(PlannerInfo* root, { if (cscan->extensible_plan_tlist != NIL || cscan->scan.scanrelid == 0) { /* Adjust tlist, qual, extensible_exprs to reference extensible scan tuple */ - indexed_tlist* itlist = build_tlist_index(cscan->extensible_plan_tlist); + indexed_tlist* itlist = build_tlist_index(cscan->extensible_plan_tlist, true); cscan->scan.plan.targetlist = (List*)fix_upper_expr(root, (Node*)cscan->scan.plan.targetlist, itlist, INDEX_VAR, rtoffset); @@ -1771,7 +1771,7 @@ static void set_dummy_tlist_references(Plan* plan, int rtoffset) * to search_indexed_tlist_for_var() or search_indexed_tlist_for_non_var(). * When done, the indexed_tlist may be freed with a single pfree_ext(). 
*/ -static indexed_tlist* build_tlist_index(List* tlist) +static indexed_tlist* build_tlist_index(List* tlist, bool returnConstForConst) { indexed_tlist* itlist = NULL; tlist_vinfo* vinfo = NULL; @@ -1799,6 +1799,8 @@ static indexed_tlist* build_tlist_index(List* tlist) vinfo++; } else if (tle->expr && IsA(tle->expr, PlaceHolderVar)) itlist->has_ph_vars = true; + else if (tle->expr && IsA(tle->expr, Const) && returnConstForConst) + itlist->return_const = true; else itlist->has_non_vars = true; } -- Gitee From 7ad99d704a873e9c9542ce9e0bb83f7bdc5d0dc6 Mon Sep 17 00:00:00 2001 From: lukeman Date: Tue, 23 Jul 2024 14:46:41 +0800 Subject: [PATCH 082/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9A?= =?UTF-8?q?=E5=9F=BA=E8=A1=A8=E5=85=B3=E8=81=94=E8=A7=86=E5=9B=BE=E5=90=8E?= =?UTF-8?q?=EF=BC=8C=E6=8A=8A=E5=9F=BA=E8=A1=A8=E5=88=97=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E7=94=B1int=E6=94=B9=E4=B8=BAnumeric=E5=90=8E=EF=BC=8C?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E8=A7=86=E5=9B=BE=E5=92=8C=E5=9F=BA=E8=A1=A8?= =?UTF-8?q?=E6=97=B6=EF=BC=8C=E6=8A=A5=E9=94=99=E8=AF=B4ERROR:compressed?= =?UTF-8?q?=20data=20is=20corrupt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_relation.cpp | 40 ++++++++++++++++++-- src/test/regress/expected/dependent_view.out | 26 +++++++++++++ src/test/regress/sql/dependent_view.sql | 11 ++++++ 3 files changed, 74 insertions(+), 3 deletions(-) diff --git a/src/common/backend/parser/parse_relation.cpp b/src/common/backend/parser/parse_relation.cpp index df06b36f28..4935dac876 100755 --- a/src/common/backend/parser/parse_relation.cpp +++ b/src/common/backend/parser/parse_relation.cpp @@ -1081,6 +1081,31 @@ static void CheckPgAttribute(Oid obj_oid, char* attName, Form_pg_attribute new_a heap_close(rel, RowExclusiveLock); } +static bool findDependentTable(Relation rel, Oid type_id) +{ + bool found = false; + if (!OidIsValid(type_id)) { + return found; + } + const int keyNum = 2; + ScanKeyData 
key_dep[keyNum]; + SysScanDesc scan_dep = NULL; + HeapTuple tup_dep = NULL; + ScanKeyInit(&key_dep[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(TypeRelationId)); + ScanKeyInit(&key_dep[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(type_id)); + scan_dep = systable_beginscan(rel, DependReferenceIndexId, true, NULL, keyNum, key_dep); + while (HeapTupleIsValid((tup_dep = systable_getnext(scan_dep)))) { + Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup_dep); + if (depform->classid == RelationRelationId) { + found = true; + break; + } + } + systable_endscan(scan_dep); + return found; +} + enum ValidateDependResult { ValidateDependInvalid, ValidateDependValid, @@ -1091,7 +1116,9 @@ enum ValidateDependResult { static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List** list) { bool isValid = true; + bool existTable = false; Oid rw_objid = InvalidOid; + Oid type_id = InvalidOid; // 1. filter the valid view if (GetPgObjectValid(view_oid, objType)) { return ValidateDependValid; @@ -1103,7 +1130,7 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* } *list = lappend_oid(*list, view_oid); - // 2. find pg_rewrite entry which this view depends on internally + // 2. 
find pg_rewrite/pg_type entry which depend on this view internally const int keyNum = 2; ScanKeyData key[keyNum]; SysScanDesc scan = NULL; @@ -1117,7 +1144,8 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup); if (depform->classid == RewriteRelationId && depform->deptype == DEPENDENCY_INTERNAL) { rw_objid = depform->objid; - break; + } else if (depform->classid == TypeRelationId && depform->deptype == DEPENDENCY_INTERNAL) { + type_id = depform->objid; } } systable_endscan(scan); @@ -1125,7 +1153,7 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* if (!OidIsValid(rw_objid)) { elog(ERROR, "cannot find the internal dependent pg_rewrite entry."); } - // 3. find all columns of parent views and tables which this view depends on directly, + // 3.1 find all columns of parent views and tables which this view depends on directly, // and check their validity recursively. List *query_str = NIL; ScanKeyData key_dep[keyNum]; @@ -1202,6 +1230,12 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* } pfree_ext(newtuple); systable_endscan(scan_dep); + // 3.2 find views or tables which depend on this view directly, + // and report error if tables exist. + existTable = findDependentTable(rel_dep, type_id); + if (existTable) { + elog(ERROR, "The view is invalid. There is a table dependent on the view so it cannot be recompiled."); + } heap_close(rel_dep, RowExclusiveLock); // 4. 
mark the current view valid if (!circularDependency) { diff --git a/src/test/regress/expected/dependent_view.out b/src/test/regress/expected/dependent_view.out index 7dd74b90d9..5459a3d3ea 100644 --- a/src/test/regress/expected/dependent_view.out +++ b/src/test/regress/expected/dependent_view.out @@ -648,6 +648,29 @@ create view circular_dependency_v4 as select * from circular_dependency_v3; create or replace view circular_dependency_v3 as select * from circular_dependency_t2 union all select * from circular_dependency_v4; alter table circular_dependency_t2 modify id int8; -- failed ERROR: infinite recursion detected in rules for relation: "circular_dependency_v3" +-- test5 table depends on a view +create table t11 (a int, b int); +create view v11 as select * from t11; +create table t12 (v v11); +insert into t11 values (1,2), (3,4); +insert into t12 select v11 from v11; +alter table t11 modify b numeric; +select * from t12;-- ok + v +------- + (1,2) + (3,4) +(2 rows) + +select * from v11;-- expect error +ERROR: The view is invalid. There is a table dependent on the view so it cannot be recompiled. 
+select * from t12;-- ok + v +------- + (1,2) + (3,4) +(2 rows) + --- clean drop schema dependent_view cascade; --?.* @@ -661,3 +684,6 @@ drop cascades to view circular_dependency_v2 drop cascades to table circular_dependency_t2 drop cascades to view circular_dependency_v4 drop cascades to view circular_dependency_v3 +drop cascades to table t11 +drop cascades to view v11 +drop cascades to table t12 diff --git a/src/test/regress/sql/dependent_view.sql b/src/test/regress/sql/dependent_view.sql index 5239390152..2c0c706ef5 100644 --- a/src/test/regress/sql/dependent_view.sql +++ b/src/test/regress/sql/dependent_view.sql @@ -235,5 +235,16 @@ create view circular_dependency_v4 as select * from circular_dependency_v3; create or replace view circular_dependency_v3 as select * from circular_dependency_t2 union all select * from circular_dependency_v4; alter table circular_dependency_t2 modify id int8; -- failed +-- test5 table depends on a view +create table t11 (a int, b int); +create view v11 as select * from t11; +create table t12 (v v11); +insert into t11 values (1,2), (3,4); +insert into t12 select v11 from v11; +alter table t11 modify b numeric; +select * from t12;-- ok +select * from v11;-- expect error +select * from t12;-- ok + --- clean drop schema dependent_view cascade; -- Gitee From 6024672af7ce13d63c8bf0c96139de3d556873d7 Mon Sep 17 00:00:00 2001 From: JulianZhang Date: Tue, 23 Jul 2024 14:48:41 +0800 Subject: [PATCH 083/347] fix: more xlog files are initialized when available xlogs are deleted manully --- src/gausskernel/storage/access/transam/xlog.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 51d584e22a..170ef11034 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -2728,9 +2728,12 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) * Unlock 
WalAuxiliary thread to init new xlog segment if we are running out * of xlog segments, or available segments is less than wal_file_init_num * 0.2. */ - if (!use_existent || !segs_enough) { + if (!segs_enough) { g_instance.wal_cxt.globalEndPosSegNo = Max(g_instance.wal_cxt.globalEndPosSegNo, t_thrd.xlog_cxt.openLogSegNo); WakeupWalSemaphore(&g_instance.wal_cxt.walInitSegLock->l.sem); + } else if (!use_existent) { + g_instance.wal_cxt.globalEndPosSegNo = t_thrd.xlog_cxt.openLogSegNo; + WakeupWalSemaphore(&g_instance.wal_cxt.walInitSegLock->l.sem); } } -- Gitee From fa1b926b8213722a39541ad7741e4a77e0a87258 Mon Sep 17 00:00:00 2001 From: jiangyan <18091841830@163.com> Date: Mon, 8 Jul 2024 12:01:14 +0800 Subject: [PATCH 084/347] preparse --- src/common/backend/catalog/builtin_funcs.ini | 4 + .../backend/pgxc_single/barrier/barrier.cpp | 27 +----- src/common/backend/utils/init/globals.cpp | 3 +- .../process/postmaster/barrier_preparse.cpp | 91 +++++++++++++++++++ .../process/postmaster/postmaster.cpp | 50 ++++------ .../process/threadpool/knl_instance.cpp | 3 + .../rollback-post_catalog_maindb_92_942.sql | 1 + .../rollback-post_catalog_otherdb_92_942.sql | 1 + .../upgrade-post_catalog_maindb_92_942.sql | 9 ++ .../upgrade_catalog_otherdb_92_942.sql | 9 ++ src/include/knl/knl_instance.h | 5 + src/include/postmaster/barrier_preparse.h | 2 + 12 files changed, 146 insertions(+), 59 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_942.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_942.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_942.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_942.sql diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 143ccf1145..87e62eb8b8 
100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -3711,6 +3711,10 @@ "gs_get_parallel_decode_status", 1, AddBuiltinFunc(_0(9377), _1("gs_get_parallel_decode_status"), _2(0), _3(false), _4(true), _5(gs_get_parallel_decode_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(7, 25, 23, 25, 25, 25, 20, 20), _22(7, 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(7, "slot_name", "parallel_decode_num", "read_change_queue_length", "decode_change_queue_length", "reader_lsn", "working_txn_cnt", "working_txn_memory"), _24(NULL), _25("gs_get_parallel_decode_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "gs_get_preparse_location", 1, + AddBuiltinFunc(_0(2874), _1("gs_get_preparse_location"), _2(0), _3(false), _4(true), _5(gs_get_preparse_location), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(3, 25, 25, 25), _22(3, 'o', 'o', 'o'), _23(3, "preparse_start_location", "preparse_end_location", "last_valid_record"), _24(NULL), _25("gs_get_preparse_location"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("statistics: information about WAL locations"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "gs_get_standby_cluster_barrier_status", 1, AddBuiltinFunc(_0(9039), _1("gs_get_standby_cluster_barrier_status"), _2(0), _3(true), _4(false), _5(gs_get_standby_cluster_barrier_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), 
_15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(4, 25, 25, 25, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "barrier_id", "barrier_lsn", "recovery_id", "target_id"), _24(NULL), _25("gs_get_standby_cluster_barrier_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/src/common/backend/pgxc_single/barrier/barrier.cpp b/src/common/backend/pgxc_single/barrier/barrier.cpp index eef9408f1e..a100cfaa38 100755 --- a/src/common/backend/pgxc_single/barrier/barrier.cpp +++ b/src/common/backend/pgxc_single/barrier/barrier.cpp @@ -45,7 +45,6 @@ static void EndBarrier(PGXCNodeAllHandles* handles, const char* id, bool isSwitc static void CommitBarrier(PGXCNodeAllHandles* prepared_handles, const char* id); static void WriteBarrierLSNFile(XLogRecPtr barrierLSN, const char* barrier_id); static void replace_barrier_id_compatible(const char* id, char** log_id); -static void RequestXLogStreamForBarrier(); static void barrier_redo_pause(char* barrierId); static bool TryBarrierLockWithTimeout(); static void CheckBarrierCommandStatus(PGXCNodeAllHandles* conn_handles, const char* id, const char* command, bool isCn, @@ -551,8 +550,6 @@ bool is_barrier_pausable(const char* id) } #ifdef ENABLE_MULTIPLE_NODES - - static void SaveAllNodeBarrierLsnInfo(const char* id, const PGXCNodeAllHandles* connHandles) { int conn; @@ -1003,28 +1000,6 @@ void replace_barrier_id_compatible(const char* id, char** log_id) { *log_id = tmp_id; } -static void RequestXLogStreamForBarrier() -{ - XLogRecPtr replayEndPtr = GetXLogReplayRecPtr(NULL); - if (t_thrd.xlog_cxt.is_cascade_standby && (CheckForSwitchoverTrigger() || CheckForFailoverTrigger())) { - HandleCascadeStandbyPromote(&replayEndPtr); - return; - } - if (!WalRcvInProgress() && g_instance.pid_cxt.WalReceiverPID == 0) { - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - 
SpinLockAcquire(&walrcv->mutex); - walrcv->receivedUpto = 0; - SpinLockRelease(&walrcv->mutex); - if (t_thrd.xlog_cxt.readFile >= 0) { - (void)close(t_thrd.xlog_cxt.readFile); - t_thrd.xlog_cxt.readFile = -1; - } - - RequestXLogStreaming(&replayEndPtr, t_thrd.xlog_cxt.PrimaryConnInfo, REPCONNTARGET_PRIMARY, - u_sess->attr.attr_storage.PrimarySlotName); - } -} - static void barrier_redo_pause(char* barrierId) { if (!is_barrier_pausable(barrierId) || t_thrd.xlog_cxt.recoveryTarget == RECOVERY_TARGET_TIME_OBS) { @@ -1066,4 +1041,4 @@ static void barrier_redo_pause(char* barrierId) } } } -#endif +#endif \ No newline at end of file diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 1bce896181..6c7a2caaf7 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -75,7 +75,8 @@ bool will_shutdown = false; * NEXT | 92899 | ? | ? * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92941; + +const uint32 GRAND_VERSION_NUM = 92942; /******************************************** * 2.VERSION NUM FOR EACH FEATURE diff --git a/src/gausskernel/process/postmaster/barrier_preparse.cpp b/src/gausskernel/process/postmaster/barrier_preparse.cpp index c3e86df3ab..b7d40e9210 100644 --- a/src/gausskernel/process/postmaster/barrier_preparse.cpp +++ b/src/gausskernel/process/postmaster/barrier_preparse.cpp @@ -38,6 +38,10 @@ #include "replication/walreceiver.h" #include "pgxc/barrier.h" #include "postmaster/barrier_preparse.h" +#include "utils/builtins.h" +#include "access/htup.h" +#include "funcapi.h" +#include "access/extreme_rto/dispatcher.h" typedef struct XLogPageReadPrivate { const char *datadir; @@ -168,6 +172,44 @@ void SetBarrierPreParseLsn(XLogRecPtr startptr) SpinLockRelease(&walrcv->mutex); } +bool check_preparse_result(XLogRecPtr *recptr) +{ + if (t_thrd.barrier_preparse_cxt.shutdown_requested) { + return false; + } + if 
(XLogRecPtrIsInvalid(g_instance.csn_barrier_cxt.lastValidRecord)) { + XLogRecPtr lastReplayRecPtr = InvalidXLogRecPtr; + (void)GetXLogReplayRecPtr(NULL, &lastReplayRecPtr); + if (XLogRecPtrIsInvalid(lastReplayRecPtr)) { + *recptr = lastReplayRecPtr; + } + return false; + } + return true; +} + +void RequestXLogStreamForBarrier() +{ + XLogRecPtr replayEndPtr = GetXLogReplayRecPtr(NULL); + if (t_thrd.xlog_cxt.is_cascade_standby && (CheckForSwitchoverTrigger() || CheckForFailoverTrigger())) { + HandleCascadeStandbyPromote(&replayEndPtr); + return; + } + if (!WalRcvInProgress() && g_instance.pid_cxt.WalReceiverPID == 0) { + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + SpinLockAcquire(&walrcv->mutex); + walrcv->receivedUpto = 0; + SpinLockRelease(&walrcv->mutex); + if (t_thrd.xlog_cxt.readFile >= 0) { + (void)close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + } + + RequestXLogStreaming(&replayEndPtr, t_thrd.xlog_cxt.PrimaryConnInfo, REPCONNTARGET_PRIMARY, + u_sess->attr.attr_storage.PrimarySlotName); + } +} + void BarrierPreParseMain(void) { volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; @@ -187,6 +229,11 @@ void BarrierPreParseMain(void) ereport(LOG, (errmsg("[BarrierPreParse] barrier preparse thread started"))); + /* Init preparse information*/ + g_instance.csn_barrier_cxt.preparseStartLocation = InvalidXLogRecPtr; + g_instance.csn_barrier_cxt.preparseEndLocation = InvalidXLogRecPtr; + g_instance.csn_barrier_cxt.lastValidRecord = InvalidXLogRecPtr; + /* * Reset some signals that are accepted by postmaster but not here */ @@ -227,6 +274,7 @@ void BarrierPreParseMain(void) g_instance.proc_base->BarrierPreParseLatch = &t_thrd.proc->procLatch; startLSN = walrcv->lastReceivedBarrierLSN; + g_instance.csn_barrier_cxt.preparseStartLocation = startLSN; ereport(LOG, (errmsg("[BarrierPreParse] preparse thread start at %08X/%08X", (uint32)(startLSN >> shiftSize), (uint32)startLSN))); @@ -287,6 +335,7 @@ void 
BarrierPreParseMain(void) break; } lastReadLSN = xlogreader->ReadRecPtr; + g_instance.csn_barrier_cxt.lastValidRecord = lastReadLSN; uint8 info = XLogRecGetInfo(xlogreader) & ~XLR_INFO_MASK; if (NEED_INSERT_INTO_HASH) { xLogBarrierId = XLogRecGetData(xlogreader); @@ -316,6 +365,11 @@ void BarrierPreParseMain(void) if (XLogRecPtrIsInvalid(xlogreader->ReadRecPtr) && errormsg) { ereport(LOG, (errmsg("[BarrierPreParse] preparse thread get an error info %s", errormsg))); } + g_instance.csn_barrier_cxt.preparseEndLocation = startLSN; + + if (check_preparse_result(&startLSN)) { + RequestXLogStreamForBarrier(); + } const long sleepTime = 1000; rc = WaitLatch(&t_thrd.proc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, sleepTime); if (((unsigned int)rc) & WL_POSTMASTER_DEATH) { @@ -334,3 +388,40 @@ void WakeUpBarrierPreParseBackend() } } } + +Datum gs_get_preparse_location(PG_FUNCTION_ARGS) +{ + if (!superuser() && !(isOperatoradmin(GetUserId()))) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser/sysadmin or operator admin to use gs_get_preparse_location()"))); + } + XLogRecPtr preparseStartLocation = g_instance.csn_barrier_cxt.preparseStartLocation; + XLogRecPtr preparseEndLocation = g_instance.csn_barrier_cxt.preparseEndLocation; + XLogRecPtr lastValidRecord = g_instance.csn_barrier_cxt.lastValidRecord; + + TupleDesc tupdesc; + Datum values[3]; + bool nulls[3] = {0}; + HeapTuple tuple; + Datum result; + char location[MAXFNAMELEN * 3] = {0}; + errno_t rc = EOK; + + const int COLUMN_NUM = 3; + tupdesc = CreateTemplateTupleDesc(COLUMN_NUM, false); + + TupleDescInitEntry(tupdesc, (AttrNumber)1, "preparse_start_location", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "preparse_end_location", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "last_valid_record", TEXTOID, -1, 0); + + BlessTupleDesc(tupdesc); + + values[0] = LsnGetTextDatum(preparseStartLocation); + values[1] = 
LsnGetTextDatum(preparseEndLocation); + values[2] = LsnGetTextDatum(lastValidRecord); + + tuple = heap_form_tuple(tupdesc, values, nulls); + result = HeapTupleGetDatum(tuple); + + PG_RETURN_DATUM(result); +} diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index ab328fa98f..45d0ca2a72 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -4425,6 +4425,10 @@ static int ServerLoop(void) if (IS_PGXC_COORDINATOR && g_instance.attr.attr_sql.max_resource_package && (g_instance.pid_cxt.CPMonitorPID == 0) && (pmState == PM_RUN) && !dummyStandbyMode) g_instance.pid_cxt.CPMonitorPID = initialize_util_thread(WLM_CPMONITOR); + + if (ENABLE_PREPARSE && g_instance.pid_cxt.BarrierPreParsePID == 0) { + g_instance.pid_cxt.BarrierPreParsePID = initialize_util_thread(BARRIER_PREPARSE); + } #ifndef ENABLE_LITE_MODE /* If we have lost the twophase cleaner, try to start a new one */ @@ -13642,10 +13646,10 @@ static void SetAuxType() case EXRTO_RECYCLER: t_thrd.bootstrap_cxt.MyAuxProcType = ExrtoRecyclerProcess; break; -#ifdef ENABLE_MULTIPLE_NODES case BARRIER_PREPARSE: t_thrd.bootstrap_cxt.MyAuxProcType = BarrierPreParseBackendProcess; break; +#ifdef ENABLE_MULTIPLE_NODES case TS_COMPACTION: t_thrd.bootstrap_cxt.MyAuxProcType = TsCompactionProcess; break; @@ -13945,11 +13949,11 @@ int GaussDbAuxiliaryThreadMain(knl_thread_arg* arg) extreme_rto::exrto_recycle_main(); proc_exit(1); break; -#ifdef ENABLE_MULTIPLE_NODES case BARRIER_PREPARSE: BarrierPreParseMain(); proc_exit(1); break; +#ifdef ENABLE_MULTIPLE_NODES case TS_COMPACTION: CompactionProcess::compaction_main(); proc_exit(1); @@ -14213,8 +14217,8 @@ int GaussDbThreadMain(knl_thread_arg* arg) case HEARTBEAT: case SHARE_STORAGE_XLOG_COPYER: case EXRTO_RECYCLER: -#ifdef ENABLE_MULTIPLE_NODES case BARRIER_PREPARSE: +#ifdef ENABLE_MULTIPLE_NODES case TS_COMPACTION: case TS_COMPACTION_CONSUMER: case 
TS_COMPACTION_AUXILIAY: @@ -14775,10 +14779,9 @@ static ThreadMetaData GaussdbThreadGate[] = { #endif { GaussDbThreadMain, DMS_AUXILIARY_THREAD, "dms_auxiliary", "maintenance xmin in dms" }, { GaussDbThreadMain, EXRTO_RECYCLER, "exrtorecycler", "exrto recycler" }, - + { GaussDbThreadMain, BARRIER_PREPARSE, "barrierpreparse", "barrier preparse backend" }, /* Keep the block in the end if it may be absent !!! */ #ifdef ENABLE_MULTIPLE_NODES - { GaussDbThreadMain, BARRIER_PREPARSE, "barrierpreparse", "barrier preparse backend" }, { GaussDbThreadMain, TS_COMPACTION, "TScompaction", "timeseries compaction" }, { GaussDbThreadMain, TS_COMPACTION_CONSUMER, "TScompconsumer", @@ -15038,36 +15041,13 @@ Datum disable_conn(PG_FUNCTION_ARGS) } /* - * Make sure that all xlog has been redo before locking. - * Sleep 0.5s is an auxiliary way to check whether all xlog has been redone. + * lock and start the preparse thread */ if (disconn_node.conn_mode == PROHIBIT_CONNECTION) { uint32 conn_mode = pg_atomic_read_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node); - while (checkTimes--) { - if (knl_g_get_redo_finish_status()) { - redoDone = true; - break; - } - ereport(LOG, (errmsg("%d redo_done", redoDone))); - sleep(0.01); - } - ereport(LOG, (errmsg("%d redo_done", redoDone))); - if (!redoDone) { - if (!conn_mode) { - pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, true); - // clean redo done - pg_atomic_write_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage, false); - } - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not add lock when DN is not redo all xlog, redo done flag is false"))); - } - - XLogRecPtr replay1 = GetXLogReplayRecPtrInPending(); - sleep(0.5); - XLogRecPtr replay2 = GetXLogReplayRecPtrInPending(); - if (replay1 != replay2) { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not add lock when DN is not redo all xlog."))); - } + if (!WalRcvInProgress() && 
g_instance.pid_cxt.BarrierPreParsePID == 0) { + g_instance.csn_barrier_cxt.startBarrierPreParse = true; + } } else { pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, false); } @@ -15079,6 +15059,12 @@ Datum disable_conn(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_ATTRIBUTE), errmsg("Invalid null pointer attribute for disable_conn()"))); } + if (!WalRcvInProgress()) { + RequestXLogStreamForBarrier(); + } + if (g_instance.pid_cxt.BarrierPreParsePID == 0) { + g_instance.csn_barrier_cxt.startBarrierPreParse = false; + } host = TextDatumGetCString(arg1); ValidateInputString(host); if (!isVaildIp(host)) { diff --git a/src/gausskernel/process/threadpool/knl_instance.cpp b/src/gausskernel/process/threadpool/knl_instance.cpp index d5358fe26b..30da9b02cd 100755 --- a/src/gausskernel/process/threadpool/knl_instance.cpp +++ b/src/gausskernel/process/threadpool/knl_instance.cpp @@ -930,6 +930,9 @@ static void knl_g_csn_barrier_init(knl_g_csn_barrier_context* csn_barrier_cxt) errno_t rc = memset_s(csn_barrier_cxt->stopBarrierId, MAX_BARRIER_ID_LENGTH, 0, sizeof(csn_barrier_cxt->stopBarrierId)); securec_check(rc, "\0", "\0"); + csn_barrier_cxt->startBarrierPreParse = false; + csn_barrier_cxt->preparseStartLocation = InvalidXLogRecPtr; + csn_barrier_cxt->preparseEndLocation = InvalidXLogRecPtr; } static void knl_g_audit_init(knl_g_audit_context *audit_cxt) diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_942.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_942.sql new file mode 100644 index 0000000000..19662d367d --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_942.sql @@ -0,0 +1 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_preparse_location() cascade; \ No newline at end of file diff --git 
a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_942.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_942.sql new file mode 100644 index 0000000000..19662d367d --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_942.sql @@ -0,0 +1 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_preparse_location() cascade; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_942.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_942.sql new file mode 100644 index 0000000000..1f280deb68 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_942.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_preparse_location(); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2874; +CREATE FUNCTION pg_catalog.gs_get_preparse_location( + OUT preparse_start_location text, + OUT preparse_end_location text, + OUT last_valid_record text +) +RETURNS SETOF record +LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_get_preparse_location'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_942.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_942.sql new file mode 100644 index 0000000000..1f280deb68 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_942.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_preparse_location(); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2874; +CREATE FUNCTION pg_catalog.gs_get_preparse_location( + OUT preparse_start_location text, + OUT preparse_end_location text, + OUT last_valid_record text +) +RETURNS SETOF record +LANGUAGE INTERNAL ROWS 1 STRICT as 
'gs_get_preparse_location'; \ No newline at end of file diff --git a/src/include/knl/knl_instance.h b/src/include/knl/knl_instance.h index 024dc9eff4..3441388dda 100755 --- a/src/include/knl/knl_instance.h +++ b/src/include/knl/knl_instance.h @@ -788,6 +788,11 @@ typedef struct knl_g_csn_barrier_context { LWLock* barrier_hashtbl_lock; char stopBarrierId[MAX_BARRIER_ID_LENGTH]; MemoryContext barrier_context; + + bool startBarrierPreParse; + XLogRecPtr preparseStartLocation; + XLogRecPtr preparseEndLocation; + XLogRecPtr lastValidRecord; } knl_g_csn_barrier_context; typedef struct knl_g_comm_context { diff --git a/src/include/postmaster/barrier_preparse.h b/src/include/postmaster/barrier_preparse.h index 4dd96af156..9a2c4dc77f 100644 --- a/src/include/postmaster/barrier_preparse.h +++ b/src/include/postmaster/barrier_preparse.h @@ -45,8 +45,10 @@ ereport(WARNING, (errmsg("trying to delete a barrierID that does not exist"))); \ } while (0) +#define ENABLE_PREPARSE (g_instance.csn_barrier_cxt.startBarrierPreParse == true) extern void BarrierPreParseMain(void); extern void WakeUpBarrierPreParseBackend(void); extern void SetBarrierPreParseLsn(XLogRecPtr startptr); +extern void RequestXLogStreamForBarrier(); #endif /* BARRIER_PREPARSE_H */ -- Gitee From 386b379ad26a51a0f6adb5260ad888651f38e2e7 Mon Sep 17 00:00:00 2001 From: dongning12 Date: Tue, 23 Jul 2024 16:23:20 +0800 Subject: [PATCH 085/347] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=E3=80=90=E5=90=8C=E6=AD=A5DMS=E3=80=91?= =?UTF-8?q?=E3=80=90=E5=90=8C=E6=AD=A5CBB=E3=80=917.23=E5=90=8C=E6=AD=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 4 ++-- src/include/ddes/dms/dms_api.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 0fa5966c29..026c74ffd4 100644 --- a/src/gausskernel/ddes/ddes_commit_id 
+++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=1dcd3d829655f517e24ba753c770f1b45cd5edf6 +dms_commit_id=106e4c8860d9c307f612829c9f44a19d0402962d dss_commit_id=b714d964156722f4353840c0f5bb66c92158e3c6 -cbb_commit_id=fff8dd7eb442c6aa9f50cd88cd7f3c2357325a3e +cbb_commit_id=7de11250903f6d5ade6deab3dc04fff6b29d0190 diff --git a/src/include/ddes/dms/dms_api.h b/src/include/ddes/dms/dms_api.h index d8cb1d4752..53e38ae9e6 100644 --- a/src/include/ddes/dms/dms_api.h +++ b/src/include/ddes/dms/dms_api.h @@ -34,7 +34,7 @@ extern "C" { #define DMS_LOCAL_MINOR_VER_WEIGHT 1000 #define DMS_LOCAL_MAJOR_VERSION 0 #define DMS_LOCAL_MINOR_VERSION 0 -#define DMS_LOCAL_VERSION 159 +#define DMS_LOCAL_VERSION 160 #define DMS_SUCCESS 0 #define DMS_ERROR (-1) @@ -1226,7 +1226,7 @@ typedef struct st_dms_profile { unsigned int resource_catalog_centralized : 1; // 1: centralized, 0: distributed unsigned int time_stat_enabled : 1; unsigned int reserved : 29; - unsigned int elapsed_switch; + unsigned char elapsed_switch; unsigned char rdma_rpc_use_busypoll; // busy poll need to occupy the cpu core unsigned char rdma_rpc_is_bind_core; unsigned char rdma_rpc_bind_core_start; -- Gitee From 1438051a34203b737b0fc342a2163996dbdd20ab Mon Sep 17 00:00:00 2001 From: z00848344 Date: Wed, 24 Jul 2024 09:04:22 +0800 Subject: [PATCH 086/347] On branch exp_dss Your branch is up to date with 'origin/exp_dss'. 
Changes to be committed: modified: src/gausskernel/ddes/adapter/ss_dms_recovery.cpp --- src/gausskernel/ddes/adapter/ss_dms_recovery.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp index 9fc18f536d..ddfcb38234 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp @@ -161,8 +161,7 @@ void SSInitReformerControlPages(void) struct stat st; if (stat(XLOG_CONTROL_FILE, &st) == 0 && S_ISREG(st.st_mode)) { SSReadControlFile(REFORM_CTRL_PAGE); - if (g_instance.dms_cxt.SSReformerControl.list_stable != 0 || - g_instance.dms_cxt.SSReformerControl.primaryInstId == SS_MY_INST_ID) { + if (g_instance.dms_cxt.SSReformerControl.primaryInstId == SS_MY_INST_ID) { (void)printf("[SS] ERROR: files from last install must be cleared.\n"); ereport(ERROR, (errmsg("Files from last initdb not cleared"))); } -- Gitee From 74059eaf888b4171667387923bc4e8aeb08c1c2b Mon Sep 17 00:00:00 2001 From: jiangyan <18091841830@163.com> Date: Wed, 24 Jul 2024 10:43:50 +0800 Subject: [PATCH 087/347] =?UTF-8?q?lock1=E6=81=A2=E5=A4=8D=E8=A6=81?= =?UTF-8?q?=E6=B1=82=E5=9B=9E=E6=94=BE=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../process/postmaster/postmaster.cpp | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index 45d0ca2a72..f1c747e474 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -15047,7 +15047,32 @@ Datum disable_conn(PG_FUNCTION_ARGS) uint32 conn_mode = pg_atomic_read_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node); if (!WalRcvInProgress() && g_instance.pid_cxt.BarrierPreParsePID == 0) { 
g_instance.csn_barrier_cxt.startBarrierPreParse = true; - } + while (checkTimes--) { + if (knl_g_get_redo_finish_status()) { + redoDone = true; + break; + } + ereport(LOG, (errmsg("%d redo_done", redoDone))); + sleep(0.01); + } + ereport(LOG, (errmsg("%d redo_done", redoDone))); + if (!redoDone) { + if (!conn_mode) { + pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, true); + // clean redo done + pg_atomic_write_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage, false); + } + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not add lock when DN is not redo all xlog, redo done flag is false"))); + } + + XLogRecPtr replay1 = GetXLogReplayRecPtrInPending(); + sleep(0.5); + XLogRecPtr replay2 = GetXLogReplayRecPtrInPending(); + if (replay1 != replay2) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not add lock when DN is not redo all xlog."))); + } + } } else { pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, false); } -- Gitee From 0528470e1ba3fd741cdda53df665f7b05cf3ffa5 Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Mon, 22 Jul 2024 10:46:48 +0800 Subject: [PATCH 088/347] =?UTF-8?q?=E6=8C=89=E9=9C=80=E5=9B=9E=E6=94=BERTO?= =?UTF-8?q?=E6=80=A7=E8=83=BD=E4=BC=98=E5=8C=96=201.=20=E6=8C=89=E9=9C=80?= =?UTF-8?q?=E5=9B=9E=E6=94=BE=E8=B7=B3=E8=BF=87PreallocXlogFiles=202.=20fa?= =?UTF-8?q?ilover=E9=98=B6=E6=AE=B5=E4=B8=8D=E6=89=93=E5=BC=80=E5=85=A8?= =?UTF-8?q?=E9=83=A8=E7=9A=84=E6=AE=B5=E9=A1=B5=E5=BC=8F=E6=96=87=E4=BB=B6?= =?UTF-8?q?=203.=20=E5=AE=9E=E6=97=B6=E6=9E=84=E5=BB=BA=E5=8D=87=E4=B8=BB?= =?UTF-8?q?=E9=98=B6=E6=AE=B5=EF=BC=8CHashMap=E7=BA=BF=E7=A8=8B=E8=A3=81?= =?UTF-8?q?=E5=89=AA=E6=97=A5=E5=BF=97=E4=B8=8D=E9=9C=80=E8=A6=81=E7=AD=89?= =?UTF-8?q?=E5=BE=85=E6=AE=B5=E9=A1=B5=E5=BC=8F=E6=97=A5=E5=BF=97=E5=9B=9E?= =?UTF-8?q?=E6=94=BE=E5=AE=8C=E6=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit --- src/common/backend/utils/init/miscinit.cpp | 4 +-- .../ondemand_extreme_rto/page_redo.cpp | 7 +---- .../storage/access/transam/xlog.cpp | 28 +++++++++++-------- .../storage/smgr/segment/extent_group.cpp | 3 +- .../storage/smgr/segment/space.cpp | 2 +- 5 files changed, 21 insertions(+), 23 deletions(-) diff --git a/src/common/backend/utils/init/miscinit.cpp b/src/common/backend/utils/init/miscinit.cpp index 9830191be1..efc005334d 100644 --- a/src/common/backend/utils/init/miscinit.cpp +++ b/src/common/backend/utils/init/miscinit.cpp @@ -2124,8 +2124,8 @@ void initDssPath(char *dssdir) rc = snprintf_s(g_instance.datadir_cxt.multixactDir, MAXPGPATH, MAXPGPATH - 1, "%s/pg_multixact", dssdir); securec_check_ss(rc, "", ""); - rc = snprintf_s(g_instance.datadir_cxt.xlogDir, MAXPGPATH, MAXPGPATH - 1, "%s/pg_xlog%d", g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name, - g_instance.attr.attr_storage.dms_attr.instance_id); + rc = snprintf_s(g_instance.datadir_cxt.xlogDir, MAXPGPATH, MAXPGPATH - 1, "%s/pg_xlog%d", + g_instance.attr.attr_storage.dss_attr.ss_dss_xlog_vg_name, g_instance.attr.attr_storage.dms_attr.instance_id); securec_check_ss(rc, "", ""); rc = snprintf_s(g_instance.datadir_cxt.controlPath, MAXPGPATH, MAXPGPATH - 1, "%s/pg_control", dssdir); diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index 24e40592d2..e690e16c74 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -2034,7 +2034,7 @@ void SegWorkerProcLsnForwarder(RedoItem *lsnForwarder) } while (refCount != 1); // prune done, redo all seg block record - SegWorkerRedoAllSegBlockRecord(); + SegWorkerRedoIfRealtimeBuildFailover(); SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); 
(void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); @@ -3323,11 +3323,6 @@ static void HashMapManagerProcLsnForwarder(RedoItem *lsnForwarder) { SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); - uint32 refCount; - do { - refCount = pg_atomic_read_u32(&g_GlobalLsnForwarder.record.refcount); - RedoInterruptCallBack(); - } while (refCount != 0); } void HashMapManagerMain() diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index dd83fd1dbb..c0ea76c533 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -4006,10 +4006,6 @@ static int XLogFileInitInternal(XLogSegNo logsegno, bool *use_existent, bool use { char path[MAXPGPATH]; char tmppath[MAXPGPATH]; - char *zbuffer = NULL; - char* ss_zero_buffer_raw = NULL; - char* ss_zero_buffer = NULL; - char zbuffer_raw[XLOG_BLCKSZ + ALIGNOF_BUFFER]; XLogSegNo installed_segno; int max_advance; int fd; @@ -4074,11 +4070,9 @@ static int XLogFileInitInternal(XLogSegNo logsegno, bool *use_existent, bool use * Note: ensure the buffer is reasonably well-aligned; this may save a few * cycles transferring data to the kernel. 
*/ - zbuffer = (char *)BUFFERALIGN(zbuffer_raw); - rc = memset_s(zbuffer, XLOG_BLCKSZ, 0, XLOG_BLCKSZ); - securec_check(rc, "\0", "\0"); - if (is_dss_fd(fd)) { + char *ss_zero_buffer = NULL; + char *ss_zero_buffer_raw = NULL; /* extend file and fill space at once to avoid performance issue */ pgstat_report_waitevent(WAIT_EVENT_WAL_INIT_WRITE); errno = 0; @@ -4122,6 +4116,11 @@ static int XLogFileInitInternal(XLogSegNo logsegno, bool *use_existent, bool use pfree(ss_zero_buffer_raw); } } else { + char zbuffer_raw[XLOG_BLCKSZ + ALIGNOF_BUFFER]; + char *zbuffer = (char *)BUFFERALIGN(zbuffer_raw); + rc = memset_s(zbuffer, XLOG_BLCKSZ, 0, XLOG_BLCKSZ); + securec_check(rc, "\0", "\0"); + for (nbytes = 0; (uint32)nbytes < XLogSegSize; nbytes += XLOG_BLCKSZ) { errno = 0; pgstat_report_waitevent(WAIT_EVENT_WAL_INIT_WRITE); @@ -5152,11 +5151,16 @@ static void ExecuteRecoveryCommand(char *command, char *commandName, bool failOn * a lot of segment creations by foreground processes, which is not so good. */ static void PreallocXlogFiles(XLogRecPtr endptr) -{ - /* In ss repplication dorado cluster, standby cluster doesn't need to preallocate xlog files */ - if (SS_DORADO_STANDBY_CLUSTER) { +{ + /* + * These condition do not need preallocate xlog files: + * 1. In ss repplication dorado cluster, standby cluster sync primary xlog + * 2. In ondemand recovery, we do not preallocate xlog files for better rto + */ + if (SS_DORADO_STANDBY_CLUSTER || SS_IN_ONDEMAND_RECOVERY) { return; } + XLogSegNo _logSegNo; int lf; bool use_existent = false; @@ -10718,7 +10722,7 @@ void StartupXLOG(void) uint32 redoReadOff = t_thrd.xlog_cxt.readOff; /* only primary mode can call getwritepermissionsharedstorage when dorado hyperreplication. 
*/ - if(IS_SHARED_STORAGE_MODE) { + if (IS_SHARED_STORAGE_MODE) { GetWritePermissionSharedStorage(); CheckShareStorageCtlInfo(EndOfLog); } diff --git a/src/gausskernel/storage/smgr/segment/extent_group.cpp b/src/gausskernel/storage/smgr/segment/extent_group.cpp index f983957d3e..916e4de6be 100644 --- a/src/gausskernel/storage/smgr/segment/extent_group.cpp +++ b/src/gausskernel/storage/smgr/segment/extent_group.cpp @@ -109,10 +109,9 @@ void eg_init_df_ctrl(SegExtentGroup *seg) SegLogicFile *sf = (SegLogicFile *)palloc(sizeof(SegLogicFile)); MemoryContextSwitchTo(oldcnxt); df_ctrl_init(sf, seg->rnode, seg->forknum); - if (!SS_STANDBY_MODE) { + if (!ENABLE_DMS) { df_open_files(sf); } - seg->segfile = sf; } diff --git a/src/gausskernel/storage/smgr/segment/space.cpp b/src/gausskernel/storage/smgr/segment/space.cpp index c34489e3c3..3843de05a4 100644 --- a/src/gausskernel/storage/smgr/segment/space.cpp +++ b/src/gausskernel/storage/smgr/segment/space.cpp @@ -244,7 +244,7 @@ void InitSpaceNode(SegSpace *spc, Oid spcNode, Oid dbNode, bool is_redo) } } - if (SS_STANDBY_MODE) { + if (ENABLE_DMS) { SSInitSegLogicFile(spc); } } -- Gitee From 12431a59208ec735fe978c3d89bff3d0e632c2e4 Mon Sep 17 00:00:00 2001 From: z00848344 Date: Wed, 24 Jul 2024 16:30:20 +0800 Subject: [PATCH 089/347] On branch drop_dss Your branch is up to date with 'origin/drop_dss'. 
Changes to be committed: modified: src/gausskernel/ddes/adapter/ss_init.cpp --- src/gausskernel/ddes/adapter/ss_init.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/ddes/adapter/ss_init.cpp b/src/gausskernel/ddes/adapter/ss_init.cpp index 3f3d26c30a..7e078be4ab 100644 --- a/src/gausskernel/ddes/adapter/ss_init.cpp +++ b/src/gausskernel/ddes/adapter/ss_init.cpp @@ -68,7 +68,7 @@ static void scanURL(dms_profile_t* profile, char* ipportstr, int index) errmsg("invalid ip string: %s", ipstr))); } profile->inst_net_addr[index].port = (uint16)pg_strtoint32(portstr); - profile->inst_net_addr[index].inst_id = index; + profile->inst_net_addr[index].inst_id = ipportstr[0] - '0'; profile->inst_net_addr[index].need_connect = true; ret = strcpy_s(g_instance.dms_cxt.dmsInstAddr[index], DMS_MAX_IP_LEN, ipstr); -- Gitee From e7fb56830d7ccd71e2c7914e1438ac80bfe67501 Mon Sep 17 00:00:00 2001 From: jiwenke Date: Wed, 24 Jul 2024 17:26:07 +0800 Subject: [PATCH 090/347] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E6=94=AF=E6=8C=81Gen?= =?UTF-8?q?eric=20WAL=E6=8E=A5=E5=8F=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/CMakeLists.txt | 4 + contrib/pg_xlogdump/CMakeLists.txt | 1 + contrib/pg_xlogdump/rmgrdesc.cpp | 1 + src/common/backend/parser/parse_utilcmd.cpp | 1 + src/common/backend/utils/init/globals.cpp | 3 +- .../storage/access/rmgrdesc/Makefile | 4 +- .../storage/access/rmgrdesc/genericdesc.cpp | 59 +++ .../storage/access/transam/CMakeLists.txt | 1 + .../storage/access/transam/Makefile | 4 +- .../storage/access/transam/generic_xlog.cpp | 430 ++++++++++++++++++ .../storage/access/transam/rmgr.cpp | 1 + src/include/access/generic_xlog.h | 39 ++ src/include/access/rmgrlist.h | 1 + src/include/catalog/index.h | 1 + src/include/lib/simplehash.h | 19 +- src/include/miscadmin.h | 1 + src/include/port/pg_bitutils.h | 17 + 17 files changed, 564 insertions(+), 23 deletions(-) create mode 100644 
src/gausskernel/storage/access/rmgrdesc/genericdesc.cpp create mode 100644 src/gausskernel/storage/access/transam/generic_xlog.cpp create mode 100644 src/include/access/generic_xlog.h diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 2de85bcf28..7777f0625a 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -25,6 +25,7 @@ set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/gc_fdw ${CMAKE_CURRENT_SOURCE_DIR}/ndpplugin ${CMAKE_CURRENT_SOURCE_DIR}/spq_plugin + ${CMAKE_CURRENT_SOURCE_DIR}/datavec ${CMAKE_CURRENT_SOURCE_DIR}/gms_stats ${CMAKE_CURRENT_SOURCE_DIR}/gms_profiler ) @@ -53,4 +54,7 @@ add_subdirectory(ndpplugin) if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/spq_plugin) add_subdirectory(spq_plugin) endif() +if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/datavec) + add_subdirectory(datavec) +endif() add_subdirectory(gms_profiler) diff --git a/contrib/pg_xlogdump/CMakeLists.txt b/contrib/pg_xlogdump/CMakeLists.txt index bfc02ff095..d53590b2c5 100644 --- a/contrib/pg_xlogdump/CMakeLists.txt +++ b/contrib/pg_xlogdump/CMakeLists.txt @@ -4,6 +4,7 @@ execute_process( COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/barrierdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/barrierdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/clogdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/clogdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/dbasedesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/dbasedesc.cpp + COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/genericdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/genericdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/gindesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/gindesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/gistdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/gistdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/hashdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/hashdesc.cpp diff --git 
a/contrib/pg_xlogdump/rmgrdesc.cpp b/contrib/pg_xlogdump/rmgrdesc.cpp index 11708296ae..3b3391ecb7 100644 --- a/contrib/pg_xlogdump/rmgrdesc.cpp +++ b/contrib/pg_xlogdump/rmgrdesc.cpp @@ -10,6 +10,7 @@ #include "knl/knl_variable.h" #include "access/clog.h" +#include "access/generic_xlog.h" #include "access/gin.h" #include "access/gist_private.h" #include "access/hash.h" diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp index 6f0f821eb8..060f0731c5 100644 --- a/src/common/backend/parser/parse_utilcmd.cpp +++ b/src/common/backend/parser/parse_utilcmd.cpp @@ -4340,6 +4340,7 @@ IndexStmt* transformIndexStmt(Oid relid, IndexStmt* stmt, const char* queryStrin (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_GIN_INDEX_TYPE)) && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_GIST_INDEX_TYPE)) && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_IVFFLAT_INDEX_TYPE)) && + (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_HNSW_INDEX_TYPE)) && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_USTORE_INDEX_TYPE)) && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_HASH_INDEX_TYPE))) { /* row store only support btree/ubtree/gin/gist/hash index */ diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 6c7a2caaf7..0d1f7aeb74 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,7 +76,7 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92942; +const uint32 GRAND_VERSION_NUM = 92943; /******************************************** * 2.VERSION NUM FOR EACH FEATURE @@ -96,6 +96,7 @@ const uint32 PUBLICATION_DDL_VERSION_NUM = 92921; const uint32 UPSERT_ALIAS_VERSION_NUM = 92920; const uint32 SUPPORT_GS_DEPENDENCY_VERSION_NUM = 92916; const uint32 SPQ_VERSION_NUM = 92915; +const uint32 GENERICXLOG_VERSION_NUM = 92943; const uint32 PARTITION_ACCESS_EXCLUSIVE_LOCK_UPGRADE_VERSION 
= 92913; const uint32 PAGE_DIST_VERSION_NUM = 92912; const uint32 NODE_REFORM_INFO_VERSION_NUM = 92911; diff --git a/src/gausskernel/storage/access/rmgrdesc/Makefile b/src/gausskernel/storage/access/rmgrdesc/Makefile index 422ca04b32..4b48546f53 100644 --- a/src/gausskernel/storage/access/rmgrdesc/Makefile +++ b/src/gausskernel/storage/access/rmgrdesc/Makefile @@ -11,12 +11,12 @@ ifneq "$(MAKECMDGOALS)" "clean" endif ifeq ($(enable_mot), yes) -OBJS = barrierdesc.o clogdesc.o dbasedesc.o gindesc.o gistdesc.o \ +OBJS = barrierdesc.o clogdesc.o dbasedesc.o genericdesc.o gindesc.o gistdesc.o \ hashdesc.o heapdesc.o motdesc.o mxactdesc.o nbtdesc.o relmapdesc.o \ replorigindesc.o seqdesc.o smgrdesc.o spgdesc.o standbydesc.o tblspcdesc.o \ xactdesc.o xlogdesc.o slotdesc.o undologdesc.o uheapdesc.o segpagedesc.o logicalddlmsgdesc.o else -OBJS = barrierdesc.o clogdesc.o dbasedesc.o gindesc.o gistdesc.o \ +OBJS = barrierdesc.o clogdesc.o dbasedesc.o genericdesc.o gindesc.o gistdesc.o \ hashdesc.o heapdesc.o mxactdesc.o nbtdesc.o relmapdesc.o \ replorigindesc.o seqdesc.o smgrdesc.o spgdesc.o standbydesc.o tblspcdesc.o \ xactdesc.o xlogdesc.o slotdesc.o undologdesc.o uheapdesc.o segpagedesc.o logicalddlmsgdesc.o diff --git a/src/gausskernel/storage/access/rmgrdesc/genericdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/genericdesc.cpp new file mode 100644 index 0000000000..baa40b7ab2 --- /dev/null +++ b/src/gausskernel/storage/access/rmgrdesc/genericdesc.cpp @@ -0,0 +1,59 @@ +/*------------------------------------------------------------------------- + * + * genericdesc.cpp + * rmgr descriptor routines for access/transam/generic_xlog.cpp + * + * Portions Copyright (c) 2024 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/gausskernel/storage/access/rmgrdesc/genericdesc.cpp + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/generic_xlog.h" +#include "lib/stringinfo.h" +#include "storage/smgr/relfilenode.h" + +/* + * Description of generic xlog record: write page regions that this record + * overrides. + */ +void +generic_desc(StringInfo buf, XLogReaderState *record) +{ + errno_t ret = EOK; + Pointer ptr = XLogRecGetData(record), + end = ptr + XLogRecGetDataLen(record); + + while (ptr < end) { + OffsetNumber offset, length; + + ret = memcpy_s(&offset, sizeof(offset), ptr, sizeof(offset)); + securec_check(ret, "\0", "\0"); + ptr += sizeof(offset); + ret = memcpy_s(&length, sizeof(length), ptr, sizeof(length)); + securec_check(ret, "\0", "\0"); + ptr += sizeof(length); + ptr += length; + + if (ptr < end) + appendStringInfo(buf, "offset %u, length %u; ", offset, length); + else + appendStringInfo(buf, "offset %u, length %u", offset, length); + } + + return; +} + +/* + * Identification of generic xlog record: we don't distinguish any subtypes + * inside generic xlog records. 
+ */ +const char * +generic_identify(uint8 info) +{ + return "Generic"; +} diff --git a/src/gausskernel/storage/access/transam/CMakeLists.txt b/src/gausskernel/storage/access/transam/CMakeLists.txt index 10b4f7ace1..93eaa58378 100755 --- a/src/gausskernel/storage/access/transam/CMakeLists.txt +++ b/src/gausskernel/storage/access/transam/CMakeLists.txt @@ -6,6 +6,7 @@ list(APPEND TGT_transam_SRC ${CMAKE_CURRENT_SOURCE_DIR}/csnlog.cpp ${CMAKE_CURRENT_SOURCE_DIR}/double_write.cpp ${CMAKE_CURRENT_SOURCE_DIR}/extreme_rto_redo_api.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/generic_xlog.cpp ${CMAKE_CURRENT_SOURCE_DIR}/single_double_write.cpp ${CMAKE_CURRENT_SOURCE_DIR}/multi_redo_api.cpp ${CMAKE_CURRENT_SOURCE_DIR}/multi_redo_settings.cpp diff --git a/src/gausskernel/storage/access/transam/Makefile b/src/gausskernel/storage/access/transam/Makefile index 351cb189c8..083924afff 100644 --- a/src/gausskernel/storage/access/transam/Makefile +++ b/src/gausskernel/storage/access/transam/Makefile @@ -12,12 +12,12 @@ endif ifeq ($(enable_multiple_nodes), yes) OBJS = clog.o multixact.o rmgr.o slru.o csnlog.o transam.o twophase.o \ twophase_rmgr.o varsup.o double_write.o single_double_write.o seg_double_write.o redo_statistic.o multi_redo_api.o multi_redo_settings.o\ - xact.o xlog.o xlogfuncs.o extreme_rto_redo_api.o \ + xact.o xlog.o xlogfuncs.o extreme_rto_redo_api.o generic_xlog.o \ xloginsert.o xlogreader.o xlogutils.o cbmparsexlog.o cbmfuncs.o else OBJS = clog.o gtm_single.o multixact.o rmgr.o slru.o csnlog.o transam.o twophase.o \ twophase_rmgr.o varsup.o double_write.o single_double_write.o seg_double_write.o redo_statistic.o multi_redo_api.o multi_redo_settings.o\ - xact.o xlog.o xlogfuncs.o extreme_rto_redo_api.o \ + xact.o xlog.o xlogfuncs.o extreme_rto_redo_api.o generic_xlog.o \ xloginsert.o xlogreader.o xlogutils.o cbmparsexlog.o cbmfuncs.o endif diff --git a/src/gausskernel/storage/access/transam/generic_xlog.cpp b/src/gausskernel/storage/access/transam/generic_xlog.cpp new 
file mode 100644 index 0000000000..e07a1b722b --- /dev/null +++ b/src/gausskernel/storage/access/transam/generic_xlog.cpp @@ -0,0 +1,430 @@ +/*------------------------------------------------------------------------- + * + * generic_xlog.cpp + * Implementation of generic xlog records. + * + * Portions Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/gausskernel/storage/access/transam/generic_xlog.cpp + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/generic_xlog.h" +#include "access/xlogproc.h" +#include "miscadmin.h" +#include "utils/memutils.h" + +/*------------------------------------------------------------------------- + * Internally, a delta between pages consists of a set of fragments. Each + * fragment represents changes made in a given region of a page. A fragment + * is made up as follows: + * + * - offset of page region (OffsetNumber) + * - length of page region (OffsetNumber) + * - data - the data to place into the region ('length' number of bytes) + * + * Unchanged regions of a page are not represented in its delta. As a + * result, a delta can be more compact than the full page image. But having + * an unchanged region in the middle of two fragments that is smaller than + * the fragment header (offset and length) does not pay off in terms of the + * overall size of the delta. For this reason, we break fragments only if + * the unchanged region is bigger than MATCH_THRESHOLD. + * + * The worst case for delta sizes occurs when we did not find any unchanged + * region in the page. The size of the delta will be the size of the page plus + * the size of the fragment header in that case. 
+ *------------------------------------------------------------------------- + */ +#define FRAGMENT_HEADER_SIZE (2 * sizeof(OffsetNumber)) +#define MATCH_THRESHOLD FRAGMENT_HEADER_SIZE +#define MAX_DELTA_SIZE BLCKSZ + FRAGMENT_HEADER_SIZE + +/* Struct of generic xlog data for single page */ +typedef struct +{ + Buffer buffer; /* registered buffer */ + char image[BLCKSZ]; /* copy of page image for modification */ + char data[MAX_DELTA_SIZE]; /* delta between page images */ + int dataLen; /* space consumed in data field */ + int flags; /* flags for this buffer */ +} PageData; + +/* State of generic xlog record construction */ +struct GenericXLogState +{ + bool isLogged; + PageData pages[MAX_GENERIC_XLOG_PAGES]; +}; + +static void writeFragment(PageData *pageData, OffsetNumber offset, + OffsetNumber len, Pointer data); +static void writeDelta(PageData *pageData); +static void applyPageRedo(Page page, Pointer data, Size dataSize); + +/* + * Write next fragment into delta. + */ +static void +writeFragment(PageData *pageData, OffsetNumber offset, OffsetNumber length, + Pointer data) +{ + errno_t ret = EOK; + Pointer ptr = pageData->data + pageData->dataLen; + + /* Check if we have enough space */ + Assert(pageData->dataLen + sizeof(offset) + + sizeof(length) + length <= sizeof(pageData->data)); + + /* Write fragment data */ + ret = memcpy_s(ptr, MAX_DELTA_SIZE, &offset, sizeof(offset)); + securec_check(ret, "\0", "\0"); + ptr += sizeof(offset); + ret = memcpy_s(ptr, MAX_DELTA_SIZE - sizeof(offset), &length, sizeof(length)); + securec_check(ret, "\0", "\0"); + ptr += sizeof(length); + ret = memcpy_s(ptr, MAX_DELTA_SIZE - sizeof(offset) - sizeof(length), data, length); + securec_check(ret, "\0", "\0"); + ptr += length; + + pageData->dataLen = ptr - pageData->data; +} + +/* + * Make delta for given page. 
+ */ +static void +writeDelta(PageData *pageData) +{ + Page page = BufferGetPage(pageData->buffer), + image = (Page) pageData->image; + int i, fragmentBegin = -1, fragmentEnd = -1; + uint16 pageLower = ((PageHeader) page)->pd_lower, + pageUpper = ((PageHeader) page)->pd_upper, + imageLower = ((PageHeader) image)->pd_lower, + imageUpper = ((PageHeader) image)->pd_upper; + + for (i = 0; i < BLCKSZ; i++) { + bool match; + + /* + * Check if bytes in old and new page images match. We do not care + * about data in the unallocated area between pd_lower and pd_upper. + * We assume the unallocated area to expand with unmatched bytes. + * Bytes inside the unallocated area are assumed to always match. + */ + if (i < pageLower) { + if (i < imageLower) + match = (page[i] == image[i]); + else + match = false; + } else if (i >= pageUpper) { + if (i >= imageUpper) + match = (page[i] == image[i]); + else + match = false; + } else { + match = true; + } + + if (match) { + if (fragmentBegin >= 0) { + /* Matched byte is potentially part of a fragment. */ + if (fragmentEnd < 0) + fragmentEnd = i; + + /* + * Write next fragment if sequence of matched bytes is longer + * than MATCH_THRESHOLD. + */ + if (i - fragmentEnd >= MATCH_THRESHOLD) { + writeFragment(pageData, fragmentBegin, + fragmentEnd - fragmentBegin, + page + fragmentBegin); + fragmentBegin = -1; + fragmentEnd = -1; + } + } + } else { + /* On unmatched byte, start new fragment if it is not done yet */ + if (fragmentBegin < 0) + fragmentBegin = i; + fragmentEnd = -1; + } + } + + if (fragmentBegin >= 0) + writeFragment(pageData, fragmentBegin, BLCKSZ - fragmentBegin, page + fragmentBegin); + +#ifdef WAL_DEBUG + /* + * If xlog debug is enabled, then check produced delta. Result of delta + * application to saved image should be the same as current page state. 
+ */ + if (XLOG_DEBUG) { + errno_t ret = EOK; + char tmp[BLCKSZ]; + ret = memcpy_s(tmp, BLCKSZ, image, BLCKSZ); + securec_check(ret, "\0", "\0"); + applyPageRedo(tmp, pageData->data, pageData->dataLen); + if (memcmp(tmp, page, pageLower) + || memcmp(tmp + pageUpper, page + pageUpper, BLCKSZ - pageUpper)) + elog(ERROR, "result of generic xlog apply does not match"); + } +#endif +} + +/* + * Start new generic xlog record. + */ +GenericXLogState * +GenericXLogStart(Relation relation) +{ + int i; + GenericXLogState *state; + + if (t_thrd.proc->workingVersionNum < GENERICXLOG_VERSION_NUM) { + elog(ERROR, "workingVersionNum is lowwer than GENERICXLOG_VERSION_NUM, not supported!"); + return NULL; + } + + state = (GenericXLogState *) palloc(sizeof(GenericXLogState)); + + state->isLogged = RelationNeedsWAL(relation); + for (i = 0; i < MAX_GENERIC_XLOG_PAGES; i++) + state->pages[i].buffer = InvalidBuffer; + + return state; +} + +/* + * Register new buffer for generic xlog record. + */ +Page +GenericXLogRegisterBuffer(GenericXLogState *state, Buffer buffer, int flags) +{ + errno_t ret = EOK; + int block_id; + + if (state == NULL) { + elog(ERROR, "GenericXLogState invalid!"); + return NULL; + } + + /* Place new buffer to unused slot in array */ + for (block_id = 0; block_id < MAX_GENERIC_XLOG_PAGES; block_id++) { + PageData *page = &state->pages[block_id]; + if (BufferIsInvalid(page->buffer)) { + page->buffer = buffer; + ret = memcpy_s(page->image, BLCKSZ, BufferGetPage(buffer), BLCKSZ); + securec_check(ret, "\0", "\0"); + page->dataLen = 0; + page->flags = flags; + return (Page)page->image; + } else if (page->buffer == buffer) { + /* + * Buffer is already registered. Just return the image, which is + * already prepared. + */ + return (Page)page->image; + } + } + + elog(ERROR, "maximum number of %d generic xlog buffers is exceeded", + MAX_GENERIC_XLOG_PAGES); + + /* keep compiler quiet */ + return NULL; +} + +/* + * Unregister particular buffer for generic xlog record. 
+ */ +void +GenericXLogUnregister(GenericXLogState *state, Buffer buffer) +{ + int block_id; + + if (state == NULL) { + elog(ERROR, "GenericXLogState invalid!"); + return; + } + + /* Find block in array to unregister */ + for (block_id = 0; block_id < MAX_GENERIC_XLOG_PAGES; block_id++) { + if (state->pages[block_id].buffer == buffer) { + /* + * Preserve order of pages in array because it could matter for + * concurrency. + */ + int ret = memmove_s(&state->pages[block_id], (MAX_GENERIC_XLOG_PAGES - block_id - 1) * sizeof(PageData), &state->pages[block_id + 1], + (MAX_GENERIC_XLOG_PAGES - block_id - 1) * sizeof(PageData)); + securec_check(ret, "\0", "\0"); + state->pages[MAX_GENERIC_XLOG_PAGES - 1].buffer = InvalidBuffer; + return; + } + } + + elog(ERROR, "registered generic xlog buffer not found"); +} + +/* + * Put all changes in registered buffers to generic xlog record. + */ +XLogRecPtr +GenericXLogFinish(GenericXLogState *state) +{ + XLogRecPtr lsn = InvalidXLogRecPtr; + int i; + errno_t ret = EOK; + + Assert(state != NULL); + + if (state->isLogged) { + /* Logged relation: make xlog record in critical section. */ + XLogBeginInsert(); + + START_CRIT_SECTION(); + + for (i = 0; i < MAX_GENERIC_XLOG_PAGES; i++) { + char tmp[BLCKSZ]; + PageData *page = &state->pages[i]; + + if (BufferIsInvalid(page->buffer)) + continue; + + /* Swap current and saved page image. */ + ret = memcpy_s(tmp, BLCKSZ, page->image, BLCKSZ); + securec_check(ret, "\0", "\0"); + ret = memcpy_s(page->image, BLCKSZ, BufferGetPage(page->buffer), BLCKSZ); + securec_check(ret, "\0", "\0"); + ret = memcpy_s(BufferGetPage(page->buffer), BLCKSZ, tmp, BLCKSZ); + securec_check(ret, "\0", "\0"); + + if (page->flags & GENERIC_XLOG_FULL_IMAGE) { + /* A full page image does not require anything special */ + XLogRegisterBuffer(i, page->buffer, REGBUF_FORCE_IMAGE); + } else { + /* + * In normal mode, calculate delta and write it as data + * associated with this page. 
+ */ + XLogRegisterBuffer(i, page->buffer, REGBUF_STANDARD); + writeDelta(page); + XLogRegisterBufData(i, page->data, page->dataLen); + } + } + + /* Insert xlog record */ + lsn = XLogInsert(RM_GENERIC_ID, 0); + + /* Set LSN and mark buffers dirty */ + for (i = 0; i < MAX_GENERIC_XLOG_PAGES; i++) { + PageData *page = &state->pages[i]; + + if (BufferIsInvalid(page->buffer)) + continue; + PageSetLSN(BufferGetPage(page->buffer), lsn); + MarkBufferDirty(page->buffer); + } + END_CRIT_SECTION(); + } else { + /* Unlogged relation: skip xlog-related stuff */ + START_CRIT_SECTION(); + for (i = 0; i < MAX_GENERIC_XLOG_PAGES; i++) { + PageData *page = &state->pages[i]; + + if (BufferIsInvalid(page->buffer)) + continue; + ret = memcpy_s(BufferGetPage(page->buffer), BLCKSZ, page->image, BLCKSZ); + securec_check(ret, "\0", "\0"); + MarkBufferDirty(page->buffer); + } + END_CRIT_SECTION(); + } + + pfree(state); + + return lsn; +} + +/* + * Abort generic xlog record. + */ +void +GenericXLogAbort(GenericXLogState *state) +{ + pfree(state); +} + +/* + * Apply delta to given page image. + */ +static void +applyPageRedo(Page page, Pointer data, Size dataSize) +{ + errno_t ret = EOK; + Pointer ptr = data, end = data + dataSize; + + while (ptr < end) { + OffsetNumber offset, length; + + ret = memcpy_s(&offset, sizeof(offset), ptr, sizeof(offset)); + securec_check(ret, "\0", "\0"); + ptr += sizeof(offset); + ret =memcpy_s(&length, sizeof(length), ptr, sizeof(length)); + securec_check(ret, "\0", "\0"); + ptr += sizeof(length); + + ret = memcpy_s(page + offset, length, ptr, length); + securec_check(ret, "\0", "\0"); + + ptr += length; + } +} + +/* + * Redo function for generic xlog record. 
+ */ +void +generic_redo(XLogReaderState *record) +{ + uint8 block_id; + RedoBufferInfo buffers[MAX_GENERIC_XLOG_PAGES]; + XLogRecPtr lsn = record->EndRecPtr; + + Assert(record->max_block_id < MAX_GENERIC_XLOG_PAGES); + + /* Iterate over blocks */ + for (block_id = 0; block_id <= record->max_block_id; block_id++) { + XLogRedoAction action; + + if (!XLogRecHasBlockRef(record, block_id)) + continue; + + action = XLogReadBufferForRedo(record, block_id, &buffers[block_id]); + + /* Apply redo to given block if needed */ + if (action == BLK_NEEDS_REDO) { + Pointer blockData; + Size blockDataSize; + Page page; + + page = BufferGetPage(buffers[block_id].buf); + blockData = XLogRecGetBlockData(record, block_id, &blockDataSize); + applyPageRedo(page, blockData, blockDataSize); + + PageSetLSN(page, lsn); + MarkBufferDirty(buffers[block_id].buf); + } + } + + /* Changes are done: unlock and release all buffers */ + for (block_id = 0; block_id <= record->max_block_id; block_id++) { + if (BufferIsValid(buffers[block_id].buf)) + UnlockReleaseBuffer(buffers[block_id].buf); + } +} diff --git a/src/gausskernel/storage/access/transam/rmgr.cpp b/src/gausskernel/storage/access/transam/rmgr.cpp index 797f8bd6dd..7fd0a54bb9 100644 --- a/src/gausskernel/storage/access/transam/rmgr.cpp +++ b/src/gausskernel/storage/access/transam/rmgr.cpp @@ -26,6 +26,7 @@ #include "knl/knl_variable.h" #include "access/clog.h" +#include "access/generic_xlog.h" #include "access/gin.h" #include "access/gist_private.h" #include "access/hash.h" diff --git a/src/include/access/generic_xlog.h b/src/include/access/generic_xlog.h new file mode 100644 index 0000000000..c6f5da1d1f --- /dev/null +++ b/src/include/access/generic_xlog.h @@ -0,0 +1,39 @@ +/*------------------------------------------------------------------------- + * + * generic_xlog.h + * Generic xlog API definition. 
+ * + * src/include/access/generic_xlog.h + * + *------------------------------------------------------------------------- + */ +#ifndef GENERIC_XLOG_H +#define GENERIC_XLOG_H + +#include "access/xlog.h" +#include "access/xlog_internal.h" +#include "access/xloginsert.h" +#include "storage/buf/bufpage.h" +#include "utils/rel.h" + +#define MAX_GENERIC_XLOG_PAGES XLR_NORMAL_MAX_BLOCK_ID +#define GENERIC_XLOG_FULL_IMAGE 0x0001 /* write full-page image */ + +/* state of generic xlog record construction */ +struct GenericXLogState; +typedef struct GenericXLogState GenericXLogState; + +/* API for construction of generic xlog records */ +extern GenericXLogState *GenericXLogStart(Relation relation); +extern Page GenericXLogRegisterBuffer(GenericXLogState *state, Buffer buffer, + int flags); +extern void GenericXLogUnregister(GenericXLogState *state, Buffer buffer); +extern XLogRecPtr GenericXLogFinish(GenericXLogState *state); +extern void GenericXLogAbort(GenericXLogState *state); + +/* functions defined for rmgr */ +extern void generic_redo(XLogReaderState *record); +extern const char *generic_identify(uint8 info); +extern void generic_desc(StringInfo buf, XLogReaderState *record); + +#endif /* GENERIC_XLOG_H */ diff --git a/src/include/access/rmgrlist.h b/src/include/access/rmgrlist.h index 65d427f070..730ccb6fc0 100644 --- a/src/include/access/rmgrlist.h +++ b/src/include/access/rmgrlist.h @@ -82,3 +82,4 @@ PG_RMGR(RM_COMPRESSION_REL_ID, "CompressionRelation", CfsShrinkRedo, CfsShrinkDe CfsShrinkTypeName) PG_RMGR(RM_LOGICALDDLMSG_ID, "LogicalDDLMessage", logicalddlmsg_redo, logicalddlmsg_desc, NULL, NULL, NULL, NULL, NULL, \ logicalddlmsg_type_name) +PG_RMGR(RM_GENERIC_ID, "Generic", generic_redo, generic_desc, NULL, NULL, NULL, NULL, NULL, NULL) diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h index 2acdc1338f..4e322f6f47 100644 --- a/src/include/catalog/index.h +++ b/src/include/catalog/index.h @@ -29,6 +29,7 @@ #define CSTORE_GINBTREE_INDEX_TYPE 
"cgin" #define DEFAULT_USTORE_INDEX_TYPE "ubtree" #define DEFAULT_IVFFLAT_INDEX_TYPE "ivfflat" +#define DEFAULT_HNSW_INDEX_TYPE "hnsw" /* Typedef for callback function for IndexBuildHeapScan */ typedef void (*IndexBuildCallback)(Relation index, HeapTuple htup, Datum *values, const bool *isnull, diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h index 7f4b305f35..75567dd14a 100644 --- a/src/include/lib/simplehash.h +++ b/src/include/lib/simplehash.h @@ -300,23 +300,6 @@ SH_SCOPE void SH_STAT(SH_TYPE *tb); #endif -/* calculate ceil(log base 2) of num */ -static inline uint64 sh_log2(uint64 num) -{ - int i; - uint64 limit; - - for (i = 0, limit = 1; limit < num; i++, limit <<= 1) - ; - return i; -} - -/* calculate first power of 2 >= num */ -static inline uint64 sh_pow2(uint64 num) -{ - return ((uint64)1) << sh_log2(num); -} - /* * Compute sizing parameters for hashtable. Called when creating and growing * the hashtable. @@ -1143,4 +1126,4 @@ SH_SCOPE void SH_STAT(SH_TYPE *tb) #undef SH_DISTANCE_FROM_OPTIMAL #undef SH_ENTRY_HASH #undef SH_INSERT_HASH_INTERNAL -#undef SH_LOOKUP_HASH_INTERNAL \ No newline at end of file +#undef SH_LOOKUP_HASH_INTERNAL diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 6e07ede951..824e2bf321 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -141,6 +141,7 @@ extern const uint32 CREATE_TABLE_AS_VERSION_NUM; extern const uint32 GB18030_2022_VERSION_NUM; extern const uint32 PARTITION_ACCESS_EXCLUSIVE_LOCK_UPGRADE_VERSION; extern const uint32 SPQ_VERSION_NUM; +extern const uint32 GENERICXLOG_VERSION_NUM; extern const uint32 UPSERT_ALIAS_VERSION_NUM; extern const uint32 SELECT_STMT_HAS_USERVAR; extern const uint32 PUBLICATION_DDL_VERSION_NUM; diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h index fe55c924bb..7a55c46b56 100644 --- a/src/include/port/pg_bitutils.h +++ b/src/include/port/pg_bitutils.h @@ -159,4 +159,21 @@ static inline uint32 
pg_rotate_right32(uint32 word, int n) return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n)); } +/* calculate ceil(log base 2) of num */ +static inline uint64 sh_log2(uint64 num) +{ + int i; + uint64 limit; + + for (i = 0, limit = 1; limit < num; i++, limit <<= 1) + ; + return i; +} + +/* calculate first power of 2 >= num */ +static inline uint64 sh_pow2(uint64 num) +{ + return ((uint64)1) << sh_log2(num); +} + #endif /* PG_BITUTILS_H */ -- Gitee From 74f5335e455396334046d2cc009c7d0983eb6bcd Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Wed, 24 Jul 2024 20:07:21 +0800 Subject: [PATCH 091/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8DB=E5=BA=93=E9=80=9A?= =?UTF-8?q?=E8=BF=87spi=E6=89=A7=E8=A1=8Cabort=E5=BC=82=E5=B8=B8=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/runtime/executor/spi.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/gausskernel/runtime/executor/spi.cpp b/src/gausskernel/runtime/executor/spi.cpp index 43168348cf..ba85ce5472 100644 --- a/src/gausskernel/runtime/executor/spi.cpp +++ b/src/gausskernel/runtime/executor/spi.cpp @@ -2802,11 +2802,7 @@ static int _SPI_execute_plan0(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot s my_res = SPI_ERROR_COPY; goto fail; } - } else if (IsA(stmt, TransactionStmt) -#ifndef ENABLE_MULTIPLE_NODES - && !u_sess->attr.attr_sql.dolphin -#endif - ) { + } else if (IsA(stmt, TransactionStmt)) { my_res = SPI_ERROR_TRANSACTION; goto fail; } -- Gitee From 364cad1dd21cad7c5769b1037c65c7d1410c3981 Mon Sep 17 00:00:00 2001 From: "arcoalien@qq.com" Date: Thu, 25 Jul 2024 09:38:02 +0800 Subject: [PATCH 092/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Drepl=5Fscanner.l?= =?UTF-8?q?=E4=B8=AD=E7=9A=84=E6=8A=A5=E9=94=99=E8=A2=AB=E5=88=A0=E9=99=A4?= =?UTF-8?q?=E5=90=8E=E5=AF=BC=E8=87=B4CI=E7=94=A8=E4=BE=8B=E6=B2=A1?= =?UTF-8?q?=E8=BF=87=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/replication/repl_scanner.l | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/replication/repl_scanner.l b/src/gausskernel/storage/replication/repl_scanner.l index bcfc125d8a..46107a39c6 100755 --- a/src/gausskernel/storage/replication/repl_scanner.l +++ b/src/gausskernel/storage/replication/repl_scanner.l @@ -202,7 +202,13 @@ WAIT { return K_WAIT; } } . { - return T_WORD; + if (u_sess->proc_cxt.clientIsSubscription) { + return T_WORD; + } + + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("syntax error: unexpected character \"%s\"", yytext))); } %% -- Gitee From b5c04bb6dd5539f4be2dbdfe6bc0f5ebc791b6ca Mon Sep 17 00:00:00 2001 From: wangpingyun <2418191738@qq.com> Date: Thu, 25 Jul 2024 16:00:22 +0800 Subject: [PATCH 093/347] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B5=AE=E7=82=B9?= =?UTF-8?q?=E6=95=B0=E7=89=B9=E6=AE=8A=E5=80=BC=E6=98=BE=E7=A4=BAGUC?= =?UTF-8?q?=E6=8E=A7=E5=88=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/cluster_guc.conf | 1 + src/common/backend/utils/adt/float.cpp | 36 +++- src/common/backend/utils/misc/guc/guc_sql.cpp | 11 ++ .../knl/knl_guc/knl_session_attr_sql.h | 1 + .../regress/expected/test_binary_suffix.out | 159 +++++++++++------- src/test/regress/sql/test_binary_suffix.sql | 9 + 6 files changed, 150 insertions(+), 67 deletions(-) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 762256d80e..ddfd0467bb 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -359,6 +359,7 @@ wdr_snapshot_retention_days|int|1,8|NULL|NULL| wdr_snapshot_query_timeout|int|100,2147483647|s|NULL| enable_wdr_snapshot|bool|0,0|NULL|NULL| enable_set_variable_b_format|bool|0,0|NULL|NULL| +enable_binary_special_o_format|bool|0,0|NULL|NULL| enable_asp|bool|0,0|NULL|NULL| 
enable_startwith_debug|bool|0,0|NULL|NULL| enable_stmt_track|bool|0,0|NULL|NULL| diff --git a/src/common/backend/utils/adt/float.cpp b/src/common/backend/utils/adt/float.cpp index 0d6a4d74d8..01e700c47f 100644 --- a/src/common/backend/utils/adt/float.cpp +++ b/src/common/backend/utils/adt/float.cpp @@ -397,18 +397,30 @@ Datum float4out(PG_FUNCTION_ARGS) errno_t rc = EOK; if (isnan(num)) { - rc = strcpy_s(ascii, MAXFLOATWIDTH + 1, "NaN"); + if (u_sess->attr.attr_sql.enable_binary_special_o_format) { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "Nan"); + } else { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "NaN"); + } securec_check_ss(rc, "\0", "\0"); PG_RETURN_CSTRING(ascii); } switch (is_infinite(num)) { case 1: - rc = strcpy_s(ascii, MAXFLOATWIDTH + 1, "Infinity"); + if (u_sess->attr.attr_sql.enable_binary_special_o_format) { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "Inf"); + } else { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "Infinity"); + } securec_check_ss(rc, "\0", "\0"); break; case -1: - rc = strcpy_s(ascii, MAXFLOATWIDTH + 1, "-Infinity"); + if (u_sess->attr.attr_sql.enable_binary_special_o_format) { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "-Inf"); + } else { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "-Infinity"); + } securec_check_ss(rc, "\0", "\0"); break; default: { @@ -627,17 +639,29 @@ Datum float8out(PG_FUNCTION_ARGS) errno_t rc = EOK; if (isnan(num)) { - rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "NaN"); + if (u_sess->attr.attr_sql.enable_binary_special_o_format) { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "Nan"); + } else { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "NaN"); + } securec_check(rc, "\0", "\0"); PG_RETURN_CSTRING(ascii); } switch (is_infinite(num)) { case 1: - rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "Infinity"); + if (u_sess->attr.attr_sql.enable_binary_special_o_format) { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "Inf"); + } else { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "Infinity"); + } securec_check(rc, "\0", "\0"); break; 
case -1: - rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "-Infinity"); + if (u_sess->attr.attr_sql.enable_binary_special_o_format) { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "-Inf"); + } else { + rc = strcpy_s(ascii, MAXDOUBLEWIDTH + 1, "-Infinity"); + } securec_check(rc, "\0", "\0"); break; default: { diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index bd7328666f..eeb0c890f5 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -663,6 +663,17 @@ static void InitSqlConfigureNamesBool() NULL, NULL, NULL}, + {{"enable_binary_special_o_format", + PGC_USERSET, + NODE_ALL, + QUERY_TUNING_COST, + gettext_noop("Allows the user to specify the value of a double-precision floating-point parameter."), + NULL}, + &u_sess->attr.attr_sql.enable_binary_special_o_format, + false, + NULL, + NULL, + NULL}, {{"enable_seqscan", PGC_USERSET, NODE_ALL, diff --git a/src/include/knl/knl_guc/knl_session_attr_sql.h b/src/include/knl/knl_guc/knl_session_attr_sql.h index e93f14bbd1..12ef618de2 100644 --- a/src/include/knl/knl_guc/knl_session_attr_sql.h +++ b/src/include/knl/knl_guc/knl_session_attr_sql.h @@ -127,6 +127,7 @@ typedef struct knl_session_attr_sql { bool enable_light_proxy; bool enable_pbe_optimization; bool enable_cluster_resize; + bool enable_binary_special_o_format; bool lo_compat_privileges; bool quote_all_identifiers; bool enforce_a_behavior; diff --git a/src/test/regress/expected/test_binary_suffix.out b/src/test/regress/expected/test_binary_suffix.out index 3de471e99b..829f3d54d5 100644 --- a/src/test/regress/expected/test_binary_suffix.out +++ b/src/test/regress/expected/test_binary_suffix.out @@ -433,16 +433,53 @@ ERROR: syntax error at or near "/" LINE 1: SELECT 4.0f / 2.0; ^ set float_suffix_acceptance to on; +SELECT CONCAT('The value is ', TO_CHAR(BINARY_DOUBLE_INFINITY)) AS result; + result +----------------------- + The value is Infinity +(1 row) + 
+SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_INFINITY)) AS result; + result +------------------------ + The value is -Infinity +(1 row) + +SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_NAN)) AS result; + result +------------------ + The value is NaN +(1 row) + +set enable_binary_special_o_format to on; +SELECT CONCAT('The value is ', TO_CHAR(BINARY_DOUBLE_INFINITY)) AS result; + result +------------------ + The value is Inf +(1 row) + +SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_INFINITY)) AS result; + result +------------------- + The value is -Inf +(1 row) + +SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_NAN)) AS result; + result +------------------ + The value is Nan +(1 row) + SELECT BINARY_DOUBLE_INFINITY; binary_double_infinity ------------------------ - Infinity + Inf (1 row) SELECT BINARY_DOUBLE_NAN; binary_double_nan ------------------- - NaN + Nan (1 row) CREATE TABLE test_table ( @@ -454,7 +491,7 @@ VALUES (1, BINARY_DOUBLE_NAN); SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_NAN; id | float_value ----+------------- - 1 | NaN + 1 | Nan (1 row) INSERT INTO test_table (id, float_value) @@ -462,75 +499,75 @@ VALUES (2, BINARY_DOUBLE_INFINITY); SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_INFINITY; id | float_value ----+------------- - 2 | Infinity + 2 | Inf (1 row) SELECT float_value + 1 FROM test_table WHERE id = 1; ?column? ---------- - NaN + Nan (1 row) SELECT float_value * 2 FROM test_table WHERE id = 2; ?column? 
---------- - Infinity + Inf (1 row) SELECT * FROM test_table WHERE float_value = BINARY_DOUBLE_NAN OR float_value = BINARY_DOUBLE_INFINITY; id | float_value ----+------------- - 1 | NaN - 2 | Infinity + 1 | Nan + 2 | Inf (2 rows) DROP TABLE test_table; SELECT ABS(binary_double_infinity); - abs ----------- - Infinity + abs +----- + Inf (1 row) SELECT CEIL(binary_double_infinity); - ceil ----------- - Infinity + ceil +------ + Inf (1 row) SELECT SQRT(binary_double_infinity); - sqrt ----------- - Infinity + sqrt +------ + Inf (1 row) SELECT FLOOR(binary_double_infinity); - floor ----------- - Infinity + floor +------- + Inf (1 row) SELECT SQRT(binary_double_infinity); - sqrt ----------- - Infinity + sqrt +------ + Inf (1 row) SELECT EXP(binary_double_infinity); - exp ----------- - Infinity + exp +----- + Inf (1 row) SELECT LOG(binary_double_infinity); - log ----------- - Infinity + log +----- + Inf (1 row) SELECT POWER(binary_double_infinity, 2); - power ----------- - Infinity + power +------- + Inf (1 row) SELECT SIN(binary_double_infinity); @@ -539,55 +576,55 @@ CONTEXT: referenced column: sin SELECT ABS(binary_double_nan); abs ----- - NaN + Nan (1 row) SELECT CEIL(binary_double_nan); ceil ------ - NaN + Nan (1 row) SELECT SQRT(binary_double_nan); sqrt ------ - NaN + Nan (1 row) SELECT FLOOR(binary_double_nan); floor ------- - NaN + Nan (1 row) SELECT SQRT(binary_double_nan); sqrt ------ - NaN + Nan (1 row) SELECT EXP(binary_double_nan); exp ----- - NaN + Nan (1 row) SELECT LOG(binary_double_nan); log ----- - NaN + Nan (1 row) SELECT POWER(binary_double_nan, 2); power ------- - NaN + Nan (1 row) SELECT SIN(binary_double_nan); sin ----- - NaN + Nan (1 row) -- 创建自定义函数 @@ -668,49 +705,49 @@ SELECT (binary_double_infinity > 0.0) OR (binary_double_nan < binary_double_infi SELECT binary_double_infinity + 1.0; -- 返回正无穷大 ?column? ---------- - Infinity + Inf (1 row) SELECT binary_double_nan + 1.0; -- 返回 NaN ?column? 
---------- - NaN + Nan (1 row) SELECT binary_double_infinity - binary_double_infinity; -- 返回 NaN ?column? ---------- - NaN + Nan (1 row) SELECT binary_double_nan - 1.0; -- 返回 NaN ?column? ---------- - NaN + Nan (1 row) SELECT binary_double_infinity * binary_double_infinity; -- 返回正无穷大 ?column? ---------- - Infinity + Inf (1 row) SELECT binary_double_nan * 1.0; -- 返回 NaN ?column? ---------- - NaN + Nan (1 row) SELECT binary_double_infinity / binary_double_infinity; -- 返回 NaN ?column? ---------- - NaN + Nan (1 row) SELECT binary_double_nan / 1.0; -- 返回 NaN ?column? ---------- - NaN + Nan (1 row) CREATE TABLE T1(binary_double_nan INT); @@ -718,15 +755,15 @@ INSERT INTO T1 VALUES(1),(2),(3); SELECT binary_double_nan; binary_double_nan ------------------- - NaN + Nan (1 row) SELECT binary_double_nan FROM T1; binary_double_nan ------------------- - NaN - NaN - NaN + Nan + Nan + Nan (3 rows) SELECT T1.binary_double_nan FROM T1; @@ -770,19 +807,19 @@ insert into t1 values(binary_double_nan); select avg(id) from t1; avg ----- - NaN + Nan (1 row) select sum(id) from t1; sum ----- - NaN + Nan (1 row) select max(id) from t1; max ----- - NaN + Nan (1 row) select min(id) from t1; @@ -800,9 +837,9 @@ insert into t1 values(3.14),(10),(15); select binary_double_nan from t1; binary_double_nan ------------------- - NaN - NaN - NaN + Nan + Nan + Nan (3 rows) select t1.binary_double_nan from t1; @@ -820,7 +857,7 @@ insert into t1 values (100,9); select * from t1; binary_double_infinity | id ------------------------+---- - Infinity | 10 + Inf | 10 100 | 9 (2 rows) @@ -832,7 +869,7 @@ select * from t1 where binary_double_infinity = 100; select * from t1 where binary_double_infinity = binary_double_infinity; binary_double_infinity | id ------------------------+---- - Infinity | 10 + Inf | 10 100 | 9 (2 rows) diff --git a/src/test/regress/sql/test_binary_suffix.sql b/src/test/regress/sql/test_binary_suffix.sql index 213d834055..b253461562 100644 --- 
a/src/test/regress/sql/test_binary_suffix.sql +++ b/src/test/regress/sql/test_binary_suffix.sql @@ -157,6 +157,15 @@ SELECT 4.0f / 2.0; set float_suffix_acceptance to on; + +SELECT CONCAT('The value is ', TO_CHAR(BINARY_DOUBLE_INFINITY)) AS result; +SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_INFINITY)) AS result; +SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_NAN)) AS result; +set enable_binary_special_o_format to on; +SELECT CONCAT('The value is ', TO_CHAR(BINARY_DOUBLE_INFINITY)) AS result; +SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_INFINITY)) AS result; +SELECT CONCAT('The value is ', TO_CHAR(-BINARY_DOUBLE_NAN)) AS result; + SELECT BINARY_DOUBLE_INFINITY; SELECT BINARY_DOUBLE_NAN; -- Gitee From b7cd8b830aeaa4571f94b926a60f946cba9824b9 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 25 Jul 2024 21:46:54 +0800 Subject: [PATCH 094/347] =?UTF-8?q?B=E5=BA=93=E6=94=AF=E6=8C=81=E5=88=9B?= =?UTF-8?q?=E5=BB=BA=E5=88=86=E5=8C=BA=E8=A1=A8=E6=97=B6=E6=8C=87=E5=AE=9A?= =?UTF-8?q?=E5=A4=A7=E4=BA=8EMAXVALUE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/cbb/utils/partition/partitionmap.cpp | 5 ++++- src/include/knl/knl_session.h | 1 + src/include/utils/partitionmap.h | 10 ++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/cbb/utils/partition/partitionmap.cpp b/src/gausskernel/cbb/utils/partition/partitionmap.cpp index d4e44b7ba2..80d93426ad 100755 --- a/src/gausskernel/cbb/utils/partition/partitionmap.cpp +++ b/src/gausskernel/cbb/utils/partition/partitionmap.cpp @@ -550,6 +550,9 @@ void unserializePartitionStringAttribute(Const** outMaxValue, int outMaxValueLen value = OidFunctionCall3Coll( func, typcollation, CStringGetDatum(max_value->val.str), ObjectIdGetDatum(typelem), Int32GetDatum(typmod)); /* save the output values */ + if (u_sess->attr.attr_sql.dolphin) { + typid = partKeyDataType[counter]; + } 
outMaxValue[counter++] = makeConst(typid, typmod, typcollation, typlen, value, false, typbyval); } list_free_ext(boundary); @@ -755,7 +758,7 @@ int2vector* getPartitionKeyAttrNo( partkey->values[i] = attnum; for (j = 0; j < base_table_tupledsc->natts; j++) { if (attnum == rel_attrs[j].attnum) { - oidArr[i] = rel_attrs[j].atttypid; + oidArr[i] = GetAttTypeOid(rel_attrs[j].atttypid); break; } } diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 3184dcf9ef..058bd54e7b 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -2939,6 +2939,7 @@ typedef struct knl_u_hook_context { void *replaceNullOrNotHook; void *nullsMinimalPolicyHook; void *getIgnoreKeywordTokenHook; + void *modifyTypeForPartitionKeyHook; } knl_u_hook_context; typedef struct knl_u_libsw_context { diff --git a/src/include/utils/partitionmap.h b/src/include/utils/partitionmap.h index 3eea5a0ae3..f958962e95 100644 --- a/src/include/utils/partitionmap.h +++ b/src/include/utils/partitionmap.h @@ -28,6 +28,7 @@ #define PARTITIONMAP_H_ #include "postgres.h" +#include "knl/knl_session.h" #include "access/htup.h" #include "catalog/pg_type.h" #include "nodes/primnodes.h" @@ -186,4 +187,13 @@ extern void constCompare(Const* value1, Const* value2, Oid collation, int& compa extern struct ListPartElement* CopyListElements(ListPartElement* src, int elementNum); extern struct HashPartElement* CopyHashElements(HashPartElement* src, int elementNum, int partkeyNum); +typedef Oid (*modifyTypeForPartitionKey)(Oid attType); +extern inline Oid GetAttTypeOid(Oid attType) +{ + if (u_sess->hook_cxt.modifyTypeForPartitionKeyHook != NULL) { + return ((modifyTypeForPartitionKey)(u_sess->hook_cxt.modifyTypeForPartitionKeyHook))(attType); + } + return attType; +} + #endif /* PARTITIONMAP_H_ */ -- Gitee From ffd64175d37638fe2098ad9e4cde8fc124bf1b73 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Thu, 25 Jul 2024 22:28:27 +0800 Subject: [PATCH 095/347] 
=?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91:?= =?UTF-8?q?=20=E4=BF=AE=E5=A4=8DIAEZOR=E6=89=80=E7=A4=BA=E7=9A=84=E5=A4=A7?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E9=87=8F=E4=B8=8B=E6=89=A7=E8=A1=8C=E6=B8=B8?= =?UTF-8?q?=E6=A0=87=E8=A1=A8=E8=BE=BE=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9?= =?UTF-8?q?=E3=80=91:=20=E4=BF=AE=E5=A4=8DIAEZOR=E6=89=80=E7=A4=BA?= =?UTF-8?q?=E7=9A=84=E5=A4=A7=E6=95=B0=E6=8D=AE=E9=87=8F=E4=B8=8B=E6=89=A7?= =?UTF-8?q?=E8=A1=8C=E6=B8=B8=E6=A0=87=E8=A1=A8=E8=BE=BE=E6=8A=A5=E9=94=99?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=E3=80=82=20=E3=80=90=E6=A0=B9?= =?UTF-8?q?=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20=E5=AF=B9=E4=BA=8Esql?= =?UTF-8?q?=20select=20a,=20cursor=20(xx)=20from=20table1=EF=BC=8C=20table?= =?UTF-8?q?1=E7=9A=84=E6=AF=8F=E4=B8=80=E8=A1=8C=E9=83=BD=E8=A6=81?= =?UTF-8?q?=E5=88=9B=E5=BB=BA=E4=B8=80=E4=B8=AAprotal=EF=BC=8Cprota?= =?UTF-8?q?=E9=9C=80=E8=A6=81=E6=8E=A8=E5=B8=A6session=E7=9A=84hash?= =?UTF-8?q?=E8=A1=A8=E4=B8=AD=E5=AD=98=E6=94=BE=EF=BC=8C=E5=A4=A7=E6=95=B0?= =?UTF-8?q?=E6=8D=AE=E9=87=8F=E7=9A=84=E6=97=B6=E5=80=99=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E6=92=91=E7=A0=B4=E5=86=85=E5=AD=98=E3=80=82=20=E3=80=90?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20=E5=AF=B9?= =?UTF-8?q?=E4=BA=8Eselect=20a,=20cursor=20(xx)=20from=20table1=E6=AD=A4?= =?UTF-8?q?=E7=A7=8D=E5=9C=BA=E6=99=AF=EF=BC=8C=E5=85=B6=E5=AE=9E=E6=B8=B8?= =?UTF-8?q?=E6=A0=87=E6=98=AF=E6=B2=A1=E6=9C=89=E6=84=8F=E4=B9=89=E7=9A=84?= =?UTF-8?q?=EF=BC=8C=E5=9B=A0=E6=AD=A4=E6=B2=A1=E5=BF=85=E8=A6=81=E5=88=9B?= =?UTF-8?q?=E5=BB=BAprotal=EF=BC=8C=E5=8F=AF=E4=BB=A5=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E8=BF=94=E5=9B=9E=E3=80=82=20=E3=80=90=E5=85=B3=E8=81=94?= =?UTF-8?q?=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e.gitee.co?= =?UTF-8?q?m/opengaussorg/dashboard=3Fissue=3DIAEZOR?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/copyfuncs.cpp | 1 
+ src/common/backend/nodes/equalfuncs.cpp | 1 + src/common/backend/nodes/outfuncs.cpp | 1 + src/common/backend/nodes/readfuncs.cpp | 1 + src/common/backend/parser/parse_expr.cpp | 11 +- src/gausskernel/runtime/executor/execQual.cpp | 11 +- src/include/nodes/parsenodes_common.h | 1 + src/include/parser/parse_node.h | 1 + .../regress/expected/cursor_expression.out | 261 +++++++++--------- .../output/cursor_expression_dump.source | 6 +- src/test/regress/sql/cursor_expression.sql | 6 + 11 files changed, 173 insertions(+), 128 deletions(-) diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index b6bc8ed7eb..1741812f27 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -5338,6 +5338,7 @@ static CursorExpression* _copyCursorExpression(const CursorExpression* from) COPY_STRING_FIELD(raw_query_str); COPY_NODE_FIELD(param); COPY_SCALAR_FIELD(location); + COPY_SCALAR_FIELD(is_simple_select_target); return newnode; } diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index 0b1b756abb..a7a830c65f 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -1270,6 +1270,7 @@ static bool _equalCursorExpression(const CursorExpression* a, const CursorExpres COMPARE_STRING_FIELD(raw_query_str); COMPARE_NODE_FIELD(param); COMPARE_SCALAR_FIELD(location); + COMPARE_SCALAR_FIELD(is_simple_select_target); return true; } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 65d2b39161..d59fb43fd6 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -4160,6 +4160,7 @@ static void _outCursorExpression(StringInfo str, CursorExpression* node) WRITE_STRING_FIELD(raw_query_str); WRITE_NODE_FIELD(param); WRITE_INT_FIELD(location); + WRITE_BOOL_FIELD(is_simple_select_target); } diff --git a/src/common/backend/nodes/readfuncs.cpp 
b/src/common/backend/nodes/readfuncs.cpp index 12ffc3017b..5b818c43b0 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -1712,6 +1712,7 @@ static CursorExpression* _readCursorExpression(void) READ_STRING_FIELD(raw_query_str); READ_NODE_FIELD(param); READ_INT_FIELD(location); + READ_BOOL_FIELD(is_simple_select_target); READ_DONE(); } diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 849b07fb31..df6c523c75 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -364,6 +364,7 @@ Node* transformExpr(ParseState* pstate, Node* expr, ParseExprKind exprKind) sv_expr_kind = pstate->p_expr_kind; pstate->p_expr_kind = exprKind; + pstate->p_expr_transform_level = 0; result = transformExprRecurse(pstate, expr); pstate->p_expr_kind = sv_expr_kind; @@ -380,6 +381,7 @@ Node *transformExprRecurse(ParseState *pstate, Node *expr) } /* Guard against stack overflow due to overly complex expressions */ check_stack_depth(); + pstate->p_expr_transform_level++; switch (nodeTag(expr)) { case T_ColumnRef: @@ -1903,7 +1905,7 @@ static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) if (i != seq) { dopControl.CloseSmp(); } - lfirst(args) = transformCursorExpression(pstate, (CursorExpression*)arg); + lfirst(args) = transformExprRecurse(pstate, arg); dopControl.ResetSmp(); } i++; @@ -3942,6 +3944,13 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur newm->raw_query_str = queryString; newm->param = (List*)copyObject(parse_state_parent->cursor_expression_para_var); + if (pstate->p_pre_columnref_hook == NULL && pstate->p_post_columnref_hook == NULL && + pstate->p_expr_kind == EXPR_KIND_SELECT_TARGET && pstate->p_expr_transform_level == 1) { + newm->is_simple_select_target = true; + } else { + newm->is_simple_select_target = false; + } + list_free_ext(stmt_list); list_free_ext(raw_parsetree_list); diff --git 
a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index f9e368fbc9..f7ace87578 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -5162,7 +5162,7 @@ Oid deparseNodeForInputype(Expr *expr, NodeTag type, float8 val) if (type == T_NanTest) { argexpr = ((NanTest *) expr)->arg; - } else if (type = T_InfiniteTest) { + } else if (type == T_InfiniteTest) { argexpr = ((InfiniteTest *) expr)->arg; } else { ereport(ERROR, @@ -5889,6 +5889,15 @@ static Datum ExecEvalCursorExpression(CursorExpressionState* state, ExprContext* } *isNull = false; + /* + * we will never access the protal later when the cursor expression is in a simple target query, such as query: select a, cursor("select xxx") from xx, + * so in this case we no need to create the protal and just need to return a dummy portal name + */ + if (cursor_expression->is_simple_select_target == true) { + portal_name = pstrdup(""); + return CStringGetTextDatum(portal_name); + } + portal = CreateNewPortal(false); oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal)); diff --git a/src/include/nodes/parsenodes_common.h b/src/include/nodes/parsenodes_common.h index 41c909360d..0a000d37b4 100644 --- a/src/include/nodes/parsenodes_common.h +++ b/src/include/nodes/parsenodes_common.h @@ -654,6 +654,7 @@ typedef struct CursorExpression { char * raw_query_str; /* cursor expression raw query string*/ List* param; /* cursor expression param. 
used to passed outer var to expression query*/ int location; /* token location, or -1 if unknown */ + bool is_simple_select_target; /* plpgsql will set it as true and simple query will set it as false */ } CursorExpression; /* ---------------------- diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h index be0a302e25..bbc0e5e47a 100644 --- a/src/include/parser/parse_node.h +++ b/src/include/parser/parse_node.h @@ -291,6 +291,7 @@ struct ParseState { bool has_uservar; bool is_outer_parse_state; /*is parse state is from outer scope, for cursor expression case*/ List* cursor_expression_para_var; + int p_expr_transform_level; /* indicast the expr transform level in the target list */ }; /* An element of p_relnamespace or p_varnamespace */ diff --git a/src/test/regress/expected/cursor_expression.out b/src/test/regress/expected/cursor_expression.out index 576fa27896..faa4fa1661 100644 --- a/src/test/regress/expected/cursor_expression.out +++ b/src/test/regress/expected/cursor_expression.out @@ -14,18 +14,18 @@ insert into company values ('h'),('a'),('t'); insert into employees2 values (1, 1, 'zhang', 'san', '1@abc.com', 123),(2, 1, 'li', 'si', '2@abc.com', 124); -- test cursor in select directly SELECT department_name, CURSOR(SELECT e.name FROM employees e) FROM departments d WHERE department_name='sale' ORDER BY department_name; - department_name | ?column? ------------------+-------------------- - sale | + department_name | ?column? 
+-----------------+------------------ + sale | (1 row) -- test cursor in declare START TRANSACTION; CURSOR cursor1 FOR SELECT e.name, CURSOR(SELECT e1.name, CURSOR(select c2.name from company c2) fff FROM employees e1) abc FROM employees e; FETCH cursor1; - name | abc -----------+-------------------- - zhangsan | + name | abc +----------+------------------ + zhangsan | (1 row) CLOSE cursor1; @@ -53,22 +53,22 @@ begin end; / call test_cursor_2(); -NOTICE: company_name : zhangsan +NOTICE: company_name : zhangsan NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu NOTICE: last_name_name : heliu -NOTICE: company_name : lisi +NOTICE: company_name : lisi NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu NOTICE: last_name_name : heliu -NOTICE: company_name : wangwu +NOTICE: company_name : wangwu NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu NOTICE: last_name_name : heliu -NOTICE: company_name : heliu +NOTICE: company_name : heliu NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu @@ -110,71 +110,71 @@ begin end; / call test_cursor_2(); -NOTICE: company_name : zhangsan -NOTICE: last_name_name : zhangsan +NOTICE: company_name : zhangsan +NOTICE: last_name_name : zhangsan NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : lisi +NOTICE: last_name_name : lisi NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : wangwu +NOTICE: last_name_name : wangwu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : heliu +NOTICE: last_name_name : heliu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: company_name : lisi -NOTICE: last_name_name : zhangsan +NOTICE: company_name : lisi +NOTICE: 
last_name_name : zhangsan NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : lisi +NOTICE: last_name_name : lisi NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : wangwu +NOTICE: last_name_name : wangwu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : heliu +NOTICE: last_name_name : heliu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: company_name : wangwu -NOTICE: last_name_name : zhangsan +NOTICE: company_name : wangwu +NOTICE: last_name_name : zhangsan NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : lisi +NOTICE: last_name_name : lisi NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : wangwu +NOTICE: last_name_name : wangwu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : heliu +NOTICE: last_name_name : heliu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: company_name : heliu -NOTICE: last_name_name : zhangsan +NOTICE: company_name : heliu +NOTICE: last_name_name : zhangsan NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : lisi +NOTICE: last_name_name : lisi NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : wangwu +NOTICE: last_name_name : wangwu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t -NOTICE: last_name_name : heliu +NOTICE: last_name_name : heliu NOTICE: last_name_name2 : h NOTICE: last_name_name2 : a NOTICE: last_name_name2 : t @@ -206,25 +206,25 @@ BEGIN CLOSE c1; END; / -NOTICE: company_name : zhangsan +NOTICE: company_name : zhangsan NOTICE: 
employee_name : zhangsan NOTICE: employee_name : lisi NOTICE: employee_name : wangwu NOTICE: employee_name : heliu NOTICE: employee_name : heliu -NOTICE: company_name : lisi +NOTICE: company_name : lisi NOTICE: employee_name : zhangsan NOTICE: employee_name : lisi NOTICE: employee_name : wangwu NOTICE: employee_name : heliu NOTICE: employee_name : heliu -NOTICE: company_name : wangwu +NOTICE: company_name : wangwu NOTICE: employee_name : zhangsan NOTICE: employee_name : lisi NOTICE: employee_name : wangwu NOTICE: employee_name : heliu NOTICE: employee_name : heliu -NOTICE: company_name : heliu +NOTICE: company_name : heliu NOTICE: employee_name : zhangsan NOTICE: employee_name : lisi NOTICE: employee_name : wangwu @@ -265,7 +265,7 @@ NOTICE: my_cur isopen : t NOTICE: my_cur found : f NOTICE: my_cur not found : f NOTICE: my_cur row count : 0 -NOTICE: company_name : zhangsan +NOTICE: company_name : zhangsan NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu @@ -278,7 +278,7 @@ NOTICE: my_cur isopen : t NOTICE: my_cur found : f NOTICE: my_cur not found : f NOTICE: my_cur row count : 0 -NOTICE: company_name : lisi +NOTICE: company_name : lisi NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu @@ -291,7 +291,7 @@ NOTICE: my_cur isopen : t NOTICE: my_cur found : f NOTICE: my_cur not found : f NOTICE: my_cur row count : 0 -NOTICE: company_name : wangwu +NOTICE: company_name : wangwu NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu @@ -304,7 +304,7 @@ NOTICE: my_cur isopen : t NOTICE: my_cur found : f NOTICE: my_cur not found : f NOTICE: my_cur row count : 0 -NOTICE: company_name : heliu +NOTICE: company_name : heliu NOTICE: last_name_name : zhangsan NOTICE: last_name_name : lisi NOTICE: last_name_name : wangwu @@ -401,9 +401,9 @@ begin end; / call test_cursor_2(); -NOTICE: department_name : pro -NOTICE: department_name : rd -NOTICE: 
department_name : sale +NOTICE: department_name : pro +NOTICE: department_name : rd +NOTICE: department_name : sale NOTICE: department employees info : 1 zhang NOTICE: department employees info : 2 li test_cursor_2 @@ -437,9 +437,9 @@ begin end; / call test_cursor_2(); -NOTICE: department_name : pro -NOTICE: department_name : rd -NOTICE: department_name : sale +NOTICE: department_name : pro +NOTICE: department_name : rd +NOTICE: department_name : sale NOTICE: department employees info : 1 zhang NOTICE: department employees info : 2 li test_cursor_2 @@ -506,9 +506,9 @@ begin end; / call test_cursor_2(); -NOTICE: department_name : pro -NOTICE: department_name : rd -NOTICE: department_name : sale +NOTICE: department_name : pro +NOTICE: department_name : rd +NOTICE: department_name : sale NOTICE: department employees info : 1 zhang NOTICE: department employees info : 2 li test_cursor_2 @@ -519,11 +519,11 @@ NOTICE: department employees info : 2 li drop procedure test_cursor_2; -- test start with query SELECT department_name, CURSOR(with aa as (select employees_id from employees) select * from aa) FROM departments d; - department_name | ?column? ------------------+--------------------- - sale | - rd | - pro | + department_name | ?column? +-----------------+------------------ + sale | + rd | + pro | (3 rows) -- test conflict with cursor function @@ -552,37 +552,37 @@ CONTEXT: referenced column: cursor drop function pg_catalog.cursor(varchar); set behavior_compat_options = 'prefer_parse_cursor_parentheses_as_expr'; SELECT department_name, CURSOR((SELECT e.name FROM employees e)) FROM departments d WHERE department_name='sale' ORDER BY department_name; - department_name | ?column? ------------------+--------------------- - sale | + department_name | ?column? 
+-----------------+------------------ + sale | (1 row) -- view contain cursor expr create view v1 as SELECT department_name, CURSOR(SELECT e.name FROM employees e) FROM departments d WHERE department_name='sale' ORDER BY department_name; select * from v1; - department_name | ?column? ------------------+--------------------- - sale | + department_name | ?column? +-----------------+------------------ + sale | (1 row) drop view v1; -- some spec case SELECT department_name, CURSOR((SELECT e.name FROM employees e)) FROM departments d WHERE department_name='sale' ORDER BY department_name; - department_name | ?column? ------------------+--------------------- - sale | + department_name | ?column? +-----------------+------------------ + sale | (1 row) SELECT department_name, CURSOR(((SELECT e.name FROM employees e))) FROM departments d WHERE department_name='sale' ORDER BY department_name; - department_name | ?column? ------------------+--------------------- - sale | + department_name | ?column? +-----------------+------------------ + sale | (1 row) SELECT department_name, CURSOR(((/*test*/SELECT e.name FROM employees e))) FROM departments d WHERE department_name='sale' ORDER BY department_name; - department_name | ?column? ------------------+--------------------- - sale | + department_name | ?column? +-----------------+------------------ + sale | (1 row) -- permission @@ -591,8 +591,11 @@ GRANT USAGE ON SCHEMA cursor_expression TO normalrole_user_001; GRANT SELECT ON departments TO normalrole_user_001 with grant option; SET SESSION AUTHORIZATION normalrole_user_001 PASSWORD 'Gauss@123'; SELECT department_name, CURSOR((SELECT e.name FROM employees e)) FROM departments d WHERE department_name='sale' ORDER BY department_name; -ERROR: permission denied for relation employees -DETAIL: N/A + department_name | ?column? 
+-----------------+------------------ + sale | +(1 row) + RESET SESSION AUTHORIZATION; drop user normalrole_user_001 cascade; -- error input @@ -625,10 +628,10 @@ begin end; / call test_cursor_2(); -NOTICE: company_name : zhangsan -NOTICE: company_name : lisi -NOTICE: company_name : wangwu -NOTICE: company_name : heliu +NOTICE: company_name : zhangsan +NOTICE: company_name : lisi +NOTICE: company_name : wangwu +NOTICE: company_name : heliu test_cursor_2 --------------- @@ -658,22 +661,22 @@ begin end; / call test_cursor_2(); -NOTICE: company_name : zhangsan +NOTICE: company_name : zhangsan NOTICE: last_name : zhangsan NOTICE: last_name : lisi NOTICE: last_name : wangwu NOTICE: last_name : heliu -NOTICE: company_name : lisi +NOTICE: company_name : lisi NOTICE: last_name : zhangsan NOTICE: last_name : lisi NOTICE: last_name : wangwu NOTICE: last_name : heliu -NOTICE: company_name : wangwu +NOTICE: company_name : wangwu NOTICE: last_name : zhangsan NOTICE: last_name : lisi NOTICE: last_name : wangwu NOTICE: last_name : heliu -NOTICE: company_name : heliu +NOTICE: company_name : heliu NOTICE: last_name : zhangsan NOTICE: last_name : lisi NOTICE: last_name : wangwu @@ -698,10 +701,10 @@ begin end; / call test_cursor_2(); -NOTICE: zhangsan -NOTICE: lisi -NOTICE: wangwu -NOTICE: heliu +NOTICE: zhangsan +NOTICE: lisi +NOTICE: wangwu +NOTICE: heliu test_cursor_2 --------------- @@ -722,10 +725,10 @@ begin end; / call test_cursor_2(); -NOTICE: zhangsan -NOTICE: lisi -NOTICE: wangwu -NOTICE: heliu +NOTICE: zhangsan +NOTICE: lisi +NOTICE: wangwu +NOTICE: heliu test_cursor_2 --------------- @@ -748,10 +751,10 @@ begin end; / call test_cursor_2(); -NOTICE: name : zhangsan -NOTICE: name : lisi -NOTICE: name : wangwu -NOTICE: name : heliu +NOTICE: name : zhangsan +NOTICE: name : lisi +NOTICE: name : wangwu +NOTICE: name : heliu test_cursor_2 --------------- @@ -778,22 +781,22 @@ begin end; / call test_cursor_2(); -NOTICE: zhangsan +NOTICE: zhangsan NOTICE: last_name : zhangsan 
NOTICE: last_name : lisi NOTICE: last_name : wangwu NOTICE: last_name : heliu -NOTICE: lisi +NOTICE: lisi NOTICE: last_name : zhangsan NOTICE: last_name : lisi NOTICE: last_name : wangwu NOTICE: last_name : heliu -NOTICE: wangwu +NOTICE: wangwu NOTICE: last_name : zhangsan NOTICE: last_name : lisi NOTICE: last_name : wangwu NOTICE: last_name : heliu -NOTICE: heliu +NOTICE: heliu NOTICE: last_name : zhangsan NOTICE: last_name : lisi NOTICE: last_name : wangwu @@ -842,66 +845,78 @@ begin end; / call pro_cursor_0011_02(); -NOTICE: department_name : 3 -NOTICE: department employees info : pro +NOTICE: department_name : 3 +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department_name : 3 -NOTICE: department employees info : pro +NOTICE: department_name : 3 +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department_name : 2 -NOTICE: department employees info : rd +NOTICE: department_name : 2 +NOTICE: department employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 2 -NOTICE: department employees info : rd +NOTICE: department employees info : rd 
NOTICE: employees_id_var : 2 -NOTICE: department_name : 2 -NOTICE: department employees info : rd +NOTICE: department_name : 2 +NOTICE: department employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 2 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 2 -NOTICE: department_name : 1 -NOTICE: department employees info : sale +NOTICE: department_name : 1 +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 -NOTICE: department_name : 1 -NOTICE: department employees info : sale +NOTICE: department_name : 1 +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 pro_cursor_0011_02 -------------------- (1 row) +-- test insert as select cursor +create table test_insert(c1 varchar, c2 varchar); +insert into test_insert SELECT department_name, CURSOR(SELECT e.name FROM employees e) FROM departments d; +select * from test_insert; + c1 | c2 +------+------------------ + sale | + rd | + pro | +(3 rows) + -- clean +drop table test_insert; drop procedure pro_cursor_0011_02; drop table t_cursor_0011_01; drop table 
t_cursor_0011_02; diff --git a/src/test/regress/output/cursor_expression_dump.source b/src/test/regress/output/cursor_expression_dump.source index 56dfac6d75..315791618e 100644 --- a/src/test/regress/output/cursor_expression_dump.source +++ b/src/test/regress/output/cursor_expression_dump.source @@ -200,9 +200,9 @@ NOTICE: last_name_name2 : t (1 row) select * from v1; - department_name | ?column? ------------------+--------------------- - sale | + department_name | ?column? +-----------------+------------------ + sale | (1 row) drop schema cursor_expression_dump cascade; diff --git a/src/test/regress/sql/cursor_expression.sql b/src/test/regress/sql/cursor_expression.sql index 6412a9e299..80d5b6e6d3 100644 --- a/src/test/regress/sql/cursor_expression.sql +++ b/src/test/regress/sql/cursor_expression.sql @@ -522,7 +522,13 @@ end; / call pro_cursor_0011_02(); +-- test insert as select cursor +create table test_insert(c1 varchar, c2 varchar); +insert into test_insert SELECT department_name, CURSOR(SELECT e.name FROM employees e) FROM departments d; +select * from test_insert; + -- clean +drop table test_insert; drop procedure pro_cursor_0011_02; drop table t_cursor_0011_01; drop table t_cursor_0011_02; -- Gitee From b372df81e4d5085c446aa12ad120eaa5e9baf8c8 Mon Sep 17 00:00:00 2001 From: gentle_hu Date: Fri, 26 Jul 2024 10:15:58 +0800 Subject: [PATCH 096/347] fix shrink bug and misspelling --- src/gausskernel/optimizer/commands/tablecmds.cpp | 8 +++++--- src/gausskernel/process/tcop/utility.cpp | 12 ++++++------ src/include/commands/tablecmds.h | 2 +- .../row_compression/row_compress_feature.out | 6 +++++- .../sql/row_compression/row_compress_feature.sql | 2 ++ 5 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 2e6f4eeaf0..d3df3962d6 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ 
-32530,7 +32530,7 @@ void ShrinkCfsChunkRestore(Oid relationId, LOCKMODE lockmode, bool nowait) relation_close(relation, lockmode); } -void ShrinkRealtionChunk(ShrinkStmt* shrink) +void ShrinkRelationChunk(ShrinkStmt* shrink) { ListCell* cell = NULL; foreach (cell, shrink->relations) { @@ -32544,9 +32544,11 @@ void ShrinkRealtionChunk(ShrinkStmt* shrink) reloid = RangeVarGetRelid(r, AccessShareLock, true); if (!OidIsValid(reloid)) { - continue; + ereport(ERROR, (errmsg("[shrink] relation %s%s%s does not exist.", + r->schemaname == NULL ? "" : r->schemaname, + r->schemaname == NULL ? "" : ".", + r->relname))); } - ShrinkCfsChunkRestore(reloid, AccessShareLock, shrink->nowait); } } diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index 811ccec86e..a24a37f97a 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -2527,7 +2527,7 @@ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), #endif } -void ExecShrinkRealtionChunkStmt(Node* parse_tree, const char* query_string, bool sent_to_remote) +void ExecShrinkRelationChunkStmt(Node* parse_tree, const char* query_string, bool sent_to_remote) { #ifdef PGXC if (IS_PGXC_COORDINATOR) { @@ -2537,18 +2537,18 @@ void ExecShrinkRealtionChunkStmt(Node* parse_tree, const char* query_string, boo if (u_sess->attr.attr_sql.enable_parallel_ddl && !is_first_node) { ExecUtilityStmtOnNodes_ParallelDDLMode( query_string, NULL, sent_to_remote, false, EXEC_ON_COORDS, false, first_exec_node); - ShrinkRealtionChunk((ShrinkStmt*)(void *)parse_tree); + ShrinkRelationChunk((ShrinkStmt*)(void *)parse_tree); ExecUtilityStmtOnNodes_ParallelDDLMode( query_string, NULL, sent_to_remote, false, EXEC_ON_DATANODES, false, first_exec_node); } else { - ShrinkRealtionChunk((ShrinkStmt*)(void *)parse_tree); + ShrinkRelationChunk((ShrinkStmt*)(void *)parse_tree); ExecUtilityStmtOnNodes(query_string, NULL, sent_to_remote, false, EXEC_ON_ALL_NODES, false); } } else 
{ - ShrinkRealtionChunk((ShrinkStmt*)(void *)parse_tree); + ShrinkRelationChunk((ShrinkStmt*)(void *)parse_tree); } #else - ShrinkRealtionChunk((ShrinkStmt*)(void *)parse_tree); + ShrinkRelationChunk((ShrinkStmt*)(void *)parse_tree); #endif } @@ -4932,7 +4932,7 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, break; #endif case T_ShrinkStmt: { - ExecShrinkRealtionChunkStmt(parse_tree, query_string, sent_to_remote); + ExecShrinkRelationChunkStmt(parse_tree, query_string, sent_to_remote); break; } case T_ReindexStmt: { diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h index fdcccfea74..b7bb4c5155 100644 --- a/src/include/commands/tablecmds.h +++ b/src/include/commands/tablecmds.h @@ -114,7 +114,7 @@ extern void RemoveRelationsonMainExecCN(DropStmt* drop, ObjectAddresses* objects extern void RemoveRelations(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExecType* exec_type); -extern void ShrinkRealtionChunk(ShrinkStmt *shrink); +extern void ShrinkRelationChunk(ShrinkStmt *shrink); extern void RemoveObjectsonMainExecCN(DropStmt* drop, ObjectAddresses* objects, bool isFirstNode); diff --git a/src/test/regress/expected/row_compression/row_compress_feature.out b/src/test/regress/expected/row_compression/row_compress_feature.out index 36896cf1cc..4e80358045 100644 --- a/src/test/regress/expected/row_compression/row_compress_feature.out +++ b/src/test/regress/expected/row_compression/row_compress_feature.out @@ -408,6 +408,10 @@ shrink table row_compression_test_tbl1_kkk; shrink table row_compression_test_tbl1_kkk nowait; shrink table row_compression_test_tbl1_kkk_idx; shrink table row_compression_test_tbl1_kkk_idx nowait; +shrink table row_compression_test_not_exist; +ERROR: [shrink] relation row_compression_test_not_exist does not exist. +shrink table row_compression_test_not_exist nowait; +ERROR: [shrink] relation row_compression_test_not_exist does not exist. 
-- test truncate truncate table row_compression_test_tbl1_kkk; checkpoint; @@ -889,4 +893,4 @@ select * from compress_normal_user.hash_range order by month_code, dept_code; (6 rows) drop table if exists compress_normal_user.hash_range cascade; -drop schema if exists compress_normal_user cascade; \ No newline at end of file +drop schema if exists compress_normal_user cascade; diff --git a/src/test/regress/sql/row_compression/row_compress_feature.sql b/src/test/regress/sql/row_compression/row_compress_feature.sql index c9250759ab..cb8758241c 100644 --- a/src/test/regress/sql/row_compression/row_compress_feature.sql +++ b/src/test/regress/sql/row_compression/row_compress_feature.sql @@ -229,6 +229,8 @@ shrink table row_compression_test_tbl1_kkk; shrink table row_compression_test_tbl1_kkk nowait; shrink table row_compression_test_tbl1_kkk_idx; shrink table row_compression_test_tbl1_kkk_idx nowait; +shrink table row_compression_test_not_exist; +shrink table row_compression_test_not_exist nowait; -- test truncate truncate table row_compression_test_tbl1_kkk; -- Gitee From fdd455fc590b3e3f99e20891bd01f5a4c541fd91 Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Thu, 25 Jul 2024 09:49:23 +0800 Subject: [PATCH 097/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E5=88=A4=E6=96=AD=E8=BE=93=E5=85=A5=E4=B8=BA0=E7=9A=84?= =?UTF-8?q?=E9=80=BB=E8=BE=91;=20=E4=BF=AE=E6=94=B9=E5=85=A5=E5=8F=82?= =?UTF-8?q?=E4=B8=BAfloat4=E7=9A=84=E8=B4=9F=E8=BE=B9=E7=95=8C=E5=80=BC?= =?UTF-8?q?=E6=97=B6=E4=B8=8D=E5=BA=94=E8=BF=94=E5=9B=9E=E8=B4=9F=E6=97=A0?= =?UTF-8?q?=E7=A9=B7=E5=A4=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/float.cpp | 42 ++++----- .../regress/expected/func_to_binary_float.out | 85 ++++++++++++++++++- src/test/regress/sql/func_to_binary_float.sql | 21 ++++- 3 files changed, 123 insertions(+), 25 deletions(-) diff --git a/src/common/backend/utils/adt/float.cpp 
b/src/common/backend/utils/adt/float.cpp index 0d6a4d74d8..ee920e078e 100644 --- a/src/common/backend/utils/adt/float.cpp +++ b/src/common/backend/utils/adt/float.cpp @@ -2956,7 +2956,6 @@ static double to_binary_float_internal(char* origin_num, bool *err) val += 0.0; } - // to_binary_float accept 'Nan', '[+-]Inf' if (endptr == num || errno != 0) { int save_errno = errno; @@ -2978,7 +2977,8 @@ static double to_binary_float_internal(char* origin_num, bool *err) } else if (save_errno == ERANGE) { // convert to infinite if (val == 0.0 || val >= HUGE_VAL || val <= -HUGE_VAL) - val = (val == 0.0 ? 0 : (val >= HUGE_VAL ? get_float4_infinity() : -get_float4_infinity())); + val = (val == 0.0 ? 0 : (val >= HUGE_VAL ? + get_float4_infinity() : -get_float4_infinity())); } } #ifdef HAVE_BUGGY_SOLARIS_STRTOD @@ -3003,10 +3003,7 @@ static double to_binary_float_internal(char* origin_num, bool *err) } if (isinf((float4)val) && !isinf(val)) { - val = val < 0 ? -get_float4_infinity() : get_float4_infinity(); - } - if (((float4)val) == 0.0 && val != 0) { - val = 0; + val = val > 0 ? get_float4_infinity() : -get_float4_infinity(); } return val; @@ -3071,6 +3068,20 @@ Datum to_binary_float_text(PG_FUNCTION_ARGS) PG_RETURN_FLOAT4((float4)result); } +static double handle_float4_overflow(double val) +{ + double result = val; + if (result >= HUGE_VAL) { + result = get_float4_infinity(); + } else if (result <= -HUGE_VAL) { + result = -get_float4_infinity(); + } + if (isinf((float4)result) && !isinf(result)) { + result = result > 0 ? 
get_float4_infinity() : -get_float4_infinity(); + } + return result; +} + /* * to_binary_float_number() */ @@ -3079,14 +3090,8 @@ Datum to_binary_float_number(PG_FUNCTION_ARGS) if (PG_ARGISNULL(0)) PG_RETURN_NULL(); - float8 val = PG_GETARG_FLOAT8(0); - - if (val > FLT_MAX) { - val = get_float4_infinity(); - } else if (val < FLT_MIN) { - val = -get_float4_infinity(); - } - + float8 val = handle_float4_overflow(PG_GETARG_FLOAT8(0)); + PG_RETURN_FLOAT4((float4)val); } @@ -3109,13 +3114,8 @@ Datum to_binary_float_text_number(PG_FUNCTION_ARGS) // if str1 convert err, and with default, convert str2 if (with_default && err && !PG_ARGISNULL(1)) { err = false; - result = PG_GETARG_FLOAT8(1); - if (result > FLT_MAX) { - result = get_float4_infinity(); - } else if (result < FLT_MIN) { - result = -get_float4_infinity(); - } - } + result = handle_float4_overflow(PG_GETARG_FLOAT8(1)); + } if (err) { ereport(ERROR, diff --git a/src/test/regress/expected/func_to_binary_float.out b/src/test/regress/expected/func_to_binary_float.out index 49fc90b76a..5732a29c4f 100644 --- a/src/test/regress/expected/func_to_binary_float.out +++ b/src/test/regress/expected/func_to_binary_float.out @@ -98,7 +98,7 @@ SELECT TO_BINARY_FLOAT(1.79769313486231E+100); SELECT TO_BINARY_FLOAT(2.22507485850720E-100); to_binary_float ----------------- - -Infinity + 0 (1 row) SELECT TO_BINARY_FLOAT(1.79769313486231E+310); -- error: overflow @@ -175,7 +175,7 @@ SELECT TO_BINARY_FLOAT(c1 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1 8 (8 rows) -SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error, column can't be default param +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- errorя╝М column can't be default param ERROR: Default param can't be ColumnRef SELECT TO_BINARY_FLOAT(c2 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; to_binary_float @@ -434,6 +434,18 @@ SELECT TO_BINARY_FLOAT(' 6.66 ' DEFAULT 3.14 ON CONVERSION 
ERROR); SELECT TO_BINARY_FLOAT('today' DEFAULT 'roll' ON CONVERSION ERROR); -- error ERROR: invalid input syntax for type real CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(3.402E+100); + to_binary_float +----------------- + Infinity +(1 row) + +SELECT TO_BINARY_FLOAT('test' DEFAULT 3.04E+100 ON CONVERSION ERROR); + to_binary_float +----------------- + Infinity +(1 row) + -- test overflow and null SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); to_binary_float @@ -444,7 +456,7 @@ SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT(2.22507485850720E-100 DEFAULT 3.14 ON CONVERSION ERROR); to_binary_float ----------------- - -Infinity + 0 (1 row) SELECT TO_BINARY_FLOAT('1.79769313486231E+100' DEFAULT 3.14 ON CONVERSION ERROR); @@ -531,6 +543,72 @@ SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR) (1 row) +-- test zero +SELECT TO_BINARY_FLOAT(0); + to_binary_float +----------------- + 0 +(1 row) + +SELECT TO_BINARY_FLOAT('test' DEFAULT 0 ON CONVERSION ERROR); + to_binary_float +----------------- + 0 +(1 row) + +SELECT TO_BINARY_FLOAT(0 DEFAULT 0 ON CONVERSION ERROR); + to_binary_float +----------------- + 0 +(1 row) + +SELECT TO_BINARY_FLOAT(0 DEFAULT 123 ON CONVERSION ERROR); + to_binary_float +----------------- + 0 +(1 row) + +-- test negetive +CREATE TABLE t_ne (c1 float4); +INSERT INTO t_ne VALUES (3.402E+38); +INSERT INTO t_ne VALUES (-3.402E+38); +SELECT c1, TO_BINARY_FLOAT(c1) FROM t_ne; + c1 | to_binary_float +------------+----------------- + 3.402e+38 | 3.402e+38 + -3.402e+38 | -3.402e+38 +(2 rows) + +SELECT TO_BINARY_FLOAT(-3.402E+38); + to_binary_float +----------------- + -3.402e+38 +(1 row) + +SELECT TO_BINARY_FLOAT('test' DEFAULT 3.402E+38 ON CONVERSION ERROR); + to_binary_float +----------------- + 3.402e+38 +(1 row) + +SELECT TO_BINARY_FLOAT('test' DEFAULT -3.402E+38 ON CONVERSION ERROR); + to_binary_float 
+----------------- + -3.402e+38 +(1 row) + +SELECT TO_BINARY_FLOAT(-3.402E+100); + to_binary_float +----------------- + -Infinity +(1 row) + +SELECT TO_BINARY_FLOAT('test' DEFAULT -3.04E+100 ON CONVERSION ERROR); + to_binary_float +----------------- + -Infinity +(1 row) + -- pbe CREATE TABLE tbf2 (c1 int, func_info text, res float4); PREPARE default_param_text2(int, text, text) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); @@ -606,4 +684,5 @@ SELECT * FROM tbf2 ORDER BY c1; DROP TABLE tbf; DROP TABLE tbf2; +DROP TABLE t_ne; drop schema if exists func_tbf cascade; diff --git a/src/test/regress/sql/func_to_binary_float.sql b/src/test/regress/sql/func_to_binary_float.sql index 1ec614feb8..23416193b6 100644 --- a/src/test/regress/sql/func_to_binary_float.sql +++ b/src/test/regress/sql/func_to_binary_float.sql @@ -43,7 +43,7 @@ SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; SELECT TO_BINARY_FLOAT(c1 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; -SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error, column can't be default param +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- errorя╝М column can't be default param SELECT TO_BINARY_FLOAT(c2 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; SELECT TO_BINARY_FLOAT(c2 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; @@ -75,6 +75,8 @@ SELECT TO_BINARY_FLOAT('today' DEFAULT 3.14 ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT('3.14today' DEFAULT 3.14 ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT(' 6.66 ' DEFAULT 3.14 ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT('today' DEFAULT 'roll' ON CONVERSION ERROR); -- error +SELECT TO_BINARY_FLOAT(3.402E+100); +SELECT TO_BINARY_FLOAT('test' DEFAULT 3.04E+100 ON 
CONVERSION ERROR); -- test overflow and null SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); @@ -96,6 +98,22 @@ SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+100' ON CONVERSION ERROR) SELECT TO_BINARY_FLOAT(NULL DEFAULT 1.79769313486231E+310 ON CONVERSION ERROR); -- error: overflow SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR); -- NULL +-- test zero +SELECT TO_BINARY_FLOAT(0); +SELECT TO_BINARY_FLOAT('test' DEFAULT 0 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(0 DEFAULT 0 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(0 DEFAULT 123 ON CONVERSION ERROR); + +-- test negetive +CREATE TABLE t_ne (c1 float4); +INSERT INTO t_ne VALUES (3.402E+38); +INSERT INTO t_ne VALUES (-3.402E+38); +SELECT c1, TO_BINARY_FLOAT(c1) FROM t_ne; +SELECT TO_BINARY_FLOAT(-3.402E+38); +SELECT TO_BINARY_FLOAT('test' DEFAULT 3.402E+38 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('test' DEFAULT -3.402E+38 ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(-3.402E+100); +SELECT TO_BINARY_FLOAT('test' DEFAULT -3.04E+100 ON CONVERSION ERROR); -- pbe CREATE TABLE tbf2 (c1 int, func_info text, res float4); @@ -141,5 +159,6 @@ SELECT * FROM tbf2 ORDER BY c1; DROP TABLE tbf; DROP TABLE tbf2; +DROP TABLE t_ne; drop schema if exists func_tbf cascade; -- Gitee From 966c5103c27abbde93efa66dd0555ecf3b71b3e2 Mon Sep 17 00:00:00 2001 From: zhaosen Date: Fri, 26 Jul 2024 13:58:31 +0800 Subject: [PATCH 098/347] Correct the name of functions --- src/gausskernel/storage/access/table/tableam.cpp | 2 +- src/gausskernel/storage/access/ustore/knl_uscan.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/storage/access/table/tableam.cpp b/src/gausskernel/storage/access/table/tableam.cpp index f852b2d82d..83ebf3b236 100644 --- a/src/gausskernel/storage/access/table/tableam.cpp +++ b/src/gausskernel/storage/access/table/tableam.cpp @@ -927,7 +927,7 @@ void UHeapamScanMarkpos(TableScanDesc sscan) void 
UHeapamScanInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir) { - return UeapInitParallelSeqscan(sscan, dop, dir); + return UHeapInitParallelSeqscan(sscan, dop, dir); } void UHeapamScanEndscan(TableScanDesc sscan) diff --git a/src/gausskernel/storage/access/ustore/knl_uscan.cpp b/src/gausskernel/storage/access/ustore/knl_uscan.cpp index 92f6bacadb..f12c921b8a 100644 --- a/src/gausskernel/storage/access/ustore/knl_uscan.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uscan.cpp @@ -1206,7 +1206,7 @@ void UHeapRestRpos(TableScanDesc sscan) } } -void UeapInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir) +void UHeapInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir) { HeapScanDesc scan = (HeapScanDesc) sscan; -- Gitee From 621008d477a31224b4790beae7977f318a0cee36 Mon Sep 17 00:00:00 2001 From: wangjingyuan <1577039175@qq.com> Date: Fri, 26 Jul 2024 14:19:31 +0800 Subject: [PATCH 099/347] =?UTF-8?q?=E6=94=AF=E6=8C=81=E5=90=91=E9=87=8F?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E5=BA=93datavec=E5=B9=B6=E8=A1=8C=E5=88=9B?= =?UTF-8?q?=E5=BB=BA=E7=B4=A2=E5=BC=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/sort/tuplesort.cpp | 4 ++-- src/include/utils/tuplesort.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/common/backend/utils/sort/tuplesort.cpp b/src/common/backend/utils/sort/tuplesort.cpp index c687adfdc7..bfb6ab2da0 100644 --- a/src/common/backend/utils/sort/tuplesort.cpp +++ b/src/common/backend/utils/sort/tuplesort.cpp @@ -887,9 +887,9 @@ static Tuplesortstate* tuplesort_begin_common(int64 workMem, bool randomAccess, Tuplesortstate* tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber* attNums, Oid* sortOperators, Oid* sortCollations, const bool* nullsFirstFlags, int64 workMem, bool randomAccess, int64 maxMem, int planId, - int dop) + int dop, SortCoordinate coordinate) { - Tuplesortstate* state = 
tuplesort_begin_common(workMem, randomAccess); + Tuplesortstate* state = tuplesort_begin_common(workMem, randomAccess, coordinate); MemoryContext oldcontext; int i; diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h index de537ed510..f1f7dde48a 100644 --- a/src/include/utils/tuplesort.h +++ b/src/include/utils/tuplesort.h @@ -123,7 +123,7 @@ typedef struct SortCoordinateData *SortCoordinate; extern Tuplesortstate* tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber* attNums, Oid* sortOperators, Oid* sortCollations, const bool* nullsFirstFlags, int64 workMem, bool randomAccess, int64 maxMem = 0, - int planId = 0, int dop = 1); + int planId = 0, int dop = 1, SortCoordinate coordinate = NULL); extern Tuplesortstate* tuplesort_begin_cluster( TupleDesc tupDesc, Relation indexRel, int workMem, bool randomAccess, int maxMem, bool relIsUstore); extern Tuplesortstate* tuplesort_begin_index_btree( -- Gitee From 01d4c96ad4d4a4e3de7728ae5070a935a0c915aa Mon Sep 17 00:00:00 2001 From: zhaosen Date: Fri, 26 Jul 2024 14:45:14 +0800 Subject: [PATCH 100/347] reuse function in astore --- .../storage/access/table/tableam.cpp | 6 +-- .../storage/access/ustore/knl_uscan.cpp | 54 ++++++++----------- src/include/access/ustore/knl_uscan.h | 1 - 3 files changed, 22 insertions(+), 39 deletions(-) diff --git a/src/gausskernel/storage/access/table/tableam.cpp b/src/gausskernel/storage/access/table/tableam.cpp index 83ebf3b236..200ba28a1e 100644 --- a/src/gausskernel/storage/access/table/tableam.cpp +++ b/src/gausskernel/storage/access/table/tableam.cpp @@ -925,10 +925,6 @@ void UHeapamScanMarkpos(TableScanDesc sscan) return UHeapMarkPos(sscan); } -void UHeapamScanInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir) -{ - return UHeapInitParallelSeqscan(sscan, dop, dir); -} void UHeapamScanEndscan(TableScanDesc sscan) { @@ -1220,7 +1216,7 @@ static const TableAmRoutine g_ustoream_methods = { scan_restrpos : UHeapamScanRestrpos, scan_markpos : 
UHeapamScanMarkpos, - scan_init_parallel_seqscan : UHeapamScanInitParallelSeqscan, + scan_init_parallel_seqscan : HeapamScanInitParallelSeqscan, scan_getnexttuple : UHeapamScanGetnexttuple, scan_GetNextBatch : UHeapamGetNextBatchMode, scan_getpage : UHeapamScanGetpage, diff --git a/src/gausskernel/storage/access/ustore/knl_uscan.cpp b/src/gausskernel/storage/access/ustore/knl_uscan.cpp index f12c921b8a..b2c8af3dfb 100644 --- a/src/gausskernel/storage/access/ustore/knl_uscan.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uscan.cpp @@ -91,12 +91,28 @@ bool NextUpage(UHeapScanDesc scan, ScanDirection dir, BlockNumber& page) bool finished = false; if (scan->dop > 1) { Assert(scan->rs_parallel == NULL); - Assert(dir == ForwardScanDirection); - page++; - if ((page - scan->rs_base.rs_startblock) % PARALLEL_SCAN_GAP == 0) { - page += (scan->dop - 1) * PARALLEL_SCAN_GAP; + if (BackwardScanDirection == dir) { + finished = (page == 0); + if (finished) + return finished; + page--; + if ((scan->rs_base.rs_startblock - page) % PARALLEL_SCAN_GAP == 0) { + page -= (scan->dop - 1) * PARALLEL_SCAN_GAP; + } + } else { + page++; + if ((page - scan->rs_base.rs_startblock) % PARALLEL_SCAN_GAP == 0) { + page += (scan->dop - 1) * PARALLEL_SCAN_GAP; + } + + if (scan->rs_base.rs_rangeScanInRedis.isRangeScanInRedis) { + /* Parallel workers start from different point. 
*/ + finished = + (page >= scan->rs_base.rs_startblock + scan->rs_base.rs_nblocks - PARALLEL_SCAN_GAP * u_sess->stream_cxt.smp_id); + } else { + finished = (page >= scan->rs_base.rs_nblocks); + } } - finished = (page >= scan->rs_base.rs_nblocks); } else { /* * advance to next/prior page and detect end of scan @@ -1206,34 +1222,6 @@ void UHeapRestRpos(TableScanDesc sscan) } } -void UHeapInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir) -{ - HeapScanDesc scan = (HeapScanDesc) sscan; - - if (!scan || scan->rs_base.rs_nblocks == 0) { - return; - } - - if (dop <= 1) { - return; - } - - scan->dop = dop; - - uint32 paral_blocks = u_sess->stream_cxt.smp_id * PARALLEL_SCAN_GAP; - - /* If not enough pages to divide into every worker. */ - if (scan->rs_base.rs_nblocks <= paral_blocks) { - scan->rs_base.rs_startblock = 0; - scan->rs_base.rs_nblocks = 0; - return; - } - if(dir == BackwardScanDirection){ - ereport(ERROR, (errmsg("Backward Scan Direction is not support for ustore parallel seq scan."))); - } - scan->rs_base.rs_startblock = paral_blocks; -} - UHeapTuple UHeapGetNext(TableScanDesc sscan, ScanDirection dir, bool* has_cur_xact_write) { UHeapScanDesc scan = (UHeapScanDesc)sscan; diff --git a/src/include/access/ustore/knl_uscan.h b/src/include/access/ustore/knl_uscan.h index 5423340f01..e8a7fae26a 100644 --- a/src/include/access/ustore/knl_uscan.h +++ b/src/include/access/ustore/knl_uscan.h @@ -72,7 +72,6 @@ bool UHeapScanBitmapNextTuple(TableScanDesc sscan, TBMIterateResult *tbmres, Tup bool UHeapScanBitmapNextBlock(TableScanDesc sscan, const TBMIterateResult *tbmres, bool* has_cur_xact_write = NULL); bool UHeapGetPage(TableScanDesc sscan, BlockNumber page, bool* has_cur_xact_write = NULL); -void UeapInitParallelSeqscan(TableScanDesc sscan, int32 dop, ScanDirection dir); UHeapTuple UHeapGetNext(TableScanDesc sscan, ScanDirection dir, bool* has_cur_xact_write = NULL); extern bool UHeapGetTupPageBatchmode(UHeapScanDesc scan, ScanDirection dir); -- 
Gitee From a50aa3fc6ff5086ea42e30bfd6d8c229e3524fdb Mon Sep 17 00:00:00 2001 From: yaoxin Date: Fri, 26 Jul 2024 15:10:15 +0800 Subject: [PATCH 101/347] fix deadlock --- src/gausskernel/storage/access/ustore/knl_utuple.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/gausskernel/storage/access/ustore/knl_utuple.cpp b/src/gausskernel/storage/access/ustore/knl_utuple.cpp index e15f26eadf..2e84ec64f3 100644 --- a/src/gausskernel/storage/access/ustore/knl_utuple.cpp +++ b/src/gausskernel/storage/access/ustore/knl_utuple.cpp @@ -1324,8 +1324,6 @@ HeapTuple UHeapCopyHeapTuple(TupleTableSlot *slot) if (slot->tts_tuple != NULL) { tuple->t_self = ((UHeapTuple)slot->tts_tuple)->ctid; tuple->t_tableOid = ((UHeapTuple)slot->tts_tuple)->table_oid; - tuple->t_xid_base = ((UHeapTuple)slot->tts_tuple)->t_xid_base; - tuple->t_data->t_choice.t_heap.t_xmin = ((UHeapTuple)slot->tts_tuple)->disk_tuple->xid; } return tuple; -- Gitee From e96cd23a29df0b5362fc151d7eaebb5277d50d56 Mon Sep 17 00:00:00 2001 From: bowenliu Date: Thu, 25 Jul 2024 11:45:47 +0800 Subject: [PATCH 102/347] 0716dmstrace --- src/bin/gs_guc/cluster_guc.conf | 2 + .../backend/utils/misc/guc/guc_storage.cpp | 24 ++++++++ .../utils/misc/postgresql_single.conf.sample | 2 + .../ddes/adapter/ss_dms_bufmgr.cpp | 2 +- src/gausskernel/ddes/adapter/ss_init.cpp | 2 + src/gausskernel/ddes/ddes_commit_id | 4 +- src/include/ddes/dms/dms_api.h | 61 ++++++++++--------- .../knl/knl_guc/knl_instance_attr_storage.h | 2 + .../regress/output/recovery_2pc_tools.source | 2 + 9 files changed, 70 insertions(+), 31 deletions(-) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index ddfd0467bb..735da230ae 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -763,6 +763,8 @@ ss_dss_xlog_vg_name|string|0,0|NULL|NULL| ss_dss_conn_path|string|0,0|NULL|NULL| ss_enable_dms|bool|0,0|NULL|NULL| ss_enable_catalog_centralized|bool|0,0|NULL|NULL| 
+ss_enable_dynamic_trace|bool|0,0|NULL|NULL| +ss_enable_reform_trace|bool|0,0|NULL|NULL| ss_enable_ssl|bool|0,0|NULL|NULL| ss_enable_aio|bool|0,0|NULL|NULL| ss_enable_ondemand_realtime_build|bool|0,0|NULL|NULL| diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index c38bfe844a..7b72bd5e47 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -1148,6 +1148,30 @@ static void InitStorageConfigureNamesBool() NULL, NULL, NULL}, + {{"ss_enable_dynamic_trace", + PGC_POSTMASTER, + NODE_SINGLENODE, + SHARED_STORAGE_OPTIONS, + gettext_noop("Whether dms dynamic trace is enabled"), + NULL, + GUC_SUPERUSER_ONLY}, + &g_instance.attr.attr_storage.dms_attr.enable_dyn_trace, + true, + NULL, + NULL, + NULL}, + {{"ss_enable_reform_trace", + PGC_POSTMASTER, + NODE_SINGLENODE, + SHARED_STORAGE_OPTIONS, + gettext_noop("Whether dms reform dynamic trace is enabled"), + NULL, + GUC_SUPERUSER_ONLY}, + &g_instance.attr.attr_storage.dms_attr.enable_reform_trace, + true, + NULL, + NULL, + NULL}, {{"ss_enable_ssl", PGC_POSTMASTER, NODE_SINGLENODE, diff --git a/src/common/backend/utils/misc/postgresql_single.conf.sample b/src/common/backend/utils/misc/postgresql_single.conf.sample index b68c61e682..b85978598a 100644 --- a/src/common/backend/utils/misc/postgresql_single.conf.sample +++ b/src/common/backend/utils/misc/postgresql_single.conf.sample @@ -835,6 +835,8 @@ job_queue_processes = 10 # Number of concurrent jobs, optional: [0..1000] #ss_enable_ssl = on #ss_enable_aio = on #ss_enable_catalog_centralized = on +#ss_enable_dynamic_trace = on +#ss_enable_reform_trace = on #ss_instance_id = 0 #ss_dss_data_vg_name = '' #ss_dss_xlog_vg_name = '' diff --git a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp index 644751ff6b..24a4d775da 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp +++ 
b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp @@ -95,7 +95,7 @@ void TransformLockTagToDmsLatch(dms_drlatch_t* dlatch, const LOCKTAG locktag) dlatch->drid.type = locktag.locktag_type; dlatch->drid.oid = locktag.locktag_field1; dlatch->drid.index = locktag.locktag_field2; - dlatch->drid.parent_part = locktag.locktag_field3; + dlatch->drid.parent = locktag.locktag_field3; dlatch->drid.part = locktag.locktag_field4; dlatch->drid.uid = locktag.locktag_field5; } diff --git a/src/gausskernel/ddes/adapter/ss_init.cpp b/src/gausskernel/ddes/adapter/ss_init.cpp index 7e078be4ab..d982ec3744 100644 --- a/src/gausskernel/ddes/adapter/ss_init.cpp +++ b/src/gausskernel/ddes/adapter/ss_init.cpp @@ -395,6 +395,8 @@ static void set_dms_fi() static void setDMSProfile(dms_profile_t* profile) { knl_instance_attr_dms* dms_attr = &g_instance.attr.attr_storage.dms_attr; + profile->enable_dyn_trace = dms_attr->enable_dyn_trace; + profile->enable_reform_trace = dms_attr->enable_reform_trace; profile->resource_catalog_centralized = (unsigned int)dms_attr->enable_catalog_centralized; profile->inst_id = (uint32)dms_attr->instance_id; profile->page_size = BLCKSZ; diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 026c74ffd4..4e05e27af4 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=106e4c8860d9c307f612829c9f44a19d0402962d +dms_commit_id=4e5c455f771c9be5e78260371eed2f28474cf253 dss_commit_id=b714d964156722f4353840c0f5bb66c92158e3c6 -cbb_commit_id=7de11250903f6d5ade6deab3dc04fff6b29d0190 +cbb_commit_id=ac8ed05be35754e77032b4c9ec9b1eba53f1d5a6 diff --git a/src/include/ddes/dms/dms_api.h b/src/include/ddes/dms/dms_api.h index 53e38ae9e6..c0f29a600e 100644 --- a/src/include/ddes/dms/dms_api.h +++ b/src/include/ddes/dms/dms_api.h @@ -34,15 +34,19 @@ extern "C" { #define DMS_LOCAL_MINOR_VER_WEIGHT 1000 #define DMS_LOCAL_MAJOR_VERSION 0 #define DMS_LOCAL_MINOR_VERSION 0 -#define 
DMS_LOCAL_VERSION 160 +#define DMS_LOCAL_VERSION 162 #define DMS_SUCCESS 0 #define DMS_ERROR (-1) #ifdef OPENGAUSS #define DMS_PAGEID_SIZE 24 // openGauss bufferTag size #else -#define DMS_PAGEID_SIZE 16 +#define DMS_PAGEID_SIZE 8 #endif +#define DMS_ALOCK_NAME_SIZE 128 +#define DMS_ALOCKID_SIZE sizeof(alockid_t) +#define DMS_DRID_SIZE sizeof(dms_drid_t) +#define DMS_RESID_SIZE DMS_ALOCKID_SIZE #define DMS_XID_SIZE 12 #define DMS_INSTANCES_SIZE 4 @@ -153,34 +157,27 @@ typedef struct st_dms_drid { struct { unsigned long long key1; unsigned long long key2; - unsigned int key3; + unsigned long long key3; }; struct { - unsigned short type; // lock type - union { - unsigned short uid; // user id, for table lock resource - unsigned short len; - }; - union { - struct { - unsigned int oid; // lock id - unsigned int index; // index id - unsigned int parent_part; // parent partition id - unsigned int part; // partition id - }; - struct { - unsigned long long oid_64; - unsigned long long unused; - }; - struct { - unsigned char resid[DMS_DRID_CTX_SIZE]; - }; - }; + unsigned short type; + unsigned short uid; + unsigned int index; + unsigned long long oid; + unsigned int parent; + unsigned int part; }; }; } dms_drid_t; #pragma pack() +typedef struct st_alockid { + char name[DMS_ALOCK_NAME_SIZE]; + unsigned char len; + unsigned char type; + unsigned char unused[2]; +} alockid_t; + typedef enum en_drc_res_type { DRC_RES_INVALID_TYPE, DRC_RES_PAGE_TYPE, @@ -190,6 +187,7 @@ typedef enum en_drc_res_type { DRC_RES_LOCAL_TXN_TYPE, DRC_RES_LOCK_ITEM_TYPE, DRC_RES_GLOBAL_XA_TYPE, + DRC_RES_ALOCK_TYPE, DRC_RES_TYPE_MAX_COUNT, } drc_res_type_e; @@ -263,9 +261,6 @@ typedef struct st_dms_cr_assist_t { dms_cr_status_t status; /* OUT parameter */ } dms_cr_assist_t; -#define DMS_RESID_SIZE 132 -#define DMS_DRID_SIZE sizeof(dms_drid_t) - typedef struct st_dms_drlock { dms_drid_t drid; } dms_drlock_t; @@ -620,6 +615,8 @@ typedef enum en_dms_wait_event { DMS_EVT_DCS_REQ_XA_IN_USE, 
DMS_EVT_DCS_REQ_END_XA, DMS_EVT_REQ_CKPT, + DMS_EVT_PROC_GENERIC_REQ, + DMS_EVT_PROC_REFORM_REQ, // add new enum at tail, or make adaptations to openGauss DMS_EVT_COUNT, @@ -1000,6 +997,7 @@ typedef int (*dms_az_failover_promote_resetlog)(void *db_handle); typedef int (*dms_az_failover_promote_phase2)(void *db_handle); typedef int (*dms_check_shutdown_consistency)(void *db_handle, instance_list_t *old_remove); typedef int (*dms_check_db_readwrite)(void *db_handle); +typedef unsigned int (*dms_check_is_maintain)(); typedef struct st_dms_callback { // used in reform @@ -1187,6 +1185,7 @@ typedef struct st_dms_callback { dms_get_alock_wait_info get_alock_wait_info; dms_check_shutdown_consistency check_shutdown_consistency; dms_check_db_readwrite check_db_readwrite; + dms_check_is_maintain check_is_maintain; } dms_callback_t; typedef struct st_dms_instance_net_addr { @@ -1251,6 +1250,8 @@ typedef struct st_dms_profile { unsigned char enable_mes_task_threadpool; unsigned int mes_task_worker_max_cnt; unsigned int max_alive_time_for_abnormal_status; + unsigned char enable_dyn_trace; + unsigned char enable_reform_trace; } dms_profile_t; typedef struct st_logger_param { @@ -1347,8 +1348,6 @@ typedef struct st_dms_tlock_info { unsigned char unused[3]; } dms_tlock_info_t; -typedef dms_tlock_info_t dms_alock_info_t; - typedef struct thread_info { char thread_name[DMS_MAX_NAME_LEN]; void *thread_info; @@ -1359,6 +1358,12 @@ typedef struct thread_set { int thread_count; } thread_set_t; +typedef struct st_dms_alock_info { + alockid_t alockid; + unsigned char lock_mode; + unsigned char unused[3]; +} dms_alock_info_t; + typedef struct st_driver_ping_info { unsigned long long rw_bitmap; dms_role_t dms_role; diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h index 772c8ed164..267846f93f 100755 --- a/src/include/knl/knl_guc/knl_instance_attr_storage.h +++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h @@ -129,6 
+129,8 @@ typedef struct knl_instance_attr_dms { bool enable_bcast_snapshot; char* work_thread_pool_attr; int32 work_thread_pool_max_cnt; + bool enable_dyn_trace; + bool enable_reform_trace; } knl_instance_attr_dms; typedef struct knl_instance_attr_storage { diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source index 9a87d43212..c073b44de5 100644 --- a/src/test/regress/output/recovery_2pc_tools.source +++ b/src/test/regress/output/recovery_2pc_tools.source @@ -650,6 +650,8 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c ss_enable_aio | bool | | | ss_enable_bcast_snapshot | bool | | | ss_enable_catalog_centralized | bool | | | + ss_enable_dynamic_trace | bool | | | + ss_enable_reform_trace | bool | | | ss_enable_dms | bool | | | ss_enable_dss | bool | | | ss_enable_ondemand_realtime_build | bool | | | -- Gitee From c25093577e5de7b6bcc694615eca2f111695c665 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Tue, 23 Jul 2024 16:41:48 +0800 Subject: [PATCH 103/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=B8=B8=E6=A0=87?= =?UTF-8?q?=E8=A1=A8=E8=BE=BE=E5=BC=8F=E5=81=9Atargetlist=E7=9A=84coredump?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_expr.cpp | 31 ++++++++++++------- .../expected/parallel_enable_function.out | 24 ++++++++++++++ src/test/regress/expected/smp_cursor.out | 26 ++++++++++++++++ .../regress/sql/parallel_enable_function.sql | 3 ++ src/test/regress/sql/smp_cursor.sql | 5 +++ 5 files changed, 77 insertions(+), 12 deletions(-) diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index df6c523c75..b85da6f35a 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -110,7 +110,7 @@ static Node *transformStartWithWhereClauseColumnRef(ParseState *pstate, 
ColumnRe static Node* tryTransformFunc(ParseState* pstate, List* fields, int location); static void SubCheckOutParam(List* exprtargs, Oid funcid); static Node* transformPrefixKey(ParseState* pstate, PrefixKey* pkey); -static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cursor_expression); +static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cursor_expression, bool smp = false); static Node* transformStringCast(ParseState* pstate, char *str, int location, TypeName *typname); static Node* transformBinaryDoubleInf(ParseState* pstate, ColumnRef *cref, char *colname); static Node* transformBinaryDoubleNan(ParseState* pstate, ColumnRef *cref, char *colname); @@ -1893,20 +1893,19 @@ static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) result = ParseFuncOrColumn(pstate, fn->funcname, targs, last_srf, fn, fn->location, fn->call_func); if (IsA(result, FuncExpr)) { - /* if function is not SRF or pipelined, close smp for all CursorExpressions */ - int2 seq = (!((FuncExpr*)result)->funcretset && - !PROC_IS_PIPELINED(get_func_prokind(((FuncExpr*)result)->funcid))) ? - -1 : GetParallelCursorSeq(((FuncExpr*)result)->funcid); + /* + * If function is not function table, close smp for all CursorExpressions. + * If function is not SRF or pipelined, close smp too. + * */ + int2 seq = (pstate->p_expr_kind != EXPR_KIND_FROM_FUNCTION || (!((FuncExpr*)result)->funcretset && + !PROC_IS_PIPELINED(get_func_prokind(((FuncExpr*)result)->funcid)))) ? 
+ -1 : GetParallelCursorSeq(((FuncExpr*)result)->funcid); int2 i = 0; - AutoDopControl dopControl; foreach (args, ((FuncExpr*)result)->args) { Node* arg = (Node*)lfirst(args); if (IsA(arg, CursorExpression)) { - if (i != seq) { - dopControl.CloseSmp(); - } - lfirst(args) = transformExprRecurse(pstate, arg); - dopControl.ResetSmp(); + pstate->p_expr_transform_level++; + lfirst(args) = transformCursorExpression(pstate, (CursorExpression*)arg, i == seq); } i++; } @@ -3890,7 +3889,7 @@ static Node* transformCursorOuterVarAsParam(ParseState* pstate, ColumnRef* cref, } -static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cursor_expression) +static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cursor_expression, bool smp) { CursorExpression* newm = makeNode(CursorExpression); char* queryString; @@ -3900,6 +3899,11 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur List* stmt_list = NIL; ParseState* parse_state_temp = NULL; int level = ++u_sess->parser_cxt.cursor_expr_level; + AutoDopControl dopControl; + + if (!smp) { + dopControl.CloseSmp(); + } ParseState* parse_state_parent = pstate; @@ -3961,6 +3965,9 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur parse_state_parent->is_outer_parse_state = false; parse_state_temp = parse_state_temp->parentParseState; } + + /* restore smp */ + dopControl.ResetSmp(); return (Node*)newm; } diff --git a/src/test/regress/expected/parallel_enable_function.out b/src/test/regress/expected/parallel_enable_function.out index 36f755b031..e192b9c95f 100644 --- a/src/test/regress/expected/parallel_enable_function.out +++ b/src/test/regress/expected/parallel_enable_function.out @@ -383,6 +383,30 @@ select hash_srf(cursor (select * from employees)) limit 10; (10,60,abc,def,123,123) (10 rows) +explain (costs off) select hash_srf(cursor (select * from employees)) from employees limit 10; + QUERY PLAN 
+---------------------------------------------- + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Seq Scan on employees +(4 rows) + +select hash_srf(cursor (select * from employees)) from employees limit 10; + hash_srf +------------------------- + (1,60,abc,def,123,123) + (2,60,abc,def,123,123) + (3,60,abc,def,123,123) + (4,60,abc,def,123,123) + (5,60,abc,def,123,123) + (6,60,abc,def,123,123) + (7,60,abc,def,123,123) + (8,60,abc,def,123,123) + (9,60,abc,def,123,123) + (10,60,abc,def,123,123) +(10 rows) + -- subquery cannot smp explain (costs off) select 1, (select count(*) from hash_srf(cursor (select * from employees))); QUERY PLAN diff --git a/src/test/regress/expected/smp_cursor.out b/src/test/regress/expected/smp_cursor.out index 96b492e4cf..451be46f8e 100644 --- a/src/test/regress/expected/smp_cursor.out +++ b/src/test/regress/expected/smp_cursor.out @@ -519,5 +519,31 @@ NOTICE: --?duration.* end; +-- cursor expr in targetlist do not smp +set enable_auto_explain = off; +explain (costs off) select a, cursor(select * from t1) from t1 limit 10; + QUERY PLAN +---------------------------------------------- + Limit + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Limit + -> Seq Scan on t1 +(4 rows) + +select a, cursor(select * from t1) from t1 limit 10; + a | ?column? 
+----+------------------ +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +(10 rows) + drop schema smp_cursor cascade; NOTICE: drop cascades to table t1 diff --git a/src/test/regress/sql/parallel_enable_function.sql b/src/test/regress/sql/parallel_enable_function.sql index 21ceed5821..64967b6241 100644 --- a/src/test/regress/sql/parallel_enable_function.sql +++ b/src/test/regress/sql/parallel_enable_function.sql @@ -156,6 +156,9 @@ select * from hash_srf(cursor (select * from employees)) a, hash_srf(cursor (sel explain (costs off) select hash_srf(cursor (select * from employees)) limit 10; select hash_srf(cursor (select * from employees)) limit 10; +explain (costs off) select hash_srf(cursor (select * from employees)) from employees limit 10; +select hash_srf(cursor (select * from employees)) from employees limit 10; + -- subquery cannot smp explain (costs off) select 1, (select count(*) from hash_srf(cursor (select * from employees))); select 1, (select count(*) from hash_srf(cursor (select * from employees))); diff --git a/src/test/regress/sql/smp_cursor.sql b/src/test/regress/sql/smp_cursor.sql index 8334505c59..f53242f8a9 100644 --- a/src/test/regress/sql/smp_cursor.sql +++ b/src/test/regress/sql/smp_cursor.sql @@ -79,4 +79,9 @@ fetch all xc; move xc; end; +-- cursor expr in targetlist do not smp +set enable_auto_explain = off; +explain (costs off) select a, cursor(select * from t1) from t1 limit 10; +select a, cursor(select * from t1) from t1 limit 10; + drop schema smp_cursor cascade; \ No newline at end of file -- Gitee From edd49ba180e0501316e100f9e3662b3e6fca115f Mon Sep 17 00:00:00 2001 From: jiangyan <18091841830@163.com> Date: Wed, 24 Jul 2024 12:12:20 +0800 Subject: [PATCH 104/347] =?UTF-8?q?=E5=A4=A7=E6=8B=AC=E5=8F=B7=E7=AC=94?= =?UTF-8?q?=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../process/postmaster/postmaster.cpp | 47 +++++++++---------- 1 file changed, 23 
insertions(+), 24 deletions(-) diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index f1c747e474..92fb6e1173 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -15047,32 +15047,31 @@ Datum disable_conn(PG_FUNCTION_ARGS) uint32 conn_mode = pg_atomic_read_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node); if (!WalRcvInProgress() && g_instance.pid_cxt.BarrierPreParsePID == 0) { g_instance.csn_barrier_cxt.startBarrierPreParse = true; - while (checkTimes--) { - if (knl_g_get_redo_finish_status()) { - redoDone = true; - break; - } - ereport(LOG, (errmsg("%d redo_done", redoDone))); - sleep(0.01); - } - ereport(LOG, (errmsg("%d redo_done", redoDone))); - if (!redoDone) { - if (!conn_mode) { - pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, true); - // clean redo done - pg_atomic_write_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage, false); - } - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not add lock when DN is not redo all xlog, redo done flag is false"))); + } + while (checkTimes--) { + if (knl_g_get_redo_finish_status()) { + redoDone = true; + break; } - - XLogRecPtr replay1 = GetXLogReplayRecPtrInPending(); - sleep(0.5); - XLogRecPtr replay2 = GetXLogReplayRecPtrInPending(); - if (replay1 != replay2) { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not add lock when DN is not redo all xlog."))); - } + sleep(0.01); + } + ereport(LOG, (errmsg("%d redo_done", redoDone))); + if (!redoDone) { + if (!conn_mode) { + pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, true); + // clean redo done + pg_atomic_write_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage, false); + } + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not add lock when DN is not redo all xlog, redo done flag is 
false"))); } + + XLogRecPtr replay1 = GetXLogReplayRecPtrInPending(); + sleep(0.5); + XLogRecPtr replay2 = GetXLogReplayRecPtrInPending(); + if (replay1 != replay2) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not add lock when DN is not redo all xlog."))); + } } else { pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, false); } -- Gitee From e0bd743d8f1fcb283292f17a33a9b936e8e4c0a5 Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 11 Jul 2024 21:06:19 +0800 Subject: [PATCH 105/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9A?= =?UTF-8?q?=E8=A7=86=E5=9B=BE=E5=88=97=E4=B8=BA=E5=9F=BA=E8=A1=A8=E5=88=97?= =?UTF-8?q?=E7=9A=84=E8=A1=A8=E8=BE=BE=E5=BC=8F=E8=AE=A1=E7=AE=97=E7=BB=93?= =?UTF-8?q?=E6=9E=9C=EF=BC=8C=E4=BF=AE=E6=94=B9=E5=9F=BA=E8=A1=A8=E5=88=97?= =?UTF-8?q?=E7=9A=84=E5=8F=98=E9=87=8F=E7=B1=BB=E5=9E=8B=E5=90=8E=EF=BC=8C?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E8=A7=86=E5=9B=BE=E5=AE=95=E6=9C=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_relation.cpp | 134 ++++--- .../optimizer/commands/tablecmds.cpp | 351 +++++++++++++----- src/include/commands/tablecmds.h | 5 + src/test/regress/expected/dependent_view.out | 21 ++ src/test/regress/sql/dependent_view.sql | 10 + 5 files changed, 359 insertions(+), 162 deletions(-) diff --git a/src/common/backend/parser/parse_relation.cpp b/src/common/backend/parser/parse_relation.cpp index 4935dac876..28c578b0e4 100755 --- a/src/common/backend/parser/parse_relation.cpp +++ b/src/common/backend/parser/parse_relation.cpp @@ -1037,50 +1037,6 @@ static bool CheckRelationColumnExists(Oid rel_oid, int2 attnum, Form_pg_attribut return true; } -static void CheckPgAttribute(Oid obj_oid, char* attName, Form_pg_attribute new_attribute) -{ - const int keyNum = 2; - Relation rel; - ScanKeyData key[keyNum]; - SysScanDesc scan; - HeapTuple tuple; - HeapTuple new_dep_tuple; - Form_pg_attribute attForm; - rel = 
heap_open(AttributeRelationId, RowExclusiveLock); - ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(obj_oid)); - ScanKeyInit(&key[1], Anum_pg_attribute_attname, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(attName)); - scan = systable_beginscan(rel, AttributeRelidNameIndexId, true, SnapshotSelf, keyNum, &key[0]); - tuple = systable_getnext(scan); - if (!HeapTupleIsValid(tuple)) { - systable_endscan(scan); - heap_close(rel, RowExclusiveLock); - elog(ERROR, "catalog lookup failed for column %s of relation %u", attName, obj_oid); - } - attForm = (Form_pg_attribute)GETSTRUCT(tuple); - Datum values[Natts_pg_attribute] = { 0 }; - bool nulls[Natts_pg_attribute] = { 0 }; - bool replaces[Natts_pg_attribute] = { 0 }; - values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(new_attribute->atttypid); - values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(new_attribute->attlen); - values[Anum_pg_attribute_atttypmod - 1] = Int32GetDatum(new_attribute->atttypmod); - values[Anum_pg_attribute_attbyval - 1] = BoolGetDatum(new_attribute->attbyval); - values[Anum_pg_attribute_attstorage - 1] = CharGetDatum(new_attribute->attstorage); - values[Anum_pg_attribute_attcollation - 1] = ObjectIdGetDatum(new_attribute->attcollation); - replaces[Anum_pg_attribute_atttypid - 1] = true; - replaces[Anum_pg_attribute_attlen - 1] = true; - replaces[Anum_pg_attribute_atttypmod - 1] = true; - replaces[Anum_pg_attribute_attbyval - 1] = true; - replaces[Anum_pg_attribute_attstorage - 1] = true; - replaces[Anum_pg_attribute_attcollation - 1] = true; - new_dep_tuple = heap_modify_tuple(tuple, RelationGetDescr(rel), values, nulls, replaces); - simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple); - CatalogUpdateIndexes(rel, new_dep_tuple); - heap_freetuple_ext(new_dep_tuple); - CommandCounterIncrement(); - systable_endscan(scan); - heap_close(rel, RowExclusiveLock); -} - static bool findDependentTable(Relation rel, Oid type_id) { bool found = 
false; @@ -1119,6 +1075,8 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* bool existTable = false; Oid rw_objid = InvalidOid; Oid type_id = InvalidOid; + List* originEvAction = NIL; + List* freshedEvAction = NIL; // 1. filter the valid view if (GetPgObjectValid(view_oid, objType)) { return ValidateDependValid; @@ -1129,7 +1087,6 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* return ValidateDependCircularDepend; } *list = lappend_oid(*list, view_oid); - // 2. find pg_rewrite/pg_type entry which depend on this view internally const int keyNum = 2; ScanKeyData key[keyNum]; @@ -1166,6 +1123,7 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* scan_dep = systable_beginscan(rel_dep, DependDependerIndexId, true, NULL, keyNum, key_dep); Form_pg_attribute newtuple = (Form_pg_attribute)palloc0(sizeof(FormData_pg_attribute)); bool circularDependency = false; + bool is_changed = false; while (HeapTupleIsValid((tup_dep = systable_getnext(scan_dep)))) { Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup_dep); if (depform->refclassid != RelationRelationId || depform->deptype != DEPENDENCY_NORMAL || @@ -1181,21 +1139,21 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* isValid &= CheckRelationColumnExists(dep_objid, dep_objsubid, newtuple); if (newtuple->attnum > 0) { // change pg_depend - Datum values[Natts_pg_depend] = { 0 }; - bool nulls[Natts_pg_depend] = { 0 }; - bool replaces[Natts_pg_depend] = { 0 }; - HeapTuple new_dep_tuple; - values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newtuple->attnum); - replaces[Anum_pg_depend_refobjsubid - 1] = true; - new_dep_tuple = heap_modify_tuple(tup_dep, RelationGetDescr(rel_dep), values, nulls, replaces); - simple_heap_update(rel_dep, &new_dep_tuple->t_self, new_dep_tuple); - CatalogUpdateIndexes(rel_dep, new_dep_tuple); - heap_freetuple_ext(new_dep_tuple); - CommandCounterIncrement(); - // 
change pg_rewrite targetEntry - CheckPgRewriteWithDroppedColumn(dep_objid, rw_objid, newtuple, dep_objsubid, &attName, &query_str); - // change pg_attribute - CheckPgAttribute(view_oid, attName, newtuple); + if (newtuple->attnum != dep_objsubid) { + Datum values[Natts_pg_depend] = { 0 }; + bool nulls[Natts_pg_depend] = { 0 }; + bool replaces[Natts_pg_depend] = { 0 }; + HeapTuple new_dep_tuple; + values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newtuple->attnum); + replaces[Anum_pg_depend_refobjsubid - 1] = true; + new_dep_tuple = heap_modify_tuple(tup_dep, RelationGetDescr(rel_dep), values, nulls, replaces); + simple_heap_update(rel_dep, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rel_dep, new_dep_tuple); + heap_freetuple_ext(new_dep_tuple); + CommandCounterIncrement(); + } + is_changed |= UpdateChangedColumnForView(view_oid, dep_objid, dep_objsubid, rw_objid, + &originEvAction, &freshedEvAction, newtuple); } } else if (relkind == RELKIND_VIEW || relkind == RELKIND_MATVIEW) { char type = relkind == RELKIND_VIEW ? OBJECT_TYPE_VIEW : OBJECT_TYPE_MATVIEW; @@ -1211,10 +1169,8 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* // here means dep_objid is valid, we should keep the same view_oid.attr with dep_objid.dep_objsubid // find dep_objid.dep_objsubid CheckViewColumnExists(dep_objid, dep_objsubid, newtuple); - // change pg_rewrite targetEntry - CheckPgRewriteWithDroppedColumn(dep_objid, rw_objid, newtuple, dep_objsubid, &attName, &query_str); - // change pg_attribute - CheckPgAttribute(view_oid, attName, newtuple); + is_changed |= UpdateChangedColumnForView(view_oid, dep_objid, dep_objsubid, rw_objid, + &originEvAction, &freshedEvAction, newtuple); } circularDependency |= (result == ValidateDependCircularDepend); } @@ -1237,15 +1193,48 @@ static ValidateDependResult ValidateDependView(Oid view_oid, char objType, List* elog(ERROR, "The view is invalid. 
There is a table dependent on the view so it cannot be recompiled."); } heap_close(rel_dep, RowExclusiveLock); + // 3.3 change pg_rewrite's evAction + if (is_changed) { + UpdatePgrewriteForView(rw_objid, freshedEvAction, &query_str); + } // 4. mark the current view valid if (!circularDependency) { SetPgObjectValid(view_oid, objType, true); } /* create or replace view */ - if (!circularDependency && objType == OBJECT_TYPE_VIEW) { + if (!circularDependency && query_str != NIL) { ReplaceViewQueryFirstAfter(query_str); CommandCounterIncrement(); } + list_free_ext(query_str); + if (objType == OBJECT_TYPE_MATVIEW) { + HeapTuple tup = SearchSysCache1(RELOID, ObjectIdGetDatum(view_oid)); + Form_pg_class relform = (Form_pg_class)GETSTRUCT(tup); + Oid toastid = relform->reltoastrelid; + if (OidIsValid(toastid)) { + HeapTuple toasttuple = SearchSysCache1(RELOID, ObjectIdGetDatum(toastid)); + if (!HeapTupleIsValid(toasttuple)) { + Relation rel_class = heap_open(RelationRelationId, RowExclusiveLock); + Datum values[Natts_pg_class] = { 0 }; + bool nulls[Natts_pg_class] = { 0 }; + bool replaces[Natts_pg_class] = { 0 }; + HeapTuple newtuple; + values[Anum_pg_class_reltoastrelid - 1] = InvalidOid; + replaces[Anum_pg_class_reltoastrelid - 1] = true; + newtuple = heap_modify_tuple(tup, RelationGetDescr(rel_class), values, nulls, replaces); + simple_heap_update(rel_class, &newtuple->t_self, newtuple); + CatalogUpdateIndexes(rel_class, newtuple); + heap_freetuple_ext(newtuple); + CommandCounterIncrement(); + heap_close(rel_class, RowExclusiveLock); + } else { + ReleaseSysCache(toasttuple); + } + } + ReleaseSysCache(tup); + } + list_free(originEvAction); + list_free(freshedEvAction); /* 0 or 1 */ return (ValidateDependResult)isValid; } @@ -1420,13 +1409,14 @@ Relation parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockm } if (RelationGetRelkind(rel) == RELKIND_VIEW && - RelationGetRelid(rel) >= FirstNormalObjectId && - ValidateDependView(RelationGetRelid(rel), 
OBJECT_TYPE_VIEW) == ValidateDependInvalid) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("The view %s is invalid, please make it valid before operation.", - RelationGetRelationName(rel)), - errhint("Please re-add missing table fields."))); + RelationGetRelid(rel) >= FirstNormalObjectId) { + if (ValidateDependView(RelationGetRelid(rel), OBJECT_TYPE_VIEW) == ValidateDependInvalid) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The view %s is invalid, please make it valid before operation.", + RelationGetRelationName(rel)), + errhint("Please re-add missing table fields."))); + } } if (!u_sess->attr.attr_common.XactReadOnly && rel->rd_id == UserStatusRelationId) { diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 7e3061fab6..9d7e7927a9 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -243,7 +243,13 @@ typedef struct ViewInfoForAdd { char *query_string; } ViewInfoForAdd; - +/* Context for check whether the targetEntry of view's querytree has changed */ +typedef struct { + Oid relid; + int2 attnum; + Query* query; + Form_pg_attribute attForm; +} ViewQueryCheck_context; /* Struct describing one new constraint to check in Phase 3 scan */ /* Note: new NOT NULL constraints are handled elsewhere */ @@ -12142,112 +12148,273 @@ static List *CheckPgRewriteFirstAfter(Relation rel) return query_str; } +static bool check_changed_tle_walker(Node* node, ViewQueryCheck_context* context) +{ + if (node == NULL) + return false; + if (IsA(node, Var)) { + Var* var = (Var*)node; + Oid relid = InvalidOid; + RangeTblEntry* rte = rt_fetch(var->varno, context->query->rtable); + if (rte && rte->rtekind == RTE_RELATION) { + relid = rte->relid; + } else if (rte && rte->alias == NULL && rte->rtekind == RTE_JOIN && rte->joinaliasvars != NIL) { + Var* aliasvar = (Var*)list_nth(rte->joinaliasvars, var->varattno - 1); + 
RangeTblEntry* alias_rte = rt_fetch(aliasvar->varno, context->query->rtable); + Assert(alias_rte->rtekind == RTE_RELATION); + relid = alias_rte->relid; + } + if (relid == context->relid && + var->varattno == context->attnum) { + return !(var->vartype == context->attForm->atttypid && + var->vartypmod == context->attForm->atttypmod && + var->varcollid == context->attForm->attcollation); + } else { + return false; + } + } + return expression_tree_walker(node, (bool (*)())check_changed_tle_walker, context); +} -void CheckPgRewriteWithDroppedColumn(Oid rel_oid, Oid rw_oid, Form_pg_attribute attForm, - int2 old_attnum, char** attName, List **old_query_str) +static void CheckPgAttribute(Oid obj_oid, char* attName, Form_pg_attribute new_attribute) { - List *query_str = NIL; + if (!OidIsValid(obj_oid) || attName == NULL) { + return; + } + const int keyNum = 2; + Relation rel; + ScanKeyData key[keyNum]; + SysScanDesc scan; + HeapTuple tuple; + HeapTuple new_dep_tuple; + Form_pg_attribute attForm; + rel = heap_open(AttributeRelationId, RowExclusiveLock); + ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(obj_oid)); + ScanKeyInit(&key[1], Anum_pg_attribute_attname, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(attName)); + scan = systable_beginscan(rel, AttributeRelidNameIndexId, true, SnapshotSelf, keyNum, &key[0]); + tuple = systable_getnext(scan); + if (!HeapTupleIsValid(tuple)) { + systable_endscan(scan); + heap_close(rel, RowExclusiveLock); + elog(ERROR, "catalog lookup failed for column %s of relation %u", attName, obj_oid); + } + attForm = (Form_pg_attribute)GETSTRUCT(tuple); + Datum values[Natts_pg_attribute] = { 0 }; + bool nulls[Natts_pg_attribute] = { 0 }; + bool replaces[Natts_pg_attribute] = { 0 }; + values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(new_attribute->atttypid); + values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(new_attribute->attlen); + values[Anum_pg_attribute_atttypmod - 1] = 
Int32GetDatum(new_attribute->atttypmod); + values[Anum_pg_attribute_attbyval - 1] = BoolGetDatum(new_attribute->attbyval); + values[Anum_pg_attribute_attstorage - 1] = CharGetDatum(new_attribute->attstorage); + values[Anum_pg_attribute_attcollation - 1] = ObjectIdGetDatum(new_attribute->attcollation); + replaces[Anum_pg_attribute_atttypid - 1] = true; + replaces[Anum_pg_attribute_attlen - 1] = true; + replaces[Anum_pg_attribute_atttypmod - 1] = true; + replaces[Anum_pg_attribute_attbyval - 1] = true; + replaces[Anum_pg_attribute_attstorage - 1] = true; + replaces[Anum_pg_attribute_attcollation - 1] = true; + new_dep_tuple = heap_modify_tuple(tuple, RelationGetDescr(rel), values, nulls, replaces); + simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rel, new_dep_tuple); + heap_freetuple_ext(new_dep_tuple); + CommandCounterIncrement(); + systable_endscan(scan); + heap_close(rel, RowExclusiveLock); +} + +static void UpdatePgAttributeForView(TargetEntry* old_tle, TargetEntry* new_tle, Oid view_oid, Form_pg_attribute att_form) +{ + Node* old_node = (Node*)old_tle->expr; + Node* new_node = (Node*)new_tle->expr; + Assert(nodeTag(old_node) == nodeTag(new_node)); + char* col_name = old_tle->resname; + Oid old_type = exprType(old_node); + Oid new_type = exprType(new_node); + int32 old_typmod = exprTypmod(old_node); + int32 new_typmod = exprTypmod(new_node); + if (old_type != new_type || old_typmod != new_typmod) { + // get from pg_type + HeapTuple tp; + tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(new_type)); + if (HeapTupleIsValid(tp)) { + Form_pg_type typtup = (Form_pg_type)GETSTRUCT(tp); + att_form->atttypid = new_type; + att_form->atttypmod = new_typmod; + att_form->attlen = typtup->typlen; + att_form->attbyval = typtup->typbyval; + att_form->attstorage = typtup->typstorage; + att_form->attcollation = typtup->typcollation; + ReleaseSysCache(tp); + } else { + elog(ERROR, "Cannot find the type with oid %u.", new_type); + } + 
CheckPgAttribute(view_oid, col_name, att_form); + } +} + +static List* GetOriginalViewQuery(Oid rw_oid) +{ + List *evAction = NIL; ScanKeyData entry; ScanKeyInit(&entry, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(rw_oid)); Relation rewrite_rel = heap_open(RewriteRelationId, RowExclusiveLock); SysScanDesc rewrite_scan = systable_beginscan(rewrite_rel, RewriteOidIndexId, true, NULL, 1, &entry); HeapTuple rewrite_tup = systable_getnext(rewrite_scan); if (!HeapTupleIsValid(rewrite_tup)) { - systable_endscan(rewrite_scan); - heap_close(rewrite_rel, RowExclusiveLock); - return; + elog(ERROR, "Cannot find the rewrite rule with oid %u.", rw_oid); } Form_pg_rewrite rewrite_form = (Form_pg_rewrite)GETSTRUCT(rewrite_tup); - if (strcmp(NameStr(rewrite_form->rulename), ViewSelectRuleName) != 0) { - systable_endscan(rewrite_scan); - heap_close(rewrite_rel, RowExclusiveLock); - return; + if (strcmp(NameStr(rewrite_form->rulename), "_RETURN") != 0) { + elog(ERROR, "The rewrite rule with oid %u has unexpected name %s.", rw_oid, NameStr(rewrite_form->rulename)); } bool is_null = false; Datum evActiomDatum = fastgetattr(rewrite_tup, Anum_pg_rewrite_ev_action, rewrite_rel->rd_att, &is_null); if (!is_null) { - Datum values[Natts_pg_rewrite] = { 0 }; - bool nulls[Natts_pg_rewrite] = { 0 }; - bool replaces[Natts_pg_rewrite] = { 0 }; char *evActionString = TextDatumGetCString(evActiomDatum); - List *evAction = (List *)stringToNode(evActionString); - Query* query = (Query*)linitial(evAction); - // change querytree's targetEntry and RTE - ListCell* lc = NULL; - foreach (lc, query->targetList) { - TargetEntry* tle = (TargetEntry*)lfirst(lc); - Index rtevarno = 0; - AttrNumber rtevarattno = 0; - if (nodeTag((Node*)tle->expr) == T_Var && tle->resorigtbl == rel_oid && - tle->resorigcol == old_attnum) { - tle->resorigcol = attForm->attnum; - Var *var = (Var *)tle->expr; - rtevarno = var->varno; - rtevarattno = var->varattno; - var->vartype = attForm->atttypid; - 
var->vartypmod = attForm->atttypmod; - var->varcollid = attForm->attcollation; - *attName = pstrdup(tle->resname); - } - // change rtable entry - if (rtevarno == 0 || rtevarattno == 0) { - continue; - } - RangeTblEntry* rte = rt_fetch(rtevarno, query->rtable); - if (!rte || rte->alias != NULL || rte->rtekind != RTE_JOIN || rte->joinaliasvars == NIL) { - Var *var = (Var *)tle->expr; - var->varattno = attForm->attnum; - var->varoattno = attForm->attnum; - continue; - } - Var* aliasvar = (Var*)list_nth(rte->joinaliasvars, rtevarattno - 1); - if (IsA(aliasvar, Var)) { - aliasvar->varattno = attForm->attnum; - aliasvar->varoattno = attForm->attnum; - aliasvar->vartype = attForm->atttypid; - aliasvar->vartypmod = attForm->atttypmod; - aliasvar->varcollid = attForm->attcollation; - } + evAction = (List *)stringToNode(evActionString); + } else { + elog(ERROR, "Cannot find the rewrite rule with oid %u.", rw_oid); + } + systable_endscan(rewrite_scan); + heap_close(rewrite_rel, RowExclusiveLock); + return evAction; +} + +List* GetRefreshedViewQuery(Oid view_oid, Oid rw_oid) +{ + List* evAction = NIL; + Query* query = NULL; + HeapTuple tup = SearchSysCache1(RELOID, ObjectIdGetDatum(view_oid)); + if (!HeapTupleIsValid(tup)) { + elog(ERROR, "Cannot find the view with oid %u.", view_oid); + } + Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tup); + char* view_def = GetCreateViewCommand(NameStr(reltup->relname), tup, reltup, rw_oid, view_oid); + List* raw_parsetree_list = raw_parser(view_def); + Node* stmtNode = (Node*)linitial(raw_parsetree_list); + Assert((IsA(stmtNode, ViewStmt) || IsA(stmtNode, CreateTableAsStmt))); + if (IsA(stmtNode, ViewStmt)) { + ViewStmt* stmt = (ViewStmt*)stmtNode; + if (!IsA(stmt->query, Query)) { + query = parse_analyze(stmt->query, view_def, NULL, 0); + } else { + query = (Query*)stmt->query; + } + } else if (IsA(stmtNode, CreateTableAsStmt)) { + CreateTableAsStmt* stmt = (CreateTableAsStmt*)stmtNode; + if (!IsA(stmt->query, Query)) { + query = 
parse_analyze(stmt->query, view_def, NULL, 0); + } else { + query = (Query*)stmt->query; } - char* actiontree = nodeToString((Node*)evAction); - HeapTuple new_dep_tuple; - values[Anum_pg_rewrite_ev_action - 1] = CStringGetTextDatum(actiontree); - replaces[Anum_pg_rewrite_ev_action - 1] = true; - new_dep_tuple = heap_modify_tuple(rewrite_tup, RelationGetDescr(rewrite_rel), values, nulls, replaces); - simple_heap_update(rewrite_rel, &new_dep_tuple->t_self, new_dep_tuple); - CatalogUpdateIndexes(rewrite_rel, new_dep_tuple); - CommandCounterIncrement(); - StringInfoData buf; - initStringInfo(&buf); - Relation ev_relation = heap_open(rewrite_form->ev_class, AccessShareLock); - get_query_def(query, - &buf, - NIL, - RelationGetDescr(ev_relation), - 0, - -1, - 0, - false, - false, - NULL, - false, - false); - appendStringInfo(&buf, ";"); - ViewInfoForAdd * info = static_cast(palloc(sizeof(ViewInfoForAdd))); - info->ev_class = rewrite_form->ev_class; - info->query_string = pstrdup(buf.data); - heap_close(ev_relation, AccessShareLock); - FreeStringInfo(&buf); - query_str = lappend(query_str, info); - *old_query_str = query_str; - heap_freetuple_ext(new_dep_tuple); - pfree_ext(evActionString); - pfree_ext(actiontree); } + evAction = lappend(evAction, query); + ReleaseSysCache(tup); + pfree(view_def); + list_free(raw_parsetree_list); + return evAction; +} + +void UpdatePgrewriteForView(Oid rw_oid, List* evAction, List **query_str) +{ + List *new_query_str = NIL; + ScanKeyData entry; + ScanKeyInit(&entry, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(rw_oid)); + Relation rewrite_rel = heap_open(RewriteRelationId, RowExclusiveLock); + SysScanDesc rewrite_scan = systable_beginscan(rewrite_rel, RewriteOidIndexId, true, NULL, 1, &entry); + HeapTuple rewrite_tup = systable_getnext(rewrite_scan); + Form_pg_rewrite rewrite_form = (Form_pg_rewrite)GETSTRUCT(rewrite_tup); + // update pg_rewrite + char* actiontree = nodeToString((Node*)evAction); + Datum 
values[Natts_pg_rewrite] = { 0 }; + bool nulls[Natts_pg_rewrite] = { 0 }; + bool replaces[Natts_pg_rewrite] = { 0 }; + HeapTuple new_dep_tuple; + values[Anum_pg_rewrite_ev_action - 1] = CStringGetTextDatum(actiontree); + replaces[Anum_pg_rewrite_ev_action - 1] = true; + new_dep_tuple = heap_modify_tuple(rewrite_tup, RelationGetDescr(rewrite_rel), values, nulls, replaces); + simple_heap_update(rewrite_rel, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rewrite_rel, new_dep_tuple); + CommandCounterIncrement(); + heap_freetuple_ext(new_dep_tuple); + pfree_ext(actiontree); + // get new_query_str from pg_rewrite + Query* query = (Query*)linitial(evAction); + StringInfoData buf; + initStringInfo(&buf); + Relation ev_relation = heap_open(rewrite_form->ev_class, AccessShareLock); + get_query_def(query, + &buf, + NIL, + RelationGetDescr(ev_relation), + 0, + -1, + 0, + false, + false, + NULL, + false, + false); + appendStringInfo(&buf, ";"); + ViewInfoForAdd * info = static_cast(palloc(sizeof(ViewInfoForAdd))); + info->ev_class = rewrite_form->ev_class; + info->query_string = pstrdup(buf.data); + heap_close(ev_relation, AccessShareLock); + FreeStringInfo(&buf); + new_query_str = lappend(new_query_str, info); + *query_str = new_query_str; systable_endscan(rewrite_scan); heap_close(rewrite_rel, RowExclusiveLock); } +bool UpdateChangedColumnForView(Oid viewid, Oid relid, int2 attnum, Oid rw_objid, + List **p_originEvAction, List **p_newEvAction, Form_pg_attribute attForm) +{ + if (*p_originEvAction == NIL) { + *p_originEvAction = GetOriginalViewQuery(rw_objid); + } + PG_TRY(); + { + if (*p_newEvAction == NIL) { + *p_newEvAction = GetRefreshedViewQuery(viewid, rw_objid); + } + } + PG_CATCH(); + { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The view %s is invalid, please make it valid before operation.", + get_rel_name(viewid)), + errhint("Please re-add missing table fields."))); + } + PG_END_TRY(); + List* originEvAction = *p_originEvAction; 
+ List* newEvAction = *p_newEvAction; + bool is_changed = false; + Query* query = (Query*)linitial(originEvAction); + Query* freshed_query = (Query*)linitial(newEvAction); + ViewQueryCheck_context context; + context.relid = relid; + context.attnum = attnum; + context.query = query; + context.attForm = attForm; + // check whether the querytree's targetEntry has changed, + // and update the pg_attribute entry of the changed column if so + ListCell* lc1 = NULL; + ListCell* lc2 = NULL; + forboth (lc1, query->targetList, lc2, freshed_query->targetList) { + TargetEntry* tle = (TargetEntry*)lfirst(lc1); + bool var_changed = check_changed_tle_walker((Node*)(tle->expr), &context); + if (var_changed) { + TargetEntry* tle2 = (TargetEntry*)lfirst(lc2); + UpdatePgAttributeForView(tle, tle2, viewid, attForm); + } + is_changed |= var_changed; + } + return is_changed; +} + /* * create or replace view when the table has view. * 1. add column with first or after col_name. @@ -32758,7 +32925,7 @@ static void ATPrepAlterModifyColumn(List** wqueue, AlteredTableInfo* tab, Relati def->raw_default = tmp_expr; } -static char* GetCreateViewCommand(const char *rel_name, HeapTuple tup, Form_pg_class reltup, Oid pg_rewrite_oid, Oid view_oid) +char* GetCreateViewCommand(const char *rel_name, HeapTuple tup, Form_pg_class reltup, Oid pg_rewrite_oid, Oid view_oid) { StringInfoData buf; ViewInfoForAdd* view_info = NULL; @@ -32767,9 +32934,13 @@ static char* GetCreateViewCommand(const char *rel_name, HeapTuple tup, Form_pg_c const char* ns_name = quote_identifier(get_namespace_name(reltup->relnamespace)); initStringInfo(&buf); - appendStringInfo(&buf, "CREATE OR REPLACE "); - if (reltup->relpersistence == RELPERSISTENCE_TEMP) { - appendStringInfo(&buf, "TEMPORARY "); + if (reltup->relkind == RELKIND_MATVIEW) { + appendStringInfo(&buf, "CREATE MATERIALIZED "); + } else { + appendStringInfo(&buf, "CREATE OR REPLACE "); + if (reltup->relpersistence == RELPERSISTENCE_TEMP) { + appendStringInfo(&buf, 
"TEMPORARY "); + } } if (ns_name) { appendStringInfo(&buf, "VIEW %s.%s(", ns_name, quote_identifier(NameStr(reltup->relname))); diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h index b7bb4c5155..ed21cd35fb 100644 --- a/src/include/commands/tablecmds.h +++ b/src/include/commands/tablecmds.h @@ -238,7 +238,12 @@ extern void CheckRelAutoIncrementIndex(Oid relid, LOCKMODE lockmode); extern void RebuildDependViewForProc(Oid proc_oid); extern void CheckPgRewriteWithDroppedColumn(Oid rel_oid, Oid rw_oid, Form_pg_attribute attForm, int2 old_attnum, char** attName, List **old_query_str); +extern void UpdatePgrewriteForView(Oid rw_oid, List* evAction, List **query_str); +extern List* GetRefreshedViewQuery(Oid view_oid, Oid rw_oid); +extern bool UpdateChangedColumnForView(Oid viewid, Oid relid, int2 attnum, Oid rw_objid, + List **originEvAction1, List **newEvAction1, Form_pg_attribute attForm); extern void ReplaceViewQueryFirstAfter(List *query_str); +extern char* GetCreateViewCommand(const char *rel_name, HeapTuple tup, Form_pg_class reltup, Oid pg_rewrite_oid, Oid view_oid); #ifdef USE_SPQ extern void spq_btbuild_update_pg_class(Relation heap, Relation index); #endif diff --git a/src/test/regress/expected/dependent_view.out b/src/test/regress/expected/dependent_view.out index 5459a3d3ea..9ce60f93df 100644 --- a/src/test/regress/expected/dependent_view.out +++ b/src/test/regress/expected/dependent_view.out @@ -671,6 +671,23 @@ select * from t12;-- ok (3,4) (2 rows) +-- test6 expression dependency +create table expr_dependency_t1(id int); +create view expr_dependency_v1 as select id + 1 from expr_dependency_t1; +alter table expr_dependency_t1 modify id int8; +select * from expr_dependency_v1; + ?column? 
+---------- +(0 rows) + +create table expr_dependency_t2(name varchar(10)); +create view expr_dependency_v2 as select name || 'aa' from expr_dependency_t2; +alter table expr_dependency_t2 modify name int; +select * from expr_dependency_v2; + ?column? +---------- +(0 rows) + --- clean drop schema dependent_view cascade; --?.* @@ -687,3 +704,7 @@ drop cascades to view circular_dependency_v3 drop cascades to table t11 drop cascades to view v11 drop cascades to table t12 +drop cascades to table expr_dependency_t1 +drop cascades to view expr_dependency_v1 +drop cascades to table expr_dependency_t2 +drop cascades to view expr_dependency_v2 diff --git a/src/test/regress/sql/dependent_view.sql b/src/test/regress/sql/dependent_view.sql index 2c0c706ef5..87a92fb565 100644 --- a/src/test/regress/sql/dependent_view.sql +++ b/src/test/regress/sql/dependent_view.sql @@ -245,6 +245,16 @@ alter table t11 modify b numeric; select * from t12;-- ok select * from v11;-- expect error select * from t12;-- ok +-- test6 expression dependency +create table expr_dependency_t1(id int); +create view expr_dependency_v1 as select id + 1 from expr_dependency_t1; +alter table expr_dependency_t1 modify id int8; +select * from expr_dependency_v1; + +create table expr_dependency_t2(name varchar(10)); +create view expr_dependency_v2 as select name || 'aa' from expr_dependency_t2; +alter table expr_dependency_t2 modify name int; +select * from expr_dependency_v2; --- clean drop schema dependent_view cascade; -- Gitee From 34978f83c2016b6b8afd4ba442c26c7b28e07766 Mon Sep 17 00:00:00 2001 From: shenzheng4 Date: Wed, 24 Jul 2024 11:27:33 +0800 Subject: [PATCH 106/347] L0_record_query_plan --- .../statement/instr_handle_mgr.cpp | 4 +- .../instruments/statement/instr_statement.cpp | 35 +++++++-- .../process/threadpool/knl_session.cpp | 5 ++ src/gausskernel/runtime/executor/execMain.cpp | 12 ++- src/gausskernel/storage/lmgr/proc.cpp | 78 ++++++++++++++++++- src/include/executor/executor.h | 5 ++ 
src/include/instruments/instr_statement.h | 1 + src/include/knl/knl_session.h | 5 ++ src/include/storage/proc.h | 1 + .../expected/instr_query_plan_threshold.out | 38 ++++++++- .../sql/instr_query_plan_threshold.sql | 15 +++- 11 files changed, 185 insertions(+), 14 deletions(-) diff --git a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp index 4565d0e78b..480bbbb7f4 100644 --- a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp @@ -233,7 +233,7 @@ static void print_stmt_basic_debug_log(int log_level) errmsg("\t soft parse: %lu", CURRENT_STMT_METRIC_HANDLE->parse.soft_parse))); ereport(log_level, (errmodule(MOD_INSTR), errmsg("\t hard parse: %lu", CURRENT_STMT_METRIC_HANDLE->parse.hard_parse))); - if (CURRENT_STMT_METRIC_HANDLE->level == STMT_TRACK_L1 || CURRENT_STMT_METRIC_HANDLE->level == STMT_TRACK_L2) { + if (CURRENT_STMT_METRIC_HANDLE->level >= STMT_TRACK_L0 || CURRENT_STMT_METRIC_HANDLE->level <= STMT_TRACK_L2) { ereport(log_level, (errmodule(MOD_INSTR), errmsg("\t query plan size: %lu", CURRENT_STMT_METRIC_HANDLE->plan_size))); ereport(log_level, (errmodule(MOD_INSTR), @@ -320,7 +320,7 @@ static void print_stmt_net_debug_log(int log_level) static void print_stmt_summary_lock_debug_log(int log_level) { ereport(log_level, (errmodule(MOD_INSTR), errmsg("6, ----Lock Summary Info Area----"))); - if (CURRENT_STMT_METRIC_HANDLE->level == STMT_TRACK_L1 || CURRENT_STMT_METRIC_HANDLE->level == STMT_TRACK_L2) { + if (CURRENT_STMT_METRIC_HANDLE->level >= STMT_TRACK_L0 && CURRENT_STMT_METRIC_HANDLE->level <= STMT_TRACK_L2) { ereport(log_level, (errmodule(MOD_INSTR), errmsg("\t lock cnt: %ld", CURRENT_STMT_METRIC_HANDLE->lock_summary.lock_cnt))); ereport(log_level, (errmodule(MOD_INSTR), diff --git a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp 
b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp index ee7be526a6..09ec6cdfeb 100755 --- a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp @@ -67,6 +67,7 @@ #include "commands/copy.h" #include "storage/lmgr.h" #include "instruments/instr_func_control.h" +#include "storage/proc.h" #define MAX_SLOW_QUERY_RETENSION_DAYS 604800 #define MAX_FULL_SQL_RETENSION_SEC 86400 @@ -2238,8 +2239,8 @@ void instr_stmt_report_unique_sql_info(const PgStat_TableCounts *agg_table_stat, static inline bool instr_stmt_level_fullsql_open() { int fullsql_level = u_sess->statement_cxt.statement_level[0]; - /* only record query plan when level >= L1 */ - return fullsql_level >= STMT_TRACK_L1 && fullsql_level <= STMT_TRACK_L2; + /* record query plan when level >= L0 */ + return fullsql_level >= STMT_TRACK_L0 && fullsql_level <= STMT_TRACK_L2; } static inline bool instr_stmt_level_slowsql_only_open() @@ -2249,8 +2250,8 @@ static inline bool instr_stmt_level_slowsql_only_open() } int slowsql_level = u_sess->statement_cxt.statement_level[1]; - /* only record query plan when level >= L1 */ - return slowsql_level >= STMT_TRACK_L1 && slowsql_level <= STMT_TRACK_L2; + /* record query plan when level >= L0 */ + return slowsql_level >= STMT_TRACK_L0 && slowsql_level <= STMT_TRACK_L2; } static inline bool instr_stmt_is_slowsql() @@ -2282,11 +2283,29 @@ bool instr_stmt_need_track_plan() return false; } +void instr_stmt_exec_report_query_plan(QueryDesc *queryDesc) +{ + if (instr_stmt_level_fullsql_open()) { + instr_stmt_report_query_plan(queryDesc); + return; + } + + if (instr_stmt_level_slowsql_only_open()) { + if (CURRENT_STMT_METRIC_HANDLE->slow_query_threshold == 0) { + instr_stmt_report_query_plan(queryDesc); + return; + } + + int delayms = u_sess->attr.attr_storage.log_min_duration_statement; + (void)enable_query_plan_sig_alarm(delayms); + } +} + void instr_stmt_report_query_plan(QueryDesc *queryDesc) { 
StatementStatContext *ssctx = (StatementStatContext *)u_sess->statement_cxt.curStatementMetrics; - if (queryDesc == NULL || ssctx == NULL || ssctx->level <= STMT_TRACK_L0 - || ssctx->level > STMT_TRACK_L2 || (ssctx->plan_size != 0 && !u_sess->unique_sql_cxt.is_open_cursor) + if (queryDesc == NULL || ssctx == NULL || ssctx->level > STMT_TRACK_L2 + || (ssctx->plan_size != 0 && !u_sess->unique_sql_cxt.is_open_cursor) || (u_sess->statement_cxt.executer_run_level > 1 && !IS_UNIQUE_SQL_TRACK_ALL)) { return; } @@ -2313,6 +2332,10 @@ void instr_stmt_report_query_plan(QueryDesc *queryDesc) (errmodule(MOD_INSTR), errmsg("exec_auto_explain %s %s to %lu", ssctx->query_plan, queryDesc->sourceText, u_sess->unique_sql_cxt.unique_sql_id))); pfree(es.str->data); + + if (u_sess->statement_cxt.is_exceed_query_plan_threshold) { + u_sess->statement_cxt.is_exceed_query_plan_threshold = false; + } } /* check the header and valid length of the detail binary data */ diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp index 42362a9387..6717a35169 100755 --- a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -1167,6 +1167,11 @@ static void knl_u_statement_init(knl_u_statement_context* statement_cxt) statement_cxt->wait_events_bms = NULL; statement_cxt->enable_wait_events_bitmap = false; statement_cxt->is_session_bms_active = false; + + statement_cxt->root_query_plan = NULL; + statement_cxt->query_plan_threshold_active = false; + statement_cxt->is_exceed_query_plan_threshold = false; + statement_cxt->record_query_plan_fin_time = 0; } void knl_u_relmap_init(knl_u_relmap_context* relmap_cxt) diff --git a/src/gausskernel/runtime/executor/execMain.cpp b/src/gausskernel/runtime/executor/execMain.cpp index a47d075111..739363a4e4 100755 --- a/src/gausskernel/runtime/executor/execMain.cpp +++ b/src/gausskernel/runtime/executor/execMain.cpp @@ -466,11 +466,12 @@ void 
ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count) int instrument_option = 0; bool has_track_operator = false; char* old_stmt_name = u_sess->pcache_cxt.cur_stmt_name; + u_sess->statement_cxt.root_query_plan = queryDesc; u_sess->statement_cxt.executer_run_level++; if (u_sess->SPI_cxt._connected >= 0) { u_sess->pcache_cxt.cur_stmt_name = NULL; } - instr_stmt_report_query_plan(queryDesc); + instr_stmt_exec_report_query_plan(queryDesc); exec_explain_plan(queryDesc); if (u_sess->attr.attr_resource.use_workload_manager && u_sess->attr.attr_resource.resource_track_level == RESOURCE_TRACK_OPERATOR && @@ -721,6 +722,7 @@ void ExecutorFinish(QueryDesc *queryDesc) } else { standard_ExecutorFinish(queryDesc); } + } void standard_ExecutorFinish(QueryDesc *queryDesc) @@ -772,6 +774,14 @@ void standard_ExecutorFinish(QueryDesc *queryDesc) */ void ExecutorEnd(QueryDesc *queryDesc) { + /* + * for a very few cases, query plan not be recorded during the execution phase, + * we record again before executor end. + */ + if (unlikely(u_sess->statement_cxt.is_exceed_query_plan_threshold)) { + instr_stmt_report_query_plan(queryDesc); + } + if (ExecutorEnd_hook) { (*ExecutorEnd_hook)(queryDesc); } else { diff --git a/src/gausskernel/storage/lmgr/proc.cpp b/src/gausskernel/storage/lmgr/proc.cpp index 2ee2b1d995..931920101f 100755 --- a/src/gausskernel/storage/lmgr/proc.cpp +++ b/src/gausskernel/storage/lmgr/proc.cpp @@ -73,6 +73,7 @@ #include "instruments/percentile.h" #include "instruments/snapshot.h" #include "instruments/instr_statement.h" +#include "instruments/instr_handle_mgr.h" #include "utils/builtins.h" #include "instruments/ash.h" #include "pgaudit.h" @@ -96,6 +97,7 @@ static void CheckIdleInTransactionSessionTimeout(void); static bool CheckStandbyTimeout(void); static void FiniNuma(int code, Datum arg); static inline void ReleaseChildSlot(void); +static void CheckQueryPlanThreshold(); /* * Report shared-memory space needed by InitProcGlobal. 
@@ -2628,6 +2630,59 @@ bool enable_lockwait_sig_alarm(int delayms) return true; } +bool enable_query_plan_sig_alarm(int delayms) +{ + if (CURRENT_STMT_METRIC_HANDLE == NULL || u_sess->statement_cxt.stmt_stat_cxt == NULL) { + return true; + } + + if (u_sess->statement_cxt.query_plan_threshold_active) { + return true; + } + + TimestampTz start_time = CURRENT_STMT_METRIC_HANDLE->start_time; + u_sess->statement_cxt.record_query_plan_fin_time = TimestampTzPlusMilliseconds(start_time, delayms); + + /* if query plan timeout > session timeout or statement timeout, + * don't set timer to ensure that sess or statement timeout take effect + */ + if (t_thrd.storage_cxt.statement_timeout_active && + (u_sess->statement_cxt.record_query_plan_fin_time >= t_thrd.storage_cxt.statement_fin_time)) { + return true; + } + + if (u_sess->storage_cxt.session_timeout_active && + (u_sess->statement_cxt.record_query_plan_fin_time >= u_sess->storage_cxt.session_fin_time)) { + return true; + } + +#ifndef ENABLE_MULTIPLE_NODES + if (u_sess->storage_cxt.idle_in_transaction_session_timeout_active && + (u_sess->statement_cxt.record_query_plan_fin_time >= + u_sess->storage_cxt.idle_in_transaction_session_fin_time)) { + return true; + } +#endif + u_sess->statement_cxt.query_plan_threshold_active = true; + + long secs; + int usecs; + /* start to set signal timer, if now >= query plan timeout, start 1us timer */ + TimestampDifference(GetCurrentTimestamp(), u_sess->statement_cxt.record_query_plan_fin_time, &secs, &usecs); + if (secs == 0 && usecs == 0) { + usecs = 1; + } + + struct itimerval timeval; + errno_t rc = memset_s(&timeval, sizeof(struct itimerval), 0, sizeof(struct itimerval)); + securec_check(rc, "\0", "\0"); + timeval.it_value.tv_sec = secs; + timeval.it_value.tv_usec = usecs; + if (gs_signal_settimer(&timeval)) + return false; + return true; +} + /* Enable the session timeout timer. 
*/ bool enable_session_sig_alarm(int delayms) { @@ -2799,13 +2854,15 @@ bool disable_sig_alarm(bool is_statement_timeout) * We will re-enable the interrupt if necessary in CheckStatementTimeout. */ if (t_thrd.storage_cxt.statement_timeout_active || t_thrd.storage_cxt.deadlock_timeout_active || - t_thrd.storage_cxt.lockwait_timeout_active || t_thrd.wlm_cxt.wlmalarm_timeout_active) { + t_thrd.storage_cxt.lockwait_timeout_active || t_thrd.wlm_cxt.wlmalarm_timeout_active || + u_sess->statement_cxt.query_plan_threshold_active) { if (gs_signal_canceltimer()) { t_thrd.storage_cxt.statement_timeout_active = false; t_thrd.storage_cxt.cancel_from_timeout = false; t_thrd.storage_cxt.deadlock_timeout_active = false; t_thrd.storage_cxt.lockwait_timeout_active = false; t_thrd.wlm_cxt.wlmalarm_timeout_active = false; + u_sess->statement_cxt.query_plan_threshold_active = false; return false; } } @@ -2814,6 +2871,7 @@ bool disable_sig_alarm(bool is_statement_timeout) t_thrd.storage_cxt.deadlock_timeout_active = false; t_thrd.storage_cxt.lockwait_timeout_active = false; t_thrd.wlm_cxt.wlmalarm_timeout_active = false; + u_sess->statement_cxt.query_plan_threshold_active = false; /* Cancel or reschedule statement timeout */ if (is_statement_timeout) { @@ -3045,6 +3103,10 @@ void handle_sig_alarm(SIGNAL_ARGS) } } + if (u_sess->statement_cxt.query_plan_threshold_active) { + CheckQueryPlanThreshold(); + } + errno = save_errno; } @@ -3326,3 +3388,17 @@ void BecomeLockGroupMember(PGPROC *leader) dlist_push_tail(&leader->lockGroupMembers, &t_thrd.proc->lockGroupLink); LWLockRelease(leaderLwlock); } + +void CheckQueryPlanThreshold() +{ + if (u_sess->attr.attr_storage.log_min_duration_statement == 0 || + !u_sess->statement_cxt.query_plan_threshold_active) { + return; + } + + TimestampTz now = GetCurrentTimestamp(); + if (now > u_sess->statement_cxt.record_query_plan_fin_time) { + u_sess->statement_cxt.is_exceed_query_plan_threshold = true; + u_sess->statement_cxt.query_plan_threshold_active = 
false; + } +} diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 0889f858bf..c587c01198 100755 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -263,6 +263,7 @@ extern TupleTableSlot* FetchPlanSlot(PlanState* subPlanState, ProjectionInfo** p extern long ExecGetPlanMemCost(Plan* node); extern bool executorEarlyStop(); +extern void instr_stmt_report_query_plan(QueryDesc *queryDesc); /* ---------------------------------------------------------------- * ExecProcNode @@ -275,6 +276,10 @@ extern bool executorEarlyStop(); static inline TupleTableSlot *ExecProcNode(PlanState *node) { + if (u_sess->statement_cxt.is_exceed_query_plan_threshold) { + instr_stmt_report_query_plan((QueryDesc *)u_sess->statement_cxt.root_query_plan); + u_sess->statement_cxt.root_query_plan = NULL; + } TupleTableSlot* result; Assert(node->ExecProcNode); diff --git a/src/include/instruments/instr_statement.h b/src/include/instruments/instr_statement.h index 56712bec08..9df4ba72cf 100644 --- a/src/include/instruments/instr_statement.h +++ b/src/include/instruments/instr_statement.h @@ -225,6 +225,7 @@ extern void instr_stmt_report_unique_sql_info(const PgStat_TableCounts *agg_tabl extern void instr_stmt_report_txid(uint64 txid); extern void instr_stmt_report_query(uint64 unique_query_id); extern void instr_stmt_report_query_plan(QueryDesc *queryDesc); +extern void instr_stmt_exec_report_query_plan(QueryDesc *queryDesc); extern void instr_stmt_report_debug_query_id(uint64 debug_query_id); extern void instr_stmt_report_trace_id(char *trace_id); extern void instr_stmt_report_start_time(); diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 3184dcf9ef..2866df9d79 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -2358,6 +2358,11 @@ typedef struct knl_u_statement_context { bool enable_wait_events_bitmap; /* change to true in init stage of stmt handle */ int64 current_row_count; /* 
Record the number of rows affected by current query */ int64 last_row_count; /* Record the number of rows affected by last query */ + + void *root_query_plan; /* Record the root query plan before report */ + bool query_plan_threshold_active; /* active if need start query_plan threshold timer */ + bool is_exceed_query_plan_threshold; /* if true when slow sql take effect */ + TimestampTz record_query_plan_fin_time; /* finish time when execute time exceed log_min_duration_statement */ } knl_u_statement_context; struct Qid_key { diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index f93d52724f..d8f8403804 100755 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -554,6 +554,7 @@ extern bool enable_sig_alarm(int delayms, bool is_statement_timeout); extern bool enable_lockwait_sig_alarm(int delayms); extern bool enable_session_sig_alarm(int delayms); extern bool enable_idle_in_transaction_session_sig_alarm(int delayms); +extern bool enable_query_plan_sig_alarm(int delayms); extern bool disable_session_sig_alarm(void); extern bool disable_idle_in_transaction_session_sig_alarm(void); diff --git a/src/test/regress/expected/instr_query_plan_threshold.out b/src/test/regress/expected/instr_query_plan_threshold.out index d483478b98..3b2228f628 100644 --- a/src/test/regress/expected/instr_query_plan_threshold.out +++ b/src/test/regress/expected/instr_query_plan_threshold.out @@ -11,7 +11,7 @@ select reset_unique_sql('GLOBAL', 'ALL', 0); (1 row) -- full sql -set track_stmt_stat_level = 'L1,OFF'; +set track_stmt_stat_level = 'L0,OFF'; set log_min_duration_statement = '0ms'; set statement_timeout = '0ms'; delete statement_history; @@ -71,7 +71,7 @@ select count(*) from statement_history where query like '%from query_plan_table% (1 row) -- slow sql -set track_stmt_stat_level = 'OFF,L1'; +set track_stmt_stat_level = 'OFF,L0'; set log_min_duration_statement = '0ms'; delete statement_history; select reset_unique_sql('GLOBAL', 'ALL', 0); @@ -180,6 
+180,40 @@ select count(*) from statement_history where query like '%from query_plan_table% 2 (1 row) +set log_min_duration_statement = '2s'; +set statement_timeout = '1s'; +delete statement_history; +select reset_unique_sql('GLOBAL', 'ALL', 0); + reset_unique_sql +------------------ + t +(1 row) + +select t1.id from query_plan_table t1 where t1.id = 1; + id +---- + 1 +(1 row) + +select t1.num from query_plan_table t1 where t1.num = 1; + num +----- + 1 +(1 row) + +select t1.num, pg_sleep(1) from query_plan_table t1 where t1.num = 1; +ERROR: canceling statement due to statement timeout +CONTEXT: referenced column: pg_sleep +select pg_sleep(1); +ERROR: canceling statement due to statement timeout +CONTEXT: referenced column: pg_sleep +-- expect 0 row +select count(*) from statement_history where query like '%from query_plan_table%' and query_plan is not null; + count +------- + 0 +(1 row) + set track_stmt_stat_level = 'OFF,L0'; set log_min_duration_statement = '30min'; set statement_timeout = '0ms'; diff --git a/src/test/regress/sql/instr_query_plan_threshold.sql b/src/test/regress/sql/instr_query_plan_threshold.sql index dce7ff4eec..a70bea87d4 100644 --- a/src/test/regress/sql/instr_query_plan_threshold.sql +++ b/src/test/regress/sql/instr_query_plan_threshold.sql @@ -7,7 +7,7 @@ insert into query_plan_table select generate_series(1,10000),generate_series(1,1 select reset_unique_sql('GLOBAL', 'ALL', 0); -- full sql -set track_stmt_stat_level = 'L1,OFF'; +set track_stmt_stat_level = 'L0,OFF'; set log_min_duration_statement = '0ms'; set statement_timeout = '0ms'; delete statement_history; @@ -28,7 +28,7 @@ select pg_sleep(1); select count(*) from statement_history where query like '%from query_plan_table%' and query_plan is not null; -- slow sql -set track_stmt_stat_level = 'OFF,L1'; +set track_stmt_stat_level = 'OFF,L0'; set log_min_duration_statement = '0ms'; delete statement_history; select reset_unique_sql('GLOBAL', 'ALL', 0); @@ -59,6 +59,17 @@ select 
pg_sleep(1); -- expect 2 row select count(*) from statement_history where query like '%from query_plan_table%' and query_plan is not null; +set log_min_duration_statement = '2s'; +set statement_timeout = '1s'; +delete statement_history; +select reset_unique_sql('GLOBAL', 'ALL', 0); +select t1.id from query_plan_table t1 where t1.id = 1; +select t1.num from query_plan_table t1 where t1.num = 1; +select t1.num, pg_sleep(1) from query_plan_table t1 where t1.num = 1; +select pg_sleep(1); +-- expect 0 row +select count(*) from statement_history where query like '%from query_plan_table%' and query_plan is not null; + set track_stmt_stat_level = 'OFF,L0'; set log_min_duration_statement = '30min'; set statement_timeout = '0ms'; -- Gitee From 4ad7ea0cc317d1772699b3ef71be104134a6c5bd Mon Sep 17 00:00:00 2001 From: q00421813 Date: Sun, 28 Jul 2024 11:27:29 +0800 Subject: [PATCH 107/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=BA=8B=E5=8A=A1?= =?UTF-8?q?=E5=B9=B6=E5=8F=91bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/ubtree/ubtutils.cpp | 5 ----- src/gausskernel/storage/access/ustore/knl_uheap.cpp | 4 ++-- src/gausskernel/storage/access/ustore/knl_uvisibility.cpp | 3 +++ 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/gausskernel/storage/access/ubtree/ubtutils.cpp b/src/gausskernel/storage/access/ubtree/ubtutils.cpp index 64030501b0..83687a96d8 100644 --- a/src/gausskernel/storage/access/ubtree/ubtutils.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtutils.cpp @@ -427,11 +427,6 @@ static bool UBTreeVisibilityCheckWrap(IndexScanDesc scan, Page page, OffsetNumbe */ TransactionIdStatus UBTreeCheckXid(TransactionId xid) { - if (xid == FrozenTransactionId || (TransactionIdIsNormal(xid) && - TransactionIdPrecedes(xid, pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid)) && - !RecoveryInProgress())) { - return XID_COMMITTED; - } TransactionIdStatus ts = TransactionIdGetStatus(xid); 
/* Please refer to HeapTupleSatisfiesVaccum */ if (ts == XID_INPROGRESS) { diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 6a58e3060b..47f9d494d4 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -531,6 +531,7 @@ void UHeapPagePruneFSM(Relation relation, Buffer buffer, TransactionId fxid, Pag static ShortTransactionId UHeapTupleSetModifiedXid(Relation relation, Buffer buffer, UHeapTuple utuple, TransactionId xid) { + Assert(!UHEAP_XID_IS_LOCK(utuple->disk_tuple->flag)); TransactionId xidbase = InvalidTransactionId; ShortTransactionId tupleXid = 0; UHeapTupleCopyBaseFromPage(utuple, BufferGetPage(buffer)); @@ -3106,10 +3107,8 @@ check_tup_satisfies_update: /* oldtup should be pointing to right place in page */ Assert(oldtup.disk_tuple == (UHeapDiskTuple)UPageGetRowData(page, lp)); - int16 tmpLockInfo = oldtup.disk_tuple->flag & SINGLE_LOCKER_INFOMASK; UHeapTupleHeaderSetTDSlot(oldtup.disk_tuple, oldtupNewTransSlot); oldtup.disk_tuple->flag &= ~UHEAP_VIS_STATUS_MASK; - oldtup.disk_tuple->flag |= tmpLockInfo; oldtup.disk_tuple->flag |= infomaskOldTuple; tupleXid = UHeapTupleSetModifiedXid(relation, buffer, &oldtup, fxid); @@ -3119,6 +3118,7 @@ check_tup_satisfies_update: uheaptup->disk_tuple->flag |= infomaskNewTuple; uheaptup->xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; if (buffer == newbuf) { + Assert(!UHEAP_XID_IS_LOCK(uheaptup->disk_tuple->flag)); uheaptup->disk_tuple->flag |= SINGLE_LOCKER_XID_IS_TRANS; UHeapTupleSetRawXid(uheaptup, tupleXid); } else { diff --git a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp index 738841c31b..b5340c9984 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp @@ -73,6 +73,7 @@ static UHeapTupleStatus 
UHeapTupleGetStatus(const UHeapTuple utup) return UHEAPTUPLESTATUS_MULTI_LOCKED; } else if ((SINGLE_LOCKER_XID_IS_EXCL_LOCKED(infomask) || SINGLE_LOCKER_XID_IS_SHR_LOCKED(infomask)) && TransactionIdIsNormal(locker) && !TransactionIdOlderThanFrozenXid(locker)) { + Assert(!UHEAP_XID_IS_TRANS(utuple->flag)); return UHEAPTUPLESTATUS_LOCKED; // locked by select-for-update or select-for-share } else if (infomask & UHEAP_INPLACE_UPDATED) { return UHEAPTUPLESTATUS_INPLACE_UPDATED; // modified or locked by lock-for-update @@ -243,6 +244,7 @@ bool UHeapTupleSatisfiesVisibility(UHeapTuple uhtup, Snapshot snapshot, Buffer b if (utuple != NULL && TransactionIdIsNormal(fxid) && IsMVCCSnapshot(snapshot) && SINGLE_LOCKER_XID_IS_EXCL_LOCKED(utuple->disk_tuple->flag)) { Assert(UHEAP_XID_IS_EXCL_LOCKED(utuple->disk_tuple->flag)); + Assert(!UHEAP_XID_IS_TRANS(utuple->disk_tuple->flag)); lockerXid = UHeapTupleGetRawXid(utuple); tupleIsExclusivelyLocked = true; } @@ -1261,6 +1263,7 @@ TM_Result UHeapTupleSatisfiesUpdate(Relation rel, Snapshot snapshot, ItemPointer UHeapTupleStatus tupleStatus = UHeapTupleGetStatus(utuple); /* tuple is no longer locked by a single locker */ if (tupleStatus != UHEAPTUPLESTATUS_LOCKED && SINGLE_LOCKER_XID_IS_EXCL_LOCKED(tupleData->flag)) { + Assert(!UHEAP_XID_IS_TRANS(utuple->disk_tuple->flag)); UHeapTupleHeaderClearSingleLocker(tupleData); } -- Gitee From c9f247afd73aee36b8038e2d7f8f294f560089d6 Mon Sep 17 00:00:00 2001 From: james Date: Thu, 25 Jul 2024 11:01:29 +0800 Subject: [PATCH 108/347] add hba conf again --- src/bin/gs_guc/pg_hba.cpp | 10 +- src/common/backend/catalog/builtin_funcs.ini | 4 + src/common/backend/lib/stringinfo.cpp | 12 + src/common/backend/libpq/auth.cpp | 2 +- src/common/backend/libpq/hba.cpp | 335 +++++++++++++++++- src/common/backend/utils/init/globals.cpp | 3 +- .../rollback-post_catalog_maindb_92_944.sql | 1 + .../rollback-post_catalog_otherdb_92_944.sql | 1 + .../upgrade-post_catalog_maindb_92_944.sql | 7 + 
.../upgrade-post_catalog_otherdb_92_944.sql | 7 + src/include/lib/stringinfo.h | 6 + src/include/libpq/hba.h | 9 + src/include/utils/builtins.h | 1 + src/test/regress/expected/pg_ls_dir.out | 9 + src/test/regress/sql/pg_ls_dir.sql | 2 + 15 files changed, 389 insertions(+), 20 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_944.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_944.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_944.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_944.sql diff --git a/src/bin/gs_guc/pg_hba.cpp b/src/bin/gs_guc/pg_hba.cpp index e20f737dab..51a7c11b00 100644 --- a/src/bin/gs_guc/pg_hba.cpp +++ b/src/bin/gs_guc/pg_hba.cpp @@ -58,6 +58,7 @@ #include "libpq/libpq-fe.h" #include "flock.h" #include "libpq/hba.h" +#include "utils/builtins.h" #include "libpq/pqsignal.h" #include "getopt_long.h" #include "miscadmin.h" @@ -116,15 +117,6 @@ char *g_hbaDatabase = NULL; char *g_hbaUser = NULL; char *g_hbaAddr = NULL; -/* - * A single string token lexed from the HBA config file, together with whether - * the token had been quoted. 
- */ -typedef struct HbaToken { - char* string; - bool quoted; -} HbaToken; - /* * @@GaussDB@@ * Brief :update or add config_parameter diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 87e62eb8b8..54fbec6f8a 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -13041,4 +13041,8 @@ AddFuncGroup( AddFuncGroup( "gs_get_recv_locations", 1, AddBuiltinFunc(_0(2872), _1("gs_get_recv_locations"), _2(0), _3(false), _4(true), _5(gs_get_recv_locations), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(4, 25, 25, 25, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "received_lsn", "write_lsn", "flush_lsn", "replay_lsn"), _24(NULL), _25("gs_get_recv_locations"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("statistics: information about WAL locations"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_get_hba_conf", 1, + AddBuiltinFunc(_0(2873), _1("gs_get_hba_conf"), _2(0), _3(true), _4(true), _5(gs_get_hba_conf), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(5, 25, 25, 25, 25, 25), _22(5, 'o', 'o', 'o', 'o','o'), _23(5, "type", "database", "user", "address", "method"), _24(NULL), _25("gs_get_hba_conf"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("config: information about pg_hba conf file"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), \ No newline at end of file diff --git a/src/common/backend/lib/stringinfo.cpp b/src/common/backend/lib/stringinfo.cpp index cfe2980405..e0b1594e6e 100644 --- 
a/src/common/backend/lib/stringinfo.cpp +++ b/src/common/backend/lib/stringinfo.cpp @@ -341,6 +341,18 @@ void appendStringInfoChar(StringInfo str, char ch) str->data[str->len] = '\0'; } +/* + * popStringInfoChar + * + * Pop a single byte to str. + */ +void popStringInfoChar(StringInfo str) +{ + /* OK, pop the character */ + str->len--; + str->data[str->len] = '\0'; +} + /* * appendStringInfoSpaces * diff --git a/src/common/backend/libpq/auth.cpp b/src/common/backend/libpq/auth.cpp index 156682ec6a..c708321ee9 100644 --- a/src/common/backend/libpq/auth.cpp +++ b/src/common/backend/libpq/auth.cpp @@ -3749,4 +3749,4 @@ static int CheckIAMAuth(Port* port) passwd = NULL; return STATUS_OK; } -#endif +#endif \ No newline at end of file diff --git a/src/common/backend/libpq/hba.cpp b/src/common/backend/libpq/hba.cpp index c074565e5f..41dfd573e4 100644 --- a/src/common/backend/libpq/hba.cpp +++ b/src/common/backend/libpq/hba.cpp @@ -23,6 +23,9 @@ #include #include #include +#ifdef HAVE_NETINET_TCP_H +#include +#endif #include #include @@ -30,12 +33,14 @@ #include "libpq/ip.h" #include "libpq/libpq.h" #include "libpq/auth.h" +#include "libpq/hba.h" #include "pgxc/pgxc.h" #include "postmaster/postmaster.h" #include "regex/regex.h" #include "replication/walsender.h" #include "storage/smgr/fd.h" #include "storage/ipc.h" +#include "funcapi.h" #include "utils/acl.h" #include "utils/guc.h" #include "utils/lsyscache.h" @@ -63,15 +68,6 @@ typedef struct check_network_data { bool result; /* set to true if match */ } check_network_data; -/* - * A single string token lexed from the HBA config file, together with whether - * the token had been quoted. 
- */ -typedef struct HbaToken { - char* string; - bool quoted; -} HbaToken; - static MemoryContext tokenize_file(const char* filename, FILE* file, List** lines, List** line_nums); static List* tokenize_inc_file(List* tokens, const char* outer_filename, const char* inc_filename); static bool parse_hba_auth_opt(char* name, char* val, HbaLine* hbaline); @@ -2474,3 +2470,324 @@ HeapTuple SearchUserHostName(const char* userName, Oid* oid) *oid = HeapTupleGetOid(roleTup); return roleTup; } + +static void get_ip_str(const struct sockaddr* addr, char *ip_str) +{ + const int MAX_IP_LEN = 64; /* default ip len */ + /* parse the ip address */ + if (AF_INET6 == addr->sa_family) { + (void)inet_ntop(AF_INET6, &((struct sockaddr_in6*)addr)->sin6_addr, ip_str, MAX_IP_LEN - 1); + } else if (AF_INET == addr->sa_family) { + (void)inet_ntop(AF_INET, &((struct sockaddr_in*)addr)->sin_addr, ip_str, MAX_IP_LEN - 1); + } +} + +static int32_t pg_sockaddr_mask_cidr(struct sockaddr_storage* mask) +{ + int32_t mask_bits = 0; + + switch (mask->ss_family) { + case AF_INET: { + + struct sockaddr_in* mask4 = (struct sockaddr_in*) mask; + uint32_t mask = ntohl(mask4->sin_addr.s_addr); + while (mask) { + mask_bits += mask & 1; + mask >>= 1; + } + break; + } + +#ifdef HAVE_IPV6 + case AF_INET6: { + + struct sockaddr_in6* mask6 = (struct sockaddr_in6*) mask; + for (int i = 0; i < 16; i++) { + uint8_t byte = mask6->sin6_addr.s6_addr[i]; + while (byte) { + mask_bits += byte & 1; + byte >>= 1; + } + } + break; + } +#endif + default: + return -1; + } + return mask_bits; +} +/* + * Read the whole of pg_hba conf returning it as record + */ +Datum gs_get_hba_conf(PG_FUNCTION_ARGS) +{ + #define GS_STAT_GET_HBA_CONF_COLS 5 + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + TupleDesc tupdesc = NULL; + Tuplestorestate *tupstore = NULL; + HbaLine* hba = NULL; + StringInfo item = NULL; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + Datum values[GS_STAT_GET_HBA_CONF_COLS]; + bool 
nulls[GS_STAT_GET_HBA_CONF_COLS]; + + errno_t rc = EOK; + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + return (Datum)0; + } + if (!(rsinfo->allowedModes & SFRM_Materialize)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + } + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE){ + ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("return type must be a row type"))); + } + // only superusers can see details + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("only superusers can see details"))); + } + + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + hba = (HbaLine*)palloc0(sizeof(HbaLine)); + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + item = makeStringInfo(); + + rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + + /* hba_rwlock will be released when ereport ERROR or FATAL. 
*/ + PG_ENSURE_ERROR_CLEANUP(hba_rwlock_cleanup, (Datum)0); + ListCell* line = NULL; + (void)pthread_rwlock_rdlock(&hba_rwlock); + foreach (line, g_instance.libpq_cxt.comm_parsed_hba_lines) { + /* + * memory copy here will not copy pointer types like List* and char*, + * the char* type in HbaLine will copy to session memctx by copy_hba_line() + */ + errno_t rc = memcpy_s(hba, sizeof(HbaLine), lfirst(line), sizeof(HbaLine)); + securec_check(rc, "\0", "\0"); + // reset buf + resetStringInfo(item); + /* parse the record type. */ + if(hba->conntype == ctLocal) { + appendStringInfoString(item, "local"); + } else if (hba->conntype == ctHostSSL) { + appendStringInfoString(item, "hostssl"); + } else if (hba->conntype == ctHostNoSSL) { + appendStringInfoString(item, "hostnossl"); + } else { + appendStringInfoString(item, "host"); + } + values[0] = CStringGetTextDatum(item->data); + /* parse the database. */ + resetStringInfo(item); + ListCell* cell = NULL; + foreach (cell, hba->databases) { + HbaToken* database = NULL; + database = (HbaToken*)lfirst(cell); + if(database->quoted) { + appendStringInfoChar(item, '"'); + appendStringInfoString(item, database->string); + appendStringInfoChar(item, '"'); + } else { + appendStringInfoString(item, database->string); + } + appendStringInfoChar(item, ','); + } + if(item->len > 0) { + popStringInfoChar(item); + } + values[1] = CStringGetTextDatum(item->data); + /* parse the role. 
*/ + resetStringInfo(item); + cell = NULL; + foreach (cell, hba->roles) { + HbaToken* role = NULL; + role = (HbaToken*)lfirst(cell); + if(role->quoted) { + appendStringInfoChar(item, '"'); + appendStringInfoString(item, role->string); + appendStringInfoChar(item, '"'); + } else { + appendStringInfoString(item, role->string); + } + appendStringInfoChar(item, ','); + } + if(item->len > 0) { + popStringInfoChar(item); + } + values[2] = CStringGetTextDatum(item->data); + /* parse the IP address field */ + resetStringInfo(item); + const int MAX_IP_ADDRESS_LEN = 64; + char ipstr[MAX_IP_ADDRESS_LEN] = {'\0'}; + char maskstr[MAX_IP_ADDRESS_LEN] = {'\0'}; + char portstr[MAX_IP_ADDRESS_LEN] = {'\0'}; + if(hba->conntype == ctLocal) { + appendStringInfoString(item, " "); + } else if (hba->ip_cmp_method == ipCmpAll) { + appendStringInfoString(item, "all"); + } else if (hba->ip_cmp_method == ipCmpSameHost) { + appendStringInfoString(item, "samehost"); + } else if (hba->ip_cmp_method == ipCmpSameNet) { + appendStringInfoString(item, "samenet"); + } else { + if(hba->hostname != NULL) { + appendStringInfoString(item, hba->hostname); + } else { + get_ip_str((struct sockaddr*)&hba->addr, ipstr); + appendStringInfoString(item, ipstr); + } + int32_t mask = pg_sockaddr_mask_cidr(&hba->mask); + if(mask != -1) { + int rc = sprintf_s(maskstr, sizeof(maskstr), "%d", mask); + securec_check_ss(rc, "\0", "\0"); + appendStringInfoChar(item, '/'); + appendStringInfoString(item, maskstr); + } + } + values[3] = CStringGetTextDatum(item->data); + /* parse the method field */ + resetStringInfo(item); + if(hba->auth_method == uaTrust) { + appendStringInfoString(item, "trust"); + } else if (hba->auth_method == uaIdent) { + appendStringInfoString(item, "ident"); + if(hba->usermap != NULL) { + appendStringInfoString(item, " map="); + appendStringInfoString(item, hba->usermap); + } + } else if (hba->auth_method == uaPeer) { + appendStringInfoString(item, "peer"); + if(hba->usermap != NULL) { + 
appendStringInfoString(item, " map="); + appendStringInfoString(item, hba->usermap); + } + } else if (hba->auth_method == uaKrb5) { + appendStringInfoString(item, "krb5"); + if(hba->usermap != NULL) { + appendStringInfoString(item, " map="); + appendStringInfoString(item, hba->usermap); + } + if(hba->include_realm) { + appendStringInfoString(item, " include_realm=1"); + } + if(hba->krb_realm != NULL) { + appendStringInfoString(item, " krb_realm="); + appendStringInfoString(item, hba->krb_realm); + } + } else if (hba->auth_method == uaGSS) { + appendStringInfoString(item, "gss"); + if(hba->usermap != NULL) { + appendStringInfoString(item, " map="); + appendStringInfoString(item, hba->usermap); + } + if(hba->include_realm) { + appendStringInfoString(item, " include_realm=1"); + } + if(hba->krb_realm != NULL) { + appendStringInfoString(item, " krb_realm="); + appendStringInfoString(item, hba->krb_realm); + } + } else if (hba->auth_method == uaSSPI) { + appendStringInfoString(item, "sspi"); + if(hba->usermap != NULL) { + appendStringInfoString(item, " map="); + appendStringInfoString(item, hba->usermap); + } + if(hba->include_realm) { + appendStringInfoString(item, " include_realm=1"); + } + if(hba->krb_realm != NULL) { + appendStringInfoString(item, " krb_realm="); + appendStringInfoString(item, hba->krb_realm); + } + } else if (hba->auth_method == uaReject) { + appendStringInfoString(item, "reject"); + } else if (hba->auth_method == uaMD5) { + appendStringInfoString(item, "md5"); + } else if (hba->auth_method == uaSHA256) { + appendStringInfoString(item, "sha256"); + } else if (hba->auth_method == uaSM3) { + appendStringInfoString(item, "sm3"); + } else if (hba->auth_method == uaPAM) { + appendStringInfoString(item, "pam"); + if(hba->pamservice != NULL) { + appendStringInfoString(item, " pamservice="); + appendStringInfoString(item, hba->pamservice); + } + } else if (hba->auth_method == uaLDAP) { + appendStringInfoString(item, "ldap"); + if(hba->ldaptls) { + 
appendStringInfoString(item, " ldaptls=1"); + } + if(hba->ldapserver != NULL) { + appendStringInfoString(item, " ldapserver="); + appendStringInfoString(item, hba->ldapserver); + } + if(hba->ldapbinddn != NULL) { + appendStringInfoString(item, " ldapbinddn="); + appendStringInfoString(item, hba->ldapbinddn); + } + if(hba->ldapbindpasswd != NULL) { + appendStringInfoString(item, " ldapbindpasswd="); + appendStringInfoString(item, hba->ldapbindpasswd); + } + if(hba->ldapsearchattribute != NULL) { + appendStringInfoString(item, " ldapsearchattribute="); + appendStringInfoString(item, hba->ldapsearchattribute); + } + if(hba->ldapbasedn != NULL) { + appendStringInfoString(item, " ldapbasedn="); + appendStringInfoString(item, hba->ldapbasedn); + } + if(hba->ldapprefix != NULL) { + appendStringInfoString(item, " ldapprefix="); + appendStringInfoString(item, hba->ldapprefix); + } + if(hba->ldapsuffix != NULL) { + appendStringInfoString(item, " ldapsuffix="); + appendStringInfoString(item, hba->ldapsuffix); + } + if(hba->krb_server_hostname != NULL) { + appendStringInfoString(item, " krb_server_hostname="); + appendStringInfoString(item, hba->krb_server_hostname); + } + if(hba->ldapport > 0) { + int rc = sprintf_s(portstr, sizeof(portstr), "%d", hba->ldapport); + securec_check_ss(rc, "\0", "\0"); + appendStringInfoString(item, " ldapport="); + appendStringInfoString(item, portstr); + } + } else if (hba->auth_method == uaCert) { + appendStringInfoString(item, "cert"); + if(hba->usermap != NULL) { + appendStringInfoString(item, " map="); + appendStringInfoString(item, hba->usermap); + } + } + values[4] = CStringGetTextDatum(item->data); + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + + (void)pthread_rwlock_unlock(&hba_rwlock); + PG_END_ENSURE_ERROR_CLEANUP(hba_rwlock_cleanup, (Datum)0); + + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); + DestroyStringInfo(item); + (void)MemoryContextSwitchTo(oldcontext); + return (Datum)0; +} 
diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 0d1f7aeb74..b42ee2c6a2 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,12 +76,13 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92943; +const uint32 GRAND_VERSION_NUM = 92944; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 HBA_CONF_VERSION_NUM = 92944; const uint32 PARALLEL_ENABLE_VERSION_NUM = 92941; const uint32 NAN_INFINITE_TEST_EXPR = 92940; const uint32 STRAIGHT_JOIN_VERSION_NUMBER = 92939; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_944.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_944.sql new file mode 100644 index 0000000000..54e8df0d7e --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_944.sql @@ -0,0 +1 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_hba_conf(); \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_944.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_944.sql new file mode 100644 index 0000000000..54e8df0d7e --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_944.sql @@ -0,0 +1 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_hba_conf(); \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_944.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_944.sql new file mode 100644 index 0000000000..7a0bfa3fb6 --- /dev/null +++ 
b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_944.sql @@ -0,0 +1,7 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_hba_conf() CASCADE; + +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2873; + +CREATE FUNCTION pg_catalog.gs_get_hba_conf() +RETURNS record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_get_hba_conf'; +comment on function pg_catalog.gs_get_hba_conf() is 'config: information about pg_hba conf file'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_944.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_944.sql new file mode 100644 index 0000000000..7a0bfa3fb6 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_944.sql @@ -0,0 +1,7 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_hba_conf() CASCADE; + +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2873; + +CREATE FUNCTION pg_catalog.gs_get_hba_conf() +RETURNS record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_get_hba_conf'; +comment on function pg_catalog.gs_get_hba_conf() is 'config: information about pg_hba conf file'; \ No newline at end of file diff --git a/src/include/lib/stringinfo.h b/src/include/lib/stringinfo.h index 313aff68e6..f8bb9c9685 100644 --- a/src/include/lib/stringinfo.h +++ b/src/include/lib/stringinfo.h @@ -130,6 +130,12 @@ extern void appendStringInfoString(StringInfo str, const char* s); */ extern void appendStringInfoChar(StringInfo str, char ch); +/* ------------------------ + * popStringInfoChar + * Pop a single byte to str.. + */ +extern void popStringInfoChar(StringInfo str); + /* ------------------------ * appendStringInfoCharMacro * As above, but a macro for even more speed where it matters. 
diff --git a/src/include/libpq/hba.h b/src/include/libpq/hba.h index 56a5762d24..95b659a725 100644 --- a/src/include/libpq/hba.h +++ b/src/include/libpq/hba.h @@ -65,6 +65,15 @@ typedef struct HbaLine { bool remoteTrust; } HbaLine; +/* + * A single string token lexed from the HBA config file, together with whether + * the token had been quoted. + */ +typedef struct HbaToken { + char* string; + bool quoted; +} HbaToken; + /* kluge to avoid including libpq/libpq-be.h here */ typedef struct Port hbaPort; diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index f73bdd45b7..354ab5551c 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -1472,6 +1472,7 @@ extern Datum quote_nullable(PG_FUNCTION_ARGS); extern Datum show_config_by_name(PG_FUNCTION_ARGS); extern Datum set_config_by_name(PG_FUNCTION_ARGS); extern Datum show_all_settings(PG_FUNCTION_ARGS); +extern Datum gs_get_hba_conf(PG_FUNCTION_ARGS); /* lockfuncs.c */ extern Datum pg_lock_status(PG_FUNCTION_ARGS); diff --git a/src/test/regress/expected/pg_ls_dir.out b/src/test/regress/expected/pg_ls_dir.out index b9cbb6e02b..c234bc1b00 100644 --- a/src/test/regress/expected/pg_ls_dir.out +++ b/src/test/regress/expected/pg_ls_dir.out @@ -16,3 +16,12 @@ select count(*) >= 0 as ok from pg_ls_tmpdir((select oid from pg_tablespace wher t (1 row) +select * from gs_get_hba_conf(); + type | database | user | address | method +-------+----------+------+--------------+-------- + local | all | all | | trust + host | all | all | 127.0.0.1/32 | trust + host | all | all | ::1/128 | trust + host | all | all | 127.0.0.1/32 | trust +(4 rows) + diff --git a/src/test/regress/sql/pg_ls_dir.sql b/src/test/regress/sql/pg_ls_dir.sql index d05559dcc7..2180d11806 100644 --- a/src/test/regress/sql/pg_ls_dir.sql +++ b/src/test/regress/sql/pg_ls_dir.sql @@ -3,3 +3,5 @@ select count(*) > 0 as ok from pg_ls_waldir(); select count(*) >= 0 as ok from pg_ls_tmpdir(); select count(*) >= 0 as ok from 
pg_ls_tmpdir((select oid from pg_tablespace where spcname='pg_default')); + +select * from gs_get_hba_conf(); -- Gitee From 22f89ae62160131b4e62867634ff61cf7e7266ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E9=91=AB=E9=91=AB?= <1310342532@qq.com> Date: Mon, 29 Jul 2024 17:29:45 +0800 Subject: [PATCH 109/347] =?UTF-8?q?DSS=E6=8E=A8=E7=82=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 4e05e27af4..aa61cf540b 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ dms_commit_id=4e5c455f771c9be5e78260371eed2f28474cf253 -dss_commit_id=b714d964156722f4353840c0f5bb66c92158e3c6 +dss_commit_id=083e52af8c7f965856f319554d6332b14f6b2c99 cbb_commit_id=ac8ed05be35754e77032b4c9ec9b1eba53f1d5a6 -- Gitee From 1019d31b1417c88a3883c695c7d46ae8fb1782ea Mon Sep 17 00:00:00 2001 From: hwhbj Date: Thu, 25 Jul 2024 11:10:38 +0800 Subject: [PATCH 110/347] =?UTF-8?q?jdbc=E6=94=AF=E6=8C=81=E5=85=A8?= =?UTF-8?q?=E9=93=BE=E8=B7=AF=E8=BF=BD=E8=B8=AA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/cluster_guc.conf | 1 + src/common/backend/catalog/builtin_funcs.ini | 6 +- .../backend/catalog/performance_views.sql | 4 +- src/common/backend/catalog/system_views.sql | 3 +- src/common/backend/utils/init/globals.cpp | 3 +- src/common/backend/utils/misc/guc.cpp | 26 + .../statement/instr_handle_mgr.cpp | 34 +- .../instruments/statement/instr_statement.cpp | 5 +- .../process/postmaster/og_record_time.cpp | 2 + src/gausskernel/process/postmaster/pgstat.cpp | 18 +- src/gausskernel/process/tcop/dest.cpp | 7 + src/gausskernel/process/tcop/postgres.cpp | 168 +++-- .../process/threadpool/knl_session.cpp | 5 + .../rollback-post_catalog_maindb_92_945.sql | 695 
+++++++++++++++++ .../rollback-post_catalog_otherdb_92_945.sql | 695 +++++++++++++++++ .../upgrade-post_catalog_maindb_92_945.sql | 703 ++++++++++++++++++ .../upgrade-post_catalog_otherdb_92_945.sql | 703 ++++++++++++++++++ src/include/instruments/instr_handle_mgr.h | 3 +- .../knl/knl_guc/knl_session_attr_common.h | 1 + src/include/knl/knl_session.h | 10 + src/include/og_record_time.h | 3 + src/include/pgstat.h | 6 +- src/include/postgres.h | 2 + src/include/tcop/dest.h | 1 + .../regress/output/recovery_2pc_tools.source | 2 + 25 files changed, 3034 insertions(+), 72 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 735da230ae..39ab287849 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -811,6 +811,7 @@ ss_enable_bcast_snapshot|bool|0,0|NULL|NULL| ss_txnstatus_cache_size|int|0,524288|NULL|NULL| subscription_conflict_resolution|enum|error,apply_remote,keep_local|NULL|NULL| time_record_level|int|0,10|NULL|NULL| +enable_record_nettime|bool|0,0|NULL|NULL| ss_enable_dorado|bool|0,0|NULL|NULL| ss_stream_cluster|bool|0,0|NULL|NULL| enable_uwal|bool|0,0|NULL|NULL| diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 54fbec6f8a..76322cf17b 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -3191,7 +3191,7 @@ ), AddFuncGroup( "get_instr_unique_sql", 1, - AddBuiltinFunc(_0(5702), _1("get_instr_unique_sql"), _2(0), _3(false), 
_4(true), _5(get_instr_unique_sql), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(60, 19, 23, 19, 26, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 1184, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(60, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o','o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(60, "node_name", "node_id", "user_name", "user_id", "unique_sql_id", "query", "n_calls", "min_elapse_time", "max_elapse_time", "total_elapse_time", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "n_soft_parse", "n_hard_parse", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "last_updated", "sort_count", "sort_time", "sort_mem_used", "sort_spill_count", "sort_spill_size", "hash_count", "hash_time", "hash_mem_used", "hash_spill_count", "hash_spill_size", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown"), _24(NULL), _25("get_instr_unique_sql"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + 
AddBuiltinFunc(_0(5702), _1("get_instr_unique_sql"), _2(0), _3(false), _4(true), _5(get_instr_unique_sql), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(61, 19, 23, 19, 26, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 1184, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(61, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o','o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(61, "node_name", "node_id", "user_name", "user_id", "unique_sql_id", "query", "n_calls", "min_elapse_time", "max_elapse_time", "total_elapse_time", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "n_soft_parse", "n_hard_parse", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "last_updated", "sort_count", "sort_time", "sort_mem_used", "sort_spill_count", "sort_spill_size", "hash_count", "hash_time", "hash_mem_used", "hash_spill_count", "hash_spill_size", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown","net_trans_time"), _24(NULL), _25("get_instr_unique_sql"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), 
_33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "get_instr_user_login", 1, @@ -11086,8 +11086,8 @@ AddFuncGroup( ), AddFuncGroup( "standby_statement_history", 2, - AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(71, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(71, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(71, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", 
"lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown","parent_query_id"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(72, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(72, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(72, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", 
"n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown","parent_query_id"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(72, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(72, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(72, "only_slow", "db_name", "schema_name", "origin_node", 
"user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown", "parent_query_id", "net_trans_time"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(73, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(73, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(73, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown", "parent_query_id", "net_trans_time"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "statement_detail_decode", 1, diff --git a/src/common/backend/catalog/performance_views.sql b/src/common/backend/catalog/performance_views.sql index 4635a88afa..6505adbe57 100644 --- a/src/common/backend/catalog/performance_views.sql +++ 
b/src/common/backend/catalog/performance_views.sql @@ -4080,7 +4080,7 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp is_slow_sql := row_data.is_slow_sql; trace_id := row_data.trace_id; advise := row_data.advise; - net_send_time =row_data.net_send_time; + net_send_time := row_data.net_send_time; srt1_q := row_data.srt1_q; srt2_simple_query := row_data.srt2_simple_query; srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; @@ -4244,7 +4244,7 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp is_slow_sql := row_data.is_slow_sql; trace_id := row_data.trace_id; advise := row_data.advise; - net_send_time =row_data.net_send_time; + net_send_time := row_data.net_send_time; srt1_q := row_data.srt1_q; srt2_simple_query := row_data.srt2_simple_query; srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; diff --git a/src/common/backend/catalog/system_views.sql b/src/common/backend/catalog/system_views.sql index d8952f06c8..a33c6993d0 100644 --- a/src/common/backend/catalog/system_views.sql +++ b/src/common/backend/catalog/system_views.sql @@ -3519,7 +3519,8 @@ CREATE unlogged table statement_history( srt13_before_query bigint, srt14_after_query bigint, rtt_unknown bigint, - parent_query_id bigint + parent_query_id bigint, + net_trans_time bigint ); REVOKE ALL on table pg_catalog.statement_history FROM public; create index statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index b42ee2c6a2..d1244a9b8a 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,12 +76,13 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92944; +const uint32 GRAND_VERSION_NUM = 92945; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending 
order. ********************************************/ +const uint32 NETTIME_TRACE_VERSION_NUM = 92945; const uint32 HBA_CONF_VERSION_NUM = 92944; const uint32 PARALLEL_ENABLE_VERSION_NUM = 92941; const uint32 NAN_INFINITE_TEST_EXPR = 92940; diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index 12ababad01..782bd6771a 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -2098,6 +2098,19 @@ static void InitConfigureNamesBool() NULL, NULL }, + {{"enable_record_nettime", + PGC_USERSET, + NODE_SINGLENODE, + STATS_COLLECTOR, + gettext_noop("Enable record network time"), + NULL, + }, + &u_sess->attr.attr_common.enable_record_nettime, + false, + NULL, + NULL, + NULL + }, /* End-of-list marker */ {{NULL, (GucContext)0, @@ -4577,6 +4590,7 @@ static void InitializeGUCOptionsFromEnvironment(void); static void InitializeOneGUCOption(struct config_generic* gconf); static void push_old_value(struct config_generic* gconf, GucAction action); static void ReportGUCOption(struct config_generic* record); +static void ReportTraceOption(); static void reapply_stacked_values(struct config_generic* variable, struct config_string* pHolder, GucStack* stack, const char* curvalue, GucContext curscontext, GucSource cursource); static void ShowGUCConfigOption(const char* name, DestReceiver* dest); @@ -6675,6 +6689,18 @@ void BeginReportingGUCOptions(void) if (conf->flags & GUC_REPORT) ReportGUCOption(conf); } + ReportTraceOption(); +} + +/* + * notify client connection driver support trace, low version server does not send this message. 
+ */ +static void ReportTraceOption() { + StringInfoData msgbuf; + pq_beginmessage(&msgbuf, 'S'); + pq_sendstring(&msgbuf, "server_support_trace"); + pq_sendstring(&msgbuf, "1"); + pq_endmessage(&msgbuf); } /* diff --git a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp index 480bbbb7f4..142ec0d285 100644 --- a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp @@ -113,7 +113,7 @@ void statement_init_metric_context() } init_full_sql_wait_events(); - /* commit for previous allocated handle */ + /* commit for previous allocated handle like PBE/PBE...S*/ if (u_sess->statement_cxt.curStatementMetrics != NULL) { statement_commit_metirc_context(); } @@ -390,17 +390,9 @@ static void print_stmt_debug_log() print_stmt_wait_event_log(log_level); } -/* put current handle to suspend list */ -void statement_commit_metirc_context() -{ - CHECK_STMT_HANDLE(); - - instr_stmt_report_stat_at_handle_commit(); - - instr_stmt_diff_wait_events(); - u_sess->statement_cxt.enable_wait_events_bitmap = false; - print_stmt_debug_log(); +void commit_metirc_context() { + CHECK_STMT_HANDLE(); (void)syscalllockAcquire(&u_sess->statement_cxt.list_protect); /* @@ -418,7 +410,7 @@ void statement_commit_metirc_context() CURRENT_STMT_METRIC_HANDLE->slow_query_threshold && CURRENT_STMT_METRIC_HANDLE->slow_query_threshold >= 0 && (!u_sess->attr.attr_common.track_stmt_parameter || - (u_sess->attr.attr_common.track_stmt_parameter && CURRENT_STMT_METRIC_HANDLE->timeModel[0] > 0))))) { + (u_sess->attr.attr_common.track_stmt_parameter && CURRENT_STMT_METRIC_HANDLE->timeModel[0] > 0))))) { /* need to persist, put to suspend list */ CURRENT_STMT_METRIC_HANDLE->next = u_sess->statement_cxt.suspendStatementList; u_sess->statement_cxt.suspendStatementList = CURRENT_STMT_METRIC_HANDLE; @@ -435,7 +427,23 @@ void statement_commit_metirc_context() 
ereport(DEBUG1, (errmodule(MOD_INSTR), errmsg("[Statement] commit - free list length: %d, suspend list length: %d", - u_sess->statement_cxt.free_count, u_sess->statement_cxt.suspend_count))); + u_sess->statement_cxt.free_count, u_sess->statement_cxt.suspend_count))); +} + +/* put current handle to suspend list */ +void statement_commit_metirc_context(bool commit_delay) +{ + CHECK_STMT_HANDLE(); + + instr_stmt_report_stat_at_handle_commit(); + + instr_stmt_diff_wait_events(); + u_sess->statement_cxt.enable_wait_events_bitmap = false; + print_stmt_debug_log(); + + if (!commit_delay) { + commit_metirc_context(); + } } void release_statement_context(PgBackendStatus* beentry, const char* func, int line) diff --git a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp index 09ec6cdfeb..6479ac1e27 100755 --- a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp @@ -554,10 +554,13 @@ static HeapTuple GetStatementTuple(Relation rel, StatementStatContext* statement set_stmt_advise(statementInfo, values, nulls, &i); /* time info addition */ values[i++] = Int64GetDatum(statementInfo->timeModel[NET_SEND_TIME]); - for (int num = TOTAL_TIME_INFO_TYPES_P1; num < TOTAL_TIME_INFO_TYPES; num++) { + for (int num = TOTAL_TIME_INFO_TYPES_P1; num < TOTAL_TIME_INFO_TYPES_P2; num++) { values[i++] = Int64GetDatum(statementInfo->timeModel[num]); } values[i++] = Int64GetDatum(statementInfo->parent_query_id); + for (int num = TOTAL_TIME_INFO_TYPES_P2; num < TOTAL_TIME_INFO_TYPES; num++) { + values[i++] = Int64GetDatum(statementInfo->timeModel[num]); + } Assert(INSTR_STATEMENT_ATTRNUM == i); return heap_form_tuple(RelationGetDescr(rel), values, nulls); } diff --git a/src/gausskernel/process/postmaster/og_record_time.cpp b/src/gausskernel/process/postmaster/og_record_time.cpp index 1f19c5bdba..1dcc124e01 100644 --- 
a/src/gausskernel/process/postmaster/og_record_time.cpp +++ b/src/gausskernel/process/postmaster/og_record_time.cpp @@ -50,6 +50,7 @@ const char* TimeInfoTypeName[] = { "SRT13_BEFORE_QUERY", "SRT14_AFTER_QUERY", "RTT_UNKNOWN", + "NET_TRANS_TIME", "NET_SEND_TIMES", "NET_SEND_N_CALLS", "NET_SEND_SIZE", @@ -541,6 +542,7 @@ void OgRecordStat::report_start(const OgTimeDataVo& data_record) void OgRecordStat::report_end(const OgTimeDataVo& record) { + // assert not records_stack.is_empty() OgTimeDataVo& time_vo = records_stack.top(); if (record != time_vo) { diff --git a/src/gausskernel/process/postmaster/pgstat.cpp b/src/gausskernel/process/postmaster/pgstat.cpp index e970e6f24e..c3d1355f19 100644 --- a/src/gausskernel/process/postmaster/pgstat.cpp +++ b/src/gausskernel/process/postmaster/pgstat.cpp @@ -8373,6 +8373,11 @@ void ResetMemory(void* dest, size_t size) securec_check(rc, "\0", "\0"); } +bool nettime_trace_is_working() +{ + return u_sess->statement_cxt.remote_support_trace && u_sess->attr.attr_common.enable_record_nettime; +} + void timeInfoRecordStart(void) { if (!og_time_record_start()) { @@ -8382,12 +8387,11 @@ void timeInfoRecordStart(void) u_sess->stat_cxt.localTimeInfoArray[CPU_TIME] = getCpuTime(); } -void timeInfoRecordEnd(void) +void timeInfoRecordEnd(bool update_delay) { if (!og_time_record_is_started()) { return; } - t_thrd.shemem_ptr_cxt.mySessionTimeEntry->changeCount++; if (u_sess->attr.attr_common.enable_instr_cpu_timer) { int64 cur = getCpuTime(); @@ -8396,7 +8400,17 @@ void timeInfoRecordEnd(void) } og_time_record_end(); og_get_record_stat()->print_self(); + if (u_sess->statement_cxt.nettime_trace_is_working && CURRENT_STMT_METRIC_HANDLE) { + u_sess->statement_cxt.total_db_time += u_sess->stat_cxt.localTimeInfoArray[DB_TIME]; + } + if (!update_delay) { + update_sql_state(); + } + +} +void update_sql_state(void) { + t_thrd.shemem_ptr_cxt.mySessionTimeEntry->changeCount++; addThreadTimeEntry(); 
t_thrd.shemem_ptr_cxt.mySessionTimeEntry->changeCount++; Assert((t_thrd.shemem_ptr_cxt.mySessionTimeEntry->changeCount & 1) == 0); diff --git a/src/gausskernel/process/tcop/dest.cpp b/src/gausskernel/process/tcop/dest.cpp index 9d1794d13f..0db1b7c32e 100644 --- a/src/gausskernel/process/tcop/dest.cpp +++ b/src/gausskernel/process/tcop/dest.cpp @@ -179,6 +179,13 @@ DestReceiver* CreateDestReceiver(CommandDest dest) return &donothingDR; } +void send_dbtime_to_driver(int64 db_time) { + StringInfoData msgbuf; + pq_beginmessage(&msgbuf, 'K'); + pq_sendint64(&msgbuf, db_time); + pq_endmessage(&msgbuf); +} + /* ---------------- * EndCommand - clean up the destination at end of command * ---------------- diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp index 1a8ea3221d..1094608a53 100755 --- a/src/gausskernel/process/tcop/postgres.cpp +++ b/src/gausskernel/process/tcop/postgres.cpp @@ -607,6 +607,7 @@ int SocketBackend(StringInfo inBuf) case 'I': /* Push, Pop schema name */ case 'L': /* Link gc_fdw */ case 'J': /* Trace ID */ + case 'V': /* client conn driver support trace info*/ break; case 'X': /* terminate */ @@ -619,6 +620,7 @@ int SocketBackend(StringInfo inBuf) case 'C': /* close */ case 'D': /* describe */ case 'E': /* execute */ + case 'K': /* client conn driver net_time */ case 'H': /* flush */ case 'P': /* parse */ u_sess->postgres_cxt.doing_extended_query_message = true; @@ -6621,9 +6623,14 @@ void ProcessInterrupts(void) pgstat_report_activity(STATE_IDLE, NULL); ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), errmsg("terminating snapshot process due to administrator command"))); - } else + } else { + /* handle when remote conn lost like session timeout */ + if (!u_sess->statement_cxt.previous_stmt_flushed) { + handle_commit_previous_metirc_context(); + } ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), errmsg("terminating connection due to administrator command"))); + } } if (t_thrd.int_cxt.ClientConnectionLost 
&& !u_sess->stream_cxt.in_waiting_quit) { t_thrd.int_cxt.QueryCancelPending = false; /* lost connection trumps QueryCancel */ @@ -7738,6 +7745,56 @@ void LoadSqlPlugin() } #endif +/* handle and commit previous stmt metric context in jdbc trace mode. */ +void handle_commit_previous_metirc_context() { + /* handle local time info to metricContext*/ + update_sql_state(); + /* commit metricContext to statementFlush */ + commit_metirc_context(); +} + +/* handle when connection closed*/ +void deal_fronted_lost() + { + /* unified auditing logout */ + audit_processlogout_unified(); + + /* + * isSingleMode means we are doing initdb. Some temp tables + * will be created to store intermediate result, so should do cleaning + * when finished. + * xc_maintenance_mode is for cluster resize, temp table created + * during it should be clean too. + * Drop temp schema if IS_SINGLE_NODE. + */ + RemoveTempNamespace(); + + InitThreadLocalWhenSessionExit(); + + if (IS_THREAD_POOL_WORKER) { + (void)gs_signal_block_sigusr2(); + t_thrd.threadpool_cxt.worker->CleanUpSession(false); + (void)gs_signal_unblock_sigusr2(); + return; + } else { + /* + * Reset whereToSendOutput to prevent ereport from attempting + * to send any more messages to client. + */ + if (t_thrd.postgres_cxt.whereToSendOutput == DestRemote) + t_thrd.postgres_cxt.whereToSendOutput = DestNone; + + /* + * NOTE: if you are tempted to add more code here, DON'T! + * Whatever you had in mind to do should be set up as an + * on_proc_exit or on_shmem_exit callback, instead. Otherwise + * it will fail to be called during other backend-shutdown + * scenarios. 
+ */ + proc_exit(0); + } + } + /* ---------------------------------------------------------------- * PostgresMain * openGauss main loop -- all backends, interactive or otherwise start here @@ -8605,6 +8662,8 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam bool template0_locked = false; OgRecordOperator _local_tmp_opt(false, SRT13_BEFORE_QUERY); OgRecordOperator _local_tmp_opt1(false, SRT14_AFTER_QUERY); + + bool query_started = false; /* * Non-error queries loop here. */ @@ -8732,6 +8791,18 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam pgstat_report_unique_sql_id(true); u_sess->trace_cxt.trace_id[0] = '\0'; + + /* update our elapsed time statistics. */ + if (og_time_record_is_started()) { + _local_tmp_opt.exit(); + } + timeInfoRecordEnd(u_sess->statement_cxt.nettime_trace_is_working); + + if (u_sess->statement_cxt.nettime_trace_is_working) { + send_dbtime_to_driver(u_sess->statement_cxt.total_db_time); + u_sess->statement_cxt.total_db_time = 0; + } + /* * If connection to client is lost, we do not need to send message to client. */ @@ -8762,11 +8833,6 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam UnsetGlobalSnapshotData(); } #endif - /* update our elapsed time statistics. */ - if (og_time_record_is_started()) { - _local_tmp_opt.exit(); - } - timeInfoRecordEnd(); /* reset unique_sql_id & stat * @@ -8776,9 +8842,13 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam * lost the unique sql entry. */ ReportQueryStatus(); - statement_commit_metirc_context(); - ResetCurrentUniqueSQL(); - + statement_commit_metirc_context(u_sess->statement_cxt.nettime_trace_is_working); + if (u_sess->statement_cxt.nettime_trace_is_working) { + u_sess->statement_cxt.previous_stmt_flushed = false; + } else { + ResetCurrentUniqueSQL(); + } + query_started = false; send_ready_for_query = false; } else { /* update our elapsed time statistics. 
*/ @@ -8886,6 +8956,11 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam #ifdef USE_SPQ t_thrd.spq_ctx.spq_role = ROLE_UTILITY; #endif + if (!query_started) { + query_started = true; + u_sess->statement_cxt.nettime_trace_is_working = nettime_trace_is_working(); + } + /* update our elapsed time statistics. */ timeInfoRecordStart(); _local_tmp_opt1.enter(); @@ -9166,6 +9241,11 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam pq_getmsgend(&input_message); pgstatCountSQL4SessionLevel(); + + if (!u_sess->statement_cxt.previous_stmt_flushed) { + handle_commit_previous_metirc_context(); + u_sess->statement_cxt.previous_stmt_flushed = true; + } statement_init_metric_context(); #ifdef USE_RETRY_STUB if (IsStmtRetryEnabled()) { @@ -9928,50 +10008,44 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam } } break; + case 'K': /* client conn info net_time*/ + { + int64 net_trans_time = pq_getmsgint64(&input_message); + u_sess->stat_cxt.localTimeInfoArray[NET_TRANS_TIME] = net_trans_time; + handle_commit_previous_metirc_context(); + u_sess->statement_cxt.previous_stmt_flushed = true; + ResetCurrentUniqueSQL(); + + } break; + /* - * 'X' means that the frontend is closing down the socket. EOF - * means unexpected loss of frontend connection. Either way, - * perform normal shutdown. + * 'V' means remote client driver support trace, for low version server, + * client does not send this message. */ - case 'X': - case EOF: - /* unified auditing logout */ - audit_processlogout_unified(); + case 'V': + u_sess->statement_cxt.remote_support_trace = true; + query_started = false; + break; /* - * isSingleMode means we are doing initdb. Some temp tables - * will be created to store intermediate result, so should do cleaning - * when finished. - * xc_maintenance_mode is for cluster resize, temp table created - * during it should be clean too. - * Drop temp schema if IS_SINGLE_NODE. 
+ * 'X' means that the frontend is closing down the socket. + * means unexpected loss of frontend connection. perform + * normal shutdown. */ - RemoveTempNamespace(); - - InitThreadLocalWhenSessionExit(); - - if (IS_THREAD_POOL_WORKER) { - (void)gs_signal_block_sigusr2(); - t_thrd.threadpool_cxt.worker->CleanUpSession(false); - (void)gs_signal_unblock_sigusr2(); - break; - } else { - /* - * Reset whereToSendOutput to prevent ereport from attempting - * to send any more messages to client. - */ - if (t_thrd.postgres_cxt.whereToSendOutput == DestRemote) - t_thrd.postgres_cxt.whereToSendOutput = DestNone; - - /* - * NOTE: if you are tempted to add more code here, DON'T! - * Whatever you had in mind to do should be set up as an - * on_proc_exit or on_shmem_exit callback, instead. Otherwise - * it will fail to be called during other backend-shutdown - * scenarios. - */ - proc_exit(0); + case 'X': + deal_fronted_lost(); + break; + + /* EOF means unexpected loss of frontend connection. Either way, + * perform normal shutdown. 
+ */ + case EOF: + if (!u_sess->statement_cxt.previous_stmt_flushed) { + handle_commit_previous_metirc_context(); } + deal_fronted_lost(); + break; + /* fall through */ case 'd': /* copy data */ case 'c': /* copy done */ diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp index 6717a35169..404f69efcb 100755 --- a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -1172,6 +1172,11 @@ static void knl_u_statement_init(knl_u_statement_context* statement_cxt) statement_cxt->query_plan_threshold_active = false; statement_cxt->is_exceed_query_plan_threshold = false; statement_cxt->record_query_plan_fin_time = 0; + + statement_cxt->remote_support_trace = false; + statement_cxt->previous_stmt_flushed = true; + statement_cxt->nettime_trace_is_working = false; + statement_cxt->total_db_time = 0; } void knl_u_relmap_init(knl_u_relmap_context* relmap_cxt) diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql new file mode 100644 index 0000000000..3aa80da32d --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql @@ -0,0 +1,695 @@ +DO $DO$ +DECLARE +ans boolean; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(); +DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(); +DROP VIEW IF EXISTS DBE_PERF.statement_history; +END IF; +END $DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; +DROP VIEW IF EXISTS DBE_PERF.statement CASCADE; +DROP VIEW IF EXISTS 
DBE_PERF.summary_statement CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.get_instr_unique_sql() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5702; + +CREATE FUNCTION pg_catalog.get_instr_unique_sql +( + OUT node_name name, + OUT node_id integer, + OUT user_name name, + OUT user_id oid, + OUT unique_sql_id bigint, + OUT query text, + OUT n_calls bigint, + OUT min_elapse_time bigint, + OUT max_elapse_time bigint, + OUT total_elapse_time bigint, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + Out net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT last_updated timestamp with time zone, + OUT sort_count bigint, + OUT sort_time bigint, + OUT sort_mem_used bigint, + OUT sort_spill_count bigint, + OUT sort_spill_size bigint, + OUT hash_count bigint, + OUT hash_time bigint, + OUT hash_mem_used bigint, + OUT hash_spill_count bigint, + OUT hash_spill_size bigint, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; + +CREATE VIEW DBE_PERF.statement AS + 
SELECT * FROM get_instr_unique_sql(); + +DROP FUNCTION IF EXISTS dbe_perf.get_summary_statement() cascade; +CREATE OR REPLACE FUNCTION dbe_perf.get_summary_statement() +RETURNS setof dbe_perf.statement +AS $$ +DECLARE + row_data dbe_perf.statement%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +CREATE VIEW DBE_PERF.summary_statement AS + SELECT * FROM DBE_PERF.get_summary_statement(); + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count bigint, + lwlock_time bigint, + lwlock_wait_time 
bigint, + details bytea, + is_slow_sql boolean, + trace_id text, + advise text, + net_send_time bigint, + srt1_q bigint, + srt2_simple_query bigint, + srt3_analyze_rewrite bigint, + srt4_plan_query bigint, + srt5_light_query bigint, + srt6_p bigint, + srt7_b bigint, + srt8_e bigint, + srt9_d bigint, + srt10_s bigint, + srt11_c bigint, + srt12_u bigint, + srt13_before_query bigint, + srt14_after_query bigint, + rtt_unknown bigint, + parent_query_id bigint +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean, timestamp with time zone[]); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info 
text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000 +LANGUAGE internal AS $function$standby_statement_history_1v$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + VARIADIC finish_time timestamp with time zone[], + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time 
bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000 +LANGUAGE internal AS $function$standby_statement_history$function$; + +DO $DO$ +DECLARE +ans boolean; + username text; + querystr text; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +CREATE VIEW DBE_PERF.statement_history AS +select * from pg_catalog.statement_history; +CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT 
start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM 
DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''''; + FOR row_data IN EXECUTE(query_str) LOOP + IF row_data.parent_query_id = 0 then + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + 
lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + return next; + END IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT 
n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true'; + FOR row_data IN EXECUTE(query_str) LOOP + IF row_data.parent_query_id = 0 THEN + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := 
row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := 
row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + return next; + END IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + SELECT SESSION_USER INTO username; + IF EXISTS(SELECT oid from pg_catalog.pg_class where relname='statement_history') then + querystr := 'REVOKE SELECT on table dbe_perf.statement_history from public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + END IF; +END $DO$; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql new file mode 100644 index 0000000000..3aa80da32d --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql @@ -0,0 +1,695 @@ +DO $DO$ +DECLARE +ans boolean; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from 
pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(); +DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(); +DROP VIEW IF EXISTS DBE_PERF.statement_history; +END IF; +END $DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; +DROP VIEW IF EXISTS DBE_PERF.statement CASCADE; +DROP VIEW IF EXISTS DBE_PERF.summary_statement CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.get_instr_unique_sql() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5702; + +CREATE FUNCTION pg_catalog.get_instr_unique_sql +( + OUT node_name name, + OUT node_id integer, + OUT user_name name, + OUT user_id oid, + OUT unique_sql_id bigint, + OUT query text, + OUT n_calls bigint, + OUT min_elapse_time bigint, + OUT max_elapse_time bigint, + OUT total_elapse_time bigint, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + Out net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT last_updated timestamp with time zone, + OUT sort_count bigint, + OUT sort_time bigint, + OUT sort_mem_used bigint, + OUT sort_spill_count bigint, + OUT sort_spill_size bigint, + OUT hash_count bigint, + OUT hash_time bigint, + OUT hash_mem_used bigint, + OUT hash_spill_count bigint, + OUT hash_spill_size bigint, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT 
srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; + +CREATE VIEW DBE_PERF.statement AS + SELECT * FROM get_instr_unique_sql(); + +DROP FUNCTION IF EXISTS dbe_perf.get_summary_statement() cascade; +CREATE OR REPLACE FUNCTION dbe_perf.get_summary_statement() +RETURNS setof dbe_perf.statement +AS $$ +DECLARE + row_data dbe_perf.statement%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +CREATE VIEW DBE_PERF.summary_statement AS + SELECT * FROM DBE_PERF.get_summary_statement(); + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time 
bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count bigint, + lwlock_time bigint, + lwlock_wait_time bigint, + details bytea, + is_slow_sql boolean, + trace_id text, + advise text, + net_send_time bigint, + srt1_q bigint, + srt2_simple_query bigint, + srt3_analyze_rewrite bigint, + srt4_plan_query bigint, + srt5_light_query bigint, + srt6_p bigint, + srt7_b bigint, + srt8_e bigint, + srt9_d bigint, + srt10_s bigint, + srt11_c bigint, + srt12_u bigint, + srt13_before_query bigint, + srt14_after_query bigint, + rtt_unknown bigint, + parent_query_id bigint +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean, timestamp with time zone[]); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows 
bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000 +LANGUAGE internal AS $function$standby_statement_history_1v$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + VARIADIC finish_time timestamp with time zone[], + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + 
OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000 +LANGUAGE internal AS $function$standby_statement_history$function$; + +DO $DO$ +DECLARE +ans boolean; + username text; + querystr text; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +CREATE VIEW DBE_PERF.statement_history AS 
+select * from pg_catalog.statement_history; +CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT 
srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''''; + FOR row_data IN EXECUTE(query_str) LOOP + IF row_data.parent_query_id = 0 then + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := 
row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + return next; + END IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT 
client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from 
dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true'; + FOR row_data IN EXECUTE(query_str) LOOP + IF row_data.parent_query_id = 0 THEN + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + 
net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + return next; + END IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + SELECT SESSION_USER INTO username; + IF EXISTS(SELECT oid from pg_catalog.pg_class where relname='statement_history') then + querystr := 'REVOKE SELECT on table dbe_perf.statement_history from public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + END IF; +END $DO$; \ No newline at end of file diff --git 
a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql new file mode 100644 index 0000000000..1f669cc58b --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql @@ -0,0 +1,703 @@ +DO $DO$ +DECLARE +ans boolean; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(); +DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(); +DROP VIEW IF EXISTS DBE_PERF.statement_history; +END IF; +END $DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; +DROP VIEW IF EXISTS DBE_PERF.statement CASCADE; +DROP VIEW IF EXISTS DBE_PERF.summary_statement CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.get_instr_unique_sql() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5702; + +CREATE FUNCTION pg_catalog.get_instr_unique_sql +( + OUT node_name name, + OUT node_id integer, + OUT user_name name, + OUT user_id oid, + OUT unique_sql_id bigint, + OUT query text, + OUT n_calls bigint, + OUT min_elapse_time bigint, + OUT max_elapse_time bigint, + OUT total_elapse_time bigint, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + 
OUT net_send_info text, + Out net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT last_updated timestamp with time zone, + OUT sort_count bigint, + OUT sort_time bigint, + OUT sort_mem_used bigint, + OUT sort_spill_count bigint, + OUT sort_spill_size bigint, + OUT hash_count bigint, + OUT hash_time bigint, + OUT hash_mem_used bigint, + OUT hash_spill_count bigint, + OUT hash_spill_size bigint, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT net_trans_time bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; + +CREATE VIEW DBE_PERF.statement AS + SELECT * FROM get_instr_unique_sql(); + +DROP FUNCTION IF EXISTS dbe_perf.get_summary_statement() cascade; +CREATE OR REPLACE FUNCTION dbe_perf.get_summary_statement() +RETURNS setof dbe_perf.statement +AS $$ +DECLARE + row_data dbe_perf.statement%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +CREATE VIEW DBE_PERF.summary_statement AS + SELECT * FROM DBE_PERF.get_summary_statement(); + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + 
debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count bigint, + lwlock_time bigint, + lwlock_wait_time bigint, + details bytea, + is_slow_sql boolean, + trace_id text, + advise text, + net_send_time bigint, + srt1_q bigint, + srt2_simple_query bigint, + srt3_analyze_rewrite bigint, + srt4_plan_query bigint, + srt5_light_query bigint, + srt6_p bigint, + srt7_b bigint, + srt8_e bigint, + srt9_d bigint, + srt10_s bigint, + srt11_c bigint, + srt12_u bigint, + srt13_before_query bigint, + srt14_after_query bigint, + rtt_unknown bigint, + parent_query_id bigint, + net_trans_time bigint +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean, timestamp with time zone[]); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + 
OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint, + OUT net_trans_time bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 
10000 +LANGUAGE internal AS $function$standby_statement_history_1v$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + VARIADIC finish_time timestamp with time zone[], + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT 
srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint, + OUT net_trans_time bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000 +LANGUAGE internal AS $function$standby_statement_history$function$; + +DO $DO$ +DECLARE +ans boolean; + username text; + querystr text; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +CREATE VIEW DBE_PERF.statement_history AS +select * from pg_catalog.statement_history; +CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT 
lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT net_trans_time bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''''; + FOR row_data IN EXECUTE(query_str) LOOP + IF row_data.parent_query_id = 0 then + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := 
row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + 
srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + net_trans_time := row_data.net_trans_time; + return next; + END IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT 
advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT net_trans_time bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true'; + FOR row_data IN EXECUTE(query_str) LOOP + IF row_data.parent_query_id = 0 THEN + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := 
row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + net_trans_time := row_data.net_trans_time; + return next; + END 
IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + SELECT SESSION_USER INTO username; + IF EXISTS(SELECT oid from pg_catalog.pg_class where relname='statement_history') then + querystr := 'REVOKE SELECT on table dbe_perf.statement_history from public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + END IF; +END $DO$; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql new file mode 100644 index 0000000000..1f669cc58b --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql @@ -0,0 +1,703 @@ +DO $DO$ +DECLARE +ans boolean; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(); +DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(); +DROP VIEW IF EXISTS DBE_PERF.statement_history; +END IF; +END $DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; +DROP VIEW IF EXISTS DBE_PERF.statement CASCADE; +DROP VIEW IF EXISTS DBE_PERF.summary_statement CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.get_instr_unique_sql() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5702; + +CREATE FUNCTION pg_catalog.get_instr_unique_sql +( + OUT node_name name, + OUT node_id integer, + OUT user_name name, + OUT 
user_id oid, + OUT unique_sql_id bigint, + OUT query text, + OUT n_calls bigint, + OUT min_elapse_time bigint, + OUT max_elapse_time bigint, + OUT total_elapse_time bigint, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + Out net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT last_updated timestamp with time zone, + OUT sort_count bigint, + OUT sort_time bigint, + OUT sort_mem_used bigint, + OUT sort_spill_count bigint, + OUT sort_spill_size bigint, + OUT hash_count bigint, + OUT hash_time bigint, + OUT hash_mem_used bigint, + OUT hash_spill_count bigint, + OUT hash_spill_size bigint, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT net_trans_time bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; + +CREATE VIEW DBE_PERF.statement AS + SELECT * FROM get_instr_unique_sql(); + +DROP FUNCTION IF EXISTS dbe_perf.get_summary_statement() cascade; +CREATE OR REPLACE FUNCTION dbe_perf.get_summary_statement() +RETURNS setof dbe_perf.statement +AS $$ +DECLARE + row_data dbe_perf.statement%rowtype; + row_name 
record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +CREATE VIEW DBE_PERF.summary_statement AS + SELECT * FROM DBE_PERF.get_summary_statement(); + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count bigint, + lwlock_time bigint, + lwlock_wait_time bigint, + details bytea, + is_slow_sql boolean, + trace_id text, + advise text, + net_send_time bigint, + srt1_q bigint, + srt2_simple_query bigint, + srt3_analyze_rewrite bigint, + srt4_plan_query bigint, + srt5_light_query bigint, + srt6_p bigint, + srt7_b bigint, + 
srt8_e bigint, + srt9_d bigint, + srt10_s bigint, + srt11_c bigint, + srt12_u bigint, + srt13_before_query bigint, + srt14_after_query bigint, + rtt_unknown bigint, + parent_query_id bigint, + net_trans_time bigint +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); +DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean, timestamp with time zone[]); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT 
lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint, + OUT net_trans_time bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000 +LANGUAGE internal AS $function$standby_statement_history_1v$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119; +CREATE FUNCTION dbe_perf.standby_statement_history( + IN only_slow boolean, + VARIADIC finish_time timestamp with time zone[], + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT 
data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT parent_query_id bigint, + OUT net_trans_time bigint) +RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000 +LANGUAGE internal AS $function$standby_statement_history$function$; + +DO $DO$ +DECLARE +ans boolean; + username text; + querystr text; +BEGIN +select case when count(*) = 1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; +IF ans = true then +CREATE VIEW DBE_PERF.statement_history AS +select * from pg_catalog.statement_history; +CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id 
bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT net_trans_time bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''''; + FOR row_data IN EXECUTE(query_str) LOOP + IF 
row_data.parent_query_id = 0 then + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + 
lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + net_trans_time := row_data.net_trans_time; + return next; + END IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + ( IN start_timestamp timestamp with time zone, + IN end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT 
n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint , + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, + OUT net_send_time bigint, + OUT srt1_q bigint, + OUT srt2_simple_query bigint, + OUT srt3_analyze_rewrite bigint, + OUT srt4_plan_query bigint, + OUT srt5_light_query bigint, + OUT srt6_p bigint, + OUT srt7_b bigint, + OUT srt8_e bigint, + OUT srt9_d bigint, + OUT srt10_s bigint, + OUT srt11_c bigint, + OUT srt12_u bigint, + OUT srt13_before_query bigint, + OUT srt14_after_query bigint, + OUT rtt_unknown bigint, + OUT net_trans_time bigint + ) + returns setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + query_str_nodes text; + BEGIN + -- Get all node names(CN + master DN) + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true'; + FOR row_data IN EXECUTE(query_str) LOOP + IF row_data.parent_query_id = 0 THEN + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr 
:= row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + 
advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + net_trans_time := row_data.net_trans_time; + return next; + END IF; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + SELECT SESSION_USER INTO username; + IF EXISTS(SELECT oid from pg_catalog.pg_class where relname='statement_history') then + querystr := 'REVOKE SELECT on table dbe_perf.statement_history from public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + END IF; +END $DO$; \ No newline at end of file diff --git a/src/include/instruments/instr_handle_mgr.h b/src/include/instruments/instr_handle_mgr.h index 6aeabb7e19..e81d3dc626 100644 --- a/src/include/instruments/instr_handle_mgr.h +++ b/src/include/instruments/instr_handle_mgr.h @@ -39,7 +39,8 @@ void statement_init_metric_context(); void statement_init_metric_context_if_needs(); -void statement_commit_metirc_context(); +void statement_commit_metirc_context(bool commit_delay = false); +void commit_metirc_context(); void release_statement_context(PgBackendStatus* beentry, const char* func, int line); void* 
bind_statement_context(); diff --git a/src/include/knl/knl_guc/knl_session_attr_common.h b/src/include/knl/knl_guc/knl_session_attr_common.h index 2c342de23a..0ba000ec68 100644 --- a/src/include/knl/knl_guc/knl_session_attr_common.h +++ b/src/include/knl/knl_guc/knl_session_attr_common.h @@ -248,6 +248,7 @@ typedef struct knl_session_attr_common { char* delimiter_name; bool b_compatibility_user_host_auth; int time_record_level; + bool enable_record_nettime; } knl_session_attr_common; #endif /* SRC_INCLUDE_KNL_KNL_SESSION_ATTR_COMMON_H_ */ diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index a0339b5c11..607d2004cb 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -2363,6 +2363,15 @@ typedef struct knl_u_statement_context { bool query_plan_threshold_active; /* active if need start query_plan threshold timer */ bool is_exceed_query_plan_threshold; /* if true when slow sql take effect */ TimestampTz record_query_plan_fin_time; /* finish time when execute time exceed log_min_duration_statement */ + + /* whether remote jdbc rupport trace */ + bool remote_support_trace; + /* whether previous statement flushed to statement_history */ + bool previous_stmt_flushed; + /* if enable jdbc trace */ + bool nettime_trace_is_working; + /* record total db_time like execute(sql1;sql2) */ + int64 total_db_time; } knl_u_statement_context; struct Qid_key { @@ -3143,6 +3152,7 @@ typedef struct knl_session_context { /* standby write. */ knl_u_libsw_context libsw_cxt; + } knl_session_context; enum stp_xact_err_type { diff --git a/src/include/og_record_time.h b/src/include/og_record_time.h index 6cb19286b1..25e08ae938 100644 --- a/src/include/og_record_time.h +++ b/src/include/og_record_time.h @@ -61,11 +61,13 @@ typedef enum TimeInfoType { SRT13_BEFORE_QUERY, SRT14_AFTER_QUERY, RTT_UNKNOWN, + NET_TRANS_TIME, TOTAL_TIME_INFO_TYPES } TimeInfoType; // some procedure use old postion, so we define this. 
const TimeInfoType TOTAL_TIME_INFO_TYPES_P1 = SRT1_Q; +const TimeInfoType TOTAL_TIME_INFO_TYPES_P2 = NET_TRANS_TIME; typedef enum NetInfoType { NET_SEND_TIMES, @@ -178,6 +180,7 @@ const char* og_record_time_type_str(const RecordType& record_type); */ const char* og_record_time_type_str(int pos); + #ifdef _cplusplus } #endif diff --git a/src/include/pgstat.h b/src/include/pgstat.h index ec6264b522..3287c61562 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -2559,7 +2559,11 @@ extern Size sessionTimeShmemSize(void); extern void sessionTimeShmemInit(void); extern void timeInfoRecordStart(void); -extern void timeInfoRecordEnd(void); +extern void timeInfoRecordEnd(bool update_delay = false); + +extern void update_sql_state(void); + +extern bool nettime_trace_is_working(); extern void getSessionTimeStatus(Tuplestorestate *tupStore, TupleDesc tupDesc, void (*insert)(Tuplestorestate *tupStore, TupleDesc tupDesc, const SessionTimeEntry *entry)); diff --git a/src/include/postgres.h b/src/include/postgres.h index 429da1baab..15d4ade6d0 100644 --- a/src/include/postgres.h +++ b/src/include/postgres.h @@ -993,6 +993,8 @@ extern void cJSON_internal_free(void* pointer); extern void InitThreadLocalWhenSessionExit(); extern void RemoveTempNamespace(); +extern void deal_fronted_lost(); +extern void handle_commit_previous_metirc_context(); #ifndef ENABLE_MULTIPLE_NODES #define CacheIsProcNameArgNsp(cc_id) ((cc_id) == PROCNAMEARGSNSP || (cc_id) == PROCALLARGS) #else diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h index b9dec2061f..c4b0efd938 100644 --- a/src/include/tcop/dest.h +++ b/src/include/tcop/dest.h @@ -169,6 +169,7 @@ extern DestReceiver* CreateDestReceiver(CommandDest dest); extern DestReceiver* CreateReceiverForMerge(CommandDest dest); extern void EndCommand(const char* commandTag, CommandDest dest); extern void EndCommand_noblock(const char* commandTag, CommandDest dest); +extern void send_dbtime_to_driver(int64 db_time); /* Additional 
functions that go with destination management, more or less. */ diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source index c073b44de5..1c591ae772 100644 --- a/src/test/regress/output/recovery_2pc_tools.source +++ b/src/test/regress/output/recovery_2pc_tools.source @@ -318,6 +318,8 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c enable_partitionwise | bool | | | enable_pbe_optimization | bool | | | enable_prevent_job_task_startup | bool | | | + enable_proc_coverage | bool | | | + enable_record_nettime | bool | | | enable_recyclebin | bool | | | enable_remote_excute | bool | | | enable_resource_record | bool | | | -- Gitee From 8eeec8f530fa95a3fd914c37ccfc187f80cb2c3b Mon Sep 17 00:00:00 2001 From: q00421813 Date: Mon, 29 Jul 2024 08:45:28 +0800 Subject: [PATCH 111/347] =?UTF-8?q?1.=E4=BF=AE=E5=A4=8Dbug=5Fundo=E9=A1=B5?= =?UTF-8?q?=E9=9D=A2=E5=A4=8D=E7=94=A8=E5=9C=BA=E6=99=AF=E4=B8=8B,?= =?UTF-8?q?=E5=BD=93checkpoint=E7=82=B9=E6=99=9A=E4=BA=8Eundo=E9=A1=B5?= =?UTF-8?q?=E9=9D=A2=E5=A4=8D=E7=94=A8=E4=B8=94=E6=AD=A4=E6=97=B6=E5=8F=91?= =?UTF-8?q?=E7=94=9Fredo=E5=90=8E=E5=87=BA=E7=8E=B0urecptr=E4=B8=8Exlog?= =?UTF-8?q?=E8=AE=B0=E5=BD=95=E4=B8=AD=E7=9A=84urecptr=E4=B8=8D=E4=B8=80?= =?UTF-8?q?=E8=87=B4=E7=9A=84=E6=83=85=E5=86=B5=202.astore=E8=A1=A8?= =?UTF-8?q?=E5=9C=A8enable=5Frecyclebin=3Don=E5=9C=BA=E6=99=AF=E4=B8=8B?= =?UTF-8?q?=E4=B8=8D=E6=94=AF=E6=8C=81=E5=88=9B=E5=BB=BA=E6=AE=B5=E9=A1=B5?= =?UTF-8?q?=E5=BC=8F=E8=A1=A8=203.=E4=BF=AE=E5=A4=8Dgs=5Fundo=5Fdump=5Fxid?= =?UTF-8?q?=E8=A7=86=E5=9B=BExid=E6=A0=A1=E9=AA=8C=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/pgundostatfuncs.cpp | 4 ++-- src/gausskernel/optimizer/commands/tablecmds.cpp | 8 +++++++- .../storage/access/ustore/knl_uextremeredo.cpp | 8 ++++---- src/gausskernel/storage/access/ustore/knl_uredo.cpp | 4 ++-- 4 files 
changed, 15 insertions(+), 9 deletions(-) diff --git a/src/common/backend/utils/adt/pgundostatfuncs.cpp b/src/common/backend/utils/adt/pgundostatfuncs.cpp index 55157e9f0e..6fb4c68319 100644 --- a/src/common/backend/utils/adt/pgundostatfuncs.cpp +++ b/src/common/backend/utils/adt/pgundostatfuncs.cpp @@ -1912,7 +1912,7 @@ Datum gs_undo_translot_dump_xid(PG_FUNCTION_ARGS) TransactionId xid = (TransactionId)PG_GETARG_TRANSACTIONID(0); bool read_memory = PG_GETARG_INT32(1); - if (!TransactionIdIsValid(xid)) { + if (!TransactionIdIsValid(xid) || xid >= t_thrd.xact_cxt.ShmemVariableCache->nextXid) { elog(ERROR, "xid is invalid"); PG_RETURN_VOID(); } @@ -2012,7 +2012,7 @@ Datum gs_undo_dump_xid(PG_FUNCTION_ARGS) PG_RETURN_VOID(); #else TransactionId xid = (TransactionId)PG_GETARG_TRANSACTIONID(0); - if (!TransactionIdIsValid(xid)) { + if (!TransactionIdIsValid(xid) || xid >= t_thrd.xact_cxt.ShmemVariableCache->nextXid) { elog(ERROR, "xid is invalid"); PG_RETURN_VOID(); } diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 7e3061fab6..b2a48ac99e 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -2895,7 +2895,7 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object if (!IsInitdb && (relkind == RELKIND_RELATION) && !IsSystemNamespace(namespaceId) && !IsCStoreNamespace(namespaceId) && (pg_strcasecmp(storeChar, ORIENTATION_ROW) == 0) && - (stmt->relation->relpersistence == RELPERSISTENCE_PERMANENT)) { + (stmt->relation->relpersistence == RELPERSISTENCE_PERMANENT) && !u_sess->attr.attr_storage.enable_recyclebin) { bool isSegmentType = (storage_type == SEGMENT_PAGE); if (!isSegmentType && (u_sess->attr.attr_storage.enable_segment || bucketinfo != NULL)) { storage_type = SEGMENT_PAGE; @@ -2904,6 +2904,12 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object reloptions = 
transformRelOptions((Datum)0, stmt->options, NULL, validnsps, true, false); } } else if (storage_type == SEGMENT_PAGE) { + if (u_sess->attr.attr_storage.enable_recyclebin) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_SEGMENT_PAGE), + errmsg("The table %s do not support segment-page storage", stmt->relation->relname), + errdetail("Segment-page storage doest not support recyclebin"), + errhint("set enable_recyclebin = off before using segmnet-page storage."))); + } ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("The table %s do not support segment storage", stmt->relation->relname))); diff --git a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp index abe3c9f1d9..48d5bd9ee5 100644 --- a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp @@ -1685,7 +1685,7 @@ static void RedoUndoInsertBlock(XLogBlockHead *blockhead, XLogBlockUndoParse *bl urecptr = UHeapPrepareUndoInsert(xlundohdr->relOid, xlundohdrextra->partitionOid, relNode, spcNode, UNDO_PERMANENT, recxid, 0, xlundohdrextra->blkprev, xlundohdrextra->prevurp, blockdatarec->insertUndoParse.blkno, xlundohdr, xlundometa); - Assert(urecptr == xlundohdr->urecptr); + Assert(UNDO_PTR_GET_OFFSET(urecptr) == UNDO_PTR_GET_OFFSET(xlundohdr->urecptr)); undorec->SetOffset(blockdatarec->insertUndoParse.offnum); if (!skipInsert) { InsertPreparedUndo(t_thrd.ustore_cxt.urecvec, lsn); @@ -1764,7 +1764,7 @@ static void RedoUndoDeleteBlock(XLogBlockHead *blockhead, XLogBlockUndoParse *bl xlundohdrextra->hasSubXact ? 
TopSubTransactionId : InvalidSubTransactionId, 0, xlundohdrextra->blkprev, xlundohdrextra->prevurp, &oldTD, &utup, blockdatarec->deleteUndoParse.blkno, xlundohdr, xlundometa); - Assert(urecptr == xlundohdr->urecptr); + Assert(UNDO_PTR_GET_OFFSET(urecptr) == UNDO_PTR_GET_OFFSET(xlundohdr->urecptr)); undorec->SetOffset(blockdatarec->deleteUndoParse.offnum); if (!skipInsert) { /* Insert the Undo record into the undo store */ @@ -1851,7 +1851,7 @@ static void RedoUndoUpdateBlock(XLogBlockHead *blockhead, XLogBlockUndoParse *bl inplaceUpdate ? xlundohdrextra->blkprev : xlnewundohdrextra->blkprev, xlundohdrextra->prevurp, &oldTD, &oldtup, inplaceUpdate, &newUrecptr, blockdatarec->updateUndoParse.undoXorDeltaSize, blockdatarec->updateUndoParse.oldblk, blockdatarec->updateUndoParse.newblk, xlundohdr, xlundometa); - Assert(urecptr == xlundohdr->urecptr); + Assert(UNDO_PTR_GET_OFFSET(urecptr) == UNDO_PTR_GET_OFFSET(xlundohdr->urecptr)); if (!skipInsert) { if (!inplaceUpdate) { @@ -1956,7 +1956,7 @@ static void RedoUndoMultiInsertBlock(XLogBlockHead *blockhead, XLogBlockUndoPars * undo should be inserted at same location as it was during the * actual insert (DO operation). */ - Assert((*urecvec)[0]->Urp() == xlundohdr->urecptr); + Assert(UNDO_PTR_GET_OFFSET((*urecvec)[0]->Urp()) == UNDO_PTR_GET_OFFSET(xlundohdr->urecptr)); InsertPreparedUndo(urecvec, lsn); } diff --git a/src/gausskernel/storage/access/ustore/knl_uredo.cpp b/src/gausskernel/storage/access/ustore/knl_uredo.cpp index ea1a22c3b1..5a9a46c10d 100644 --- a/src/gausskernel/storage/access/ustore/knl_uredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uredo.cpp @@ -393,7 +393,7 @@ static UndoRecPtr PrepareAndInsertUndoRecordForDeleteRedo(XLogReaderState *recor InvalidBuffer, xlrec->offnum, xid, *hasSubXact ? 
TopSubTransactionId : InvalidSubTransactionId, 0, *blkprev, *prevurp, &oldTD, utup, blkno, xlundohdr, &undometa); - Assert(urecptr == xlundohdr->urecptr); + Assert(UNDO_PTR_GET_OFFSET(urecptr) == UNDO_PTR_GET_OFFSET(xlundohdr->urecptr)); undorec->SetOffset(xlrec->offnum); if (!skipInsert) { /* Insert the Undo record into the undo store */ @@ -968,7 +968,7 @@ static UndoRecPtr PrepareAndInsertUndoRecordForUpdateRedo(XLogReaderState *recor *hasSubXact ? TopSubTransactionId : InvalidSubTransactionId, 0, *blkprev, inplaceUpdate ? *blkprev : *newblkprev, *prevurp, &oldTD, oldtup, inplaceUpdate, &newUrecptr, undoXorDeltaSize, oldblk, newblk, xlundohdr, &undometa); - Assert(urecptr == xlundohdr->urecptr); + Assert(UNDO_PTR_GET_OFFSET(urecptr) == UNDO_PTR_GET_OFFSET(xlundohdr->urecptr)); if (!skipInsert) { if (!inplaceUpdate) { -- Gitee From edb57606dc53e0a18a9c0d309ab06a10008a0a77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=82=AE=E5=82=A8-=E7=8E=8B=E5=BB=BA=E8=BE=BE?= Date: Thu, 25 Jul 2024 15:57:29 +0800 Subject: [PATCH 112/347] =?UTF-8?q?=E5=A4=87=E4=BB=BD=E4=B8=8E=E6=81=A2?= =?UTF-8?q?=E5=A4=8D=E5=B7=A5=E5=85=B7=E5=AE=A1=E8=AE=A1=E6=97=A5=E5=BF=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_basebackup/pg_basebackup.cpp | 3 +- src/bin/pg_dump/pg_dump.cpp | 2 + src/bin/pg_dump/pg_dumpall.cpp | 2 + src/bin/pg_dump/pg_restore.cpp | 2 + src/bin/pg_probackup/CMakeLists.txt | 4 +- src/bin/pg_probackup/Makefile | 10 +- src/bin/pg_probackup/logger.cpp | 18 -- src/bin/pg_probackup/logger.h | 3 +- src/bin/pg_probackup/pg_probackup.cpp | 8 +- src/include/bin/elog.h | 12 ++ src/lib/elog/elog.cpp | 215 ++++++++++++++++++++++++ 11 files changed, 254 insertions(+), 25 deletions(-) diff --git a/src/bin/pg_basebackup/pg_basebackup.cpp b/src/bin/pg_basebackup/pg_basebackup.cpp index 793b0b065b..4dbb88b57e 100644 --- a/src/bin/pg_basebackup/pg_basebackup.cpp +++ b/src/bin/pg_basebackup/pg_basebackup.cpp @@ -1649,6 +1649,7 @@ 
int main(int argc, char **argv) { progname = get_progname(argv[0]); if (!strcmp(progname, "gs_basebackup")) { + init_audit(progname, argc, argv); return GsBaseBackup(argc, argv); } else if (!strcmp(progname, "gs_tar")) { return GsTar(argc, argv); @@ -2277,7 +2278,7 @@ static int GsBaseBackup(int argc, char** argv) free_basebackup(); pg_log(stderr, _("%s: base backup successfully\n"), progname); - + audit_success(); return 0; } diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 884a4059fb..9d44a2739f 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -746,6 +746,7 @@ int main(int argc, char** argv) // log output redirect init_log((char*)PROG_NAME); + init_audit(PROG_NAME, argc, argv); validatedumpoptions(); /* Identify archive format to emit */ @@ -1230,6 +1231,7 @@ int main(int argc, char** argv) encryptArchive(fout, archiveFormat); write_msg(NULL, "dump database %s successfully\n", dbname); + audit_success(); /* Database Security: Data importing/dumping support AES128. 
*/ gettimeofday(&aes_end_time, NULL); diff --git a/src/bin/pg_dump/pg_dumpall.cpp b/src/bin/pg_dump/pg_dumpall.cpp index 7fb446002c..d4eea2d3a4 100644 --- a/src/bin/pg_dump/pg_dumpall.cpp +++ b/src/bin/pg_dump/pg_dumpall.cpp @@ -310,6 +310,7 @@ int main(int argc, char* argv[]) get_password_pipeline(); } + init_audit(PROG_NAME, argc, argv); /* validate the optons values */ validate_dumpall_options(argv); @@ -571,6 +572,7 @@ int main(int argc, char* argv[]) } write_msg(NULL, "dumpall operation successful\n"); + audit_success(); gettimeofday(&aes_end_time, NULL); total_time = diff --git a/src/bin/pg_dump/pg_restore.cpp b/src/bin/pg_dump/pg_restore.cpp index 5e3ed83185..e306ca36a7 100644 --- a/src/bin/pg_dump/pg_restore.cpp +++ b/src/bin/pg_dump/pg_restore.cpp @@ -260,6 +260,7 @@ int main(int argc, char** argv) (void)fclose(fp); fp = NULL; + init_audit(PROG_NAME, argc, argv); /* validate the restore options before start the actual operation */ validate_restore_options(argv, opts); if (is_encrypt) { @@ -385,6 +386,7 @@ int main(int argc, char** argv) inputFileSpec = NULL; } write_msg(NULL, "restore operation successful\n"); + audit_success(); (void)gettimeofday(&restoreEndTime, NULL); restoreTotalTime = (restoreEndTime.tv_sec - restoreStartTime.tv_sec) * 1000 + (restoreEndTime.tv_usec - restoreStartTime.tv_usec) / 1000; diff --git a/src/bin/pg_probackup/CMakeLists.txt b/src/bin/pg_probackup/CMakeLists.txt index 9d5e9a5d51..062d37c349 100755 --- a/src/bin/pg_probackup/CMakeLists.txt +++ b/src/bin/pg_probackup/CMakeLists.txt @@ -22,7 +22,7 @@ set(TGT_probackup_INC ${ZLIB_INCLUDE_PATH} ${LZ4_INCLUDE_PATH} ${ZSTD_INCLUDE_PA set(probackup_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND -DHAVE_LIBZ) set(probackup_COMPILE_OPTIONS ${PROTECT_OPTIONS} ${BIN_SECURE_OPTIONS} ${OS_OPTIONS} ${WARNING_OPTIONS} ${OPTIMIZE_OPTIONS} ${CHECK_OPTIONS}) set(probackup_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(probackup_LINK_LIBS libpgcommon.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto 
-l${SECURE_C_CHECK} -lrt -lz -lminiunz -llz4 -lpq -lpagecompression -lzstd -laws-cpp-sdk-core -laws-cpp-sdk-s3) +set(probackup_LINK_LIBS libpgcommon.a libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -llz4 -lpq -lpagecompression -lzstd -laws-cpp-sdk-core -laws-cpp-sdk-s3) if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") list(APPEND probackup_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) endif() @@ -31,7 +31,7 @@ list(APPEND probackup_LINK_DIRS ${LIBUWAL_LINK_DIRS}) list(APPEND probackup_LINK_OPTIONS ${LIBUWAL_LINK_OPTIONS}) add_bintarget(gs_probackup TGT_probackup_SRC TGT_probackup_INC "${probackup_DEF_OPTIONS}" "${probackup_COMPILE_OPTIONS}" "${probackup_LINK_OPTIONS}" "${probackup_LINK_LIBS}") -add_dependencies(gs_probackup pq pgport_static pagecompression) +add_dependencies(gs_probackup pq pgport_static elog_static pagecompression) target_link_directories(gs_probackup PUBLIC ${CMAKE_BINARY_DIR}/lib ${LIBOPENSSL_LIB_PATH} ${LIBEDIT_LIB_PATH} ${ZSTD_LIB_PATH} ${ZLIB_LIB_PATH} ${KERBEROS_LIB_PATH} ${LZ4_LIB_PATH} ${SECURE_LIB_PATH} ${probackup_LINK_DIRS} ${AWSSDK_LIB_PATH} diff --git a/src/bin/pg_probackup/Makefile b/src/bin/pg_probackup/Makefile index 5cb45cf0ea..cfdc1b0283 100644 --- a/src/bin/pg_probackup/Makefile +++ b/src/bin/pg_probackup/Makefile @@ -44,6 +44,8 @@ LIBS += -llz4 ifeq ($(enable_lite_mode), no) LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss endif +COMMON_OBJS = $(top_builddir)/src/lib/elog/elog.a + PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_builddir)/$(subdir) -I$(LZ4_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH) -I$(ZSTD_INCLUDE_PATH) # add page_compression so .h LDFLAGS += -L../../lib/page_compression @@ -60,8 +62,8 @@ LDFLAGS += -L$(AWS_SDK_LIB_PATH) -laws-cpp-sdk-core -laws-cpp-sdk-s3 -I$(AWS_SDK all: submake-pagecompression $(PROGRAM) -gs_probackup: $(OBJS) | 
submake-libpq submake-libpgport - $(CC) $(CXXFLAGS) $(OBJS) $(LIBS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) -o $@$(X) +gs_probackup: $(OBJS) $(COMMON_OBJS) | submake-libpq submake-libpgport + $(CC) $(CXXFLAGS) $(OBJS) $(COMMON_OBJS) $(LIBS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) -o $@$(X) configuration.o: datapagemap.h backup.o: receivelog.h streamutil.h @@ -110,3 +112,7 @@ uninstall: .PHONY : clean clean distclean maintainer-clean: rm -f gs_probackup $(OBJS) $(EXTRA_CLEAN) + +# Be sure that the necessary archives are compiled +$(top_builddir)/src/lib/elog/elog.a: + $(MAKE) -C $(top_builddir)/src/lib/elog elog.a \ No newline at end of file diff --git a/src/bin/pg_probackup/logger.cpp b/src/bin/pg_probackup/logger.cpp index f375871045..90536fdea2 100644 --- a/src/bin/pg_probackup/logger.cpp +++ b/src/bin/pg_probackup/logger.cpp @@ -725,24 +725,6 @@ static void open_rotationfile(char *control, fclose(control_file); } -void GenerateProgressBar(int percent, char* progressBar) -{ - if (percent > 100) { - percent = 100; - } - - int barWidth = 50; - int filledWidth = (percent * barWidth) / 100; - - progressBar[0] = '['; - - for (int i = 1; i <= barWidth; i++) { - progressBar[i] = (i <= filledWidth) ? '=' : ' '; - } - - progressBar[barWidth + 1] = ']'; - progressBar[barWidth + 2] = '\0'; -} /* * Closes opened file. 
diff --git a/src/bin/pg_probackup/logger.h b/src/bin/pg_probackup/logger.h index 38fb6e10a9..26ee20ff25 100644 --- a/src/bin/pg_probackup/logger.h +++ b/src/bin/pg_probackup/logger.h @@ -11,6 +11,8 @@ #ifndef LOGGER_H #define LOGGER_H +#include "bin/elog.h" + #define LOG_NONE (-10) /* Log level */ @@ -65,5 +67,4 @@ extern void release_logfile(void); extern int parse_log_level(const char *level); extern const char *deparse_log_level(int level); size_t format_and_insert_text(char *buf, size_t len, const char *fmt, va_list args); -void GenerateProgressBar(int percent, char* progressBar); #endif /* LOGGER_H */ diff --git a/src/bin/pg_probackup/pg_probackup.cpp b/src/bin/pg_probackup/pg_probackup.cpp index d10d119e22..546a789b46 100644 --- a/src/bin/pg_probackup/pg_probackup.cpp +++ b/src/bin/pg_probackup/pg_probackup.cpp @@ -28,6 +28,7 @@ #include "oss/include/restore.h" #define MIN_ULIMIT_STACK_SIZE 8388608 // 1024 * 1024 * 8 +#define PROG_NAME "gs_probackup" const char *PROGRAM_NAME = NULL; /* PROGRAM_NAME_FULL without .exe suffix * if any */ @@ -824,6 +825,7 @@ int main(int argc, char *argv[]) * Make command string before getopt_long() will call. It permutes the * content of argv. */ + init_audit(PROG_NAME, argc, argv); /* TODO why do we do that only for some commands? 
*/ command_name = gs_pstrdup(argv[1]); command = make_command_string(argc, argv); @@ -911,7 +913,11 @@ int main(int argc, char *argv[]) initDataPathStruct(IsDssMode()); /* do actual operation */ - return do_actual_operate(); + errno_t rc = do_actual_operate(); + if (rc == 0) { + audit_success(); + } + return rc; } static void diff --git a/src/include/bin/elog.h b/src/include/bin/elog.h index 7e9072487c..687be16fa4 100644 --- a/src/include/bin/elog.h +++ b/src/include/bin/elog.h @@ -35,4 +35,16 @@ extern void init_log(char* prefix_name); extern void check_env_value_c(const char* input_env_value); extern void check_env_name_c(const char* input_env_value); extern void GenerateProgressBar(int percent, char* progressBar); + +struct auditConfig +{ + bool has_init; + bool is_success; + const char *process_name; + int argc; + char **argv; +}; + +extern void init_audit(const char* prefix_name, int argc, char** argv); //register audit callback on exit +extern void audit_success(); #endif /* COMMON_H */ diff --git a/src/lib/elog/elog.cpp b/src/lib/elog/elog.cpp index 0a91d56099..eb9a1513a3 100644 --- a/src/lib/elog/elog.cpp +++ b/src/lib/elog/elog.cpp @@ -80,6 +80,7 @@ typedef struct ToolLogInfo { #define LOG_MAX_COUNT 50 #define GS_LOCKFILE_SIZE 1024 #define curLogFileMark "-current.log" +#define LOG_DIR_FMT "%s/bin/%s" // optimize,to suppose pirnt to file and screen static bool allow_log_store = false; void check_env_value_c(const char* input_env_value); @@ -602,3 +603,217 @@ void GenerateProgressBar(int percent, char* progressBar) progressBar[barWidth + 1] = ']'; progressBar[barWidth + 2] = '\0'; } + +static FILE *create_audit_file(const char* prefix_name, const char* log_path) { +#define LOG_MAX_SIZE (16 * 1024 * 1024) +#define LOG_MAX_TIMELEN 80 + DIR* dir = NULL; + struct dirent* de = NULL; + bool is_exist = false; + char log_file_name[MAXPGPATH] = {0}; + char log_temp_name[MAXPGPATH] = {0}; + char log_new_name[MAXPGPATH] = {0}; + char log_create_time[LOG_MAX_TIMELEN] = 
{0}; + char current_localtime[LOG_MAX_TIMELEN] = {0}; + // check validity of current log file name + char* name_ptr = NULL; + + FILE *fp; + int ret = 0; + struct stat statbuf; + + pg_time_t current_time; + struct tm tmp; + struct tm* systm = &tmp; + int nRet = 0; + errno_t rc = 0; + + rc = memset_s(&statbuf, sizeof(statbuf), 0, sizeof(statbuf)); + securec_check_c(rc, "\0", "\0"); + + current_time = time(NULL); + localtime_r((const time_t*)¤t_time, systm); + if (NULL != systm) { + (void)strftime(current_localtime, LOG_MAX_TIMELEN, "%Y-%m-%d %H:%M:%S", systm); + (void)strftime(log_create_time, LOG_MAX_TIMELEN, "-%Y-%m-%d_%H%M%S", systm); + } + + if (NULL == (dir = opendir(log_path))) { + printf(_("%s: opendir %s failed! %s\n"), prefix_name, log_path, gs_strerror(errno)); + + return NULL; + } + + while (NULL != (de = readdir(dir))) { + // exist current log file + if (NULL != strstr(de->d_name, prefix_name)) { + name_ptr = strstr(de->d_name, "-current.log"); + if (NULL != name_ptr) { + name_ptr += strlen("-current.log"); + if ('\0' == (*name_ptr)) { + nRet = snprintf_s(log_file_name, MAXPGPATH, MAXPGPATH - 1, "%s/%s", log_path, de->d_name); + securec_check_ss_c(nRet, "\0", "\0"); + is_exist = true; + fp = fopen(log_file_name, "a"); + if (fp == NULL) { + printf(_("%s: open audit file %s failed!\n"), prefix_name, log_file_name); + (void)closedir(dir); + return NULL; + } + + (void)lstat(log_file_name, &statbuf); + if (statbuf.st_size > LOG_MAX_SIZE) { + set_log_filename(log_temp_name, de->d_name); + nRet = snprintf_s(log_new_name, MAXPGPATH, MAXPGPATH - 1, "%s/%s", log_path, log_temp_name); + securec_check_ss_c(nRet, "\0", "\0"); + ret = rename(log_file_name, log_new_name); + if (0 != ret) { + printf(_("%s: rename audit file %s failed!\n"), prefix_name, log_file_name); + (void)closedir(dir); + return NULL; + } + } + (void)closedir(dir); + return fp; + } + } + } + } + // current log file not exist + if (!is_exist) { + nRet = snprintf_s(log_file_name, + MAXPGPATH, + 
MAXPGPATH - 1, + "%s/%s%s%s", + log_path, + prefix_name, + log_create_time, + curLogFileMark); + securec_check_ss_c(nRet, "\0", "\0"); + + fp = fopen(log_file_name, "a"); + if (fp == NULL) { + printf(_("%s: open audit file %s failed!\n"), prefix_name, log_file_name); + (void)closedir(dir); + return NULL; + } + rc = chmod(log_file_name, S_IRUSR | S_IWUSR); + if (rc != 0) { + printf(_("%s: chmod audit file %s failed!\n"), prefix_name, log_file_name); + (void)fclose(fp); + (void)closedir(dir); + return NULL; + } + } + (void)closedir(dir); + return fp; +} + +static void get_cur_time(char * current_localtime) { + pg_time_t current_time = time(NULL); + struct tm tmp; + struct tm* systm = &tmp; + localtime_r((const time_t*)¤t_time, systm); + if (NULL != systm) { + (void)strftime(current_localtime, LOG_MAX_TIMELEN, "%Y-%m-%d %H:%M:%S", systm); + } +} + +static void report_command(FILE *fp, auditConfig *audit_cfg) { + const char* process_name = audit_cfg->process_name; + int argc = audit_cfg->argc; + char** argv = audit_cfg->argv; + bool is_success = audit_cfg->is_success; + + errno_t rc; + char command[MAXPGPATH] = {0}; + + rc = strcat_s(command, MAXPGPATH, process_name); + securec_check_c(rc, "\0", "\0"); + rc = strcat_s(command, MAXPGPATH, " "); + securec_check_c(rc, "\0", "\0"); + for (int i = 1; ipw_name, command); + } else { + (void)fprintf(fp, _("[%s] [%s] %s\n"), current_localtime, (is_success ? 
"SUCCESS" : "FAILURE"), command); + } +} + +static void get_log_dir(const char *process_name, char *log_dir) { + char* gausslog_dir = NULL; + int nRet = 0; + gausslog_dir = gs_getenv_r("GAUSSLOG"); + check_env_value_c(gausslog_dir); + if ((NULL == gausslog_dir) || ('\0' == gausslog_dir[0])) { + return; + } + if (!is_absolute_path(gausslog_dir)) { + printf(_("current path is not absolute path ,can't find the exec path.\n")); + return; + } + nRet = snprintf_s(log_dir, MAXPGPATH, MAXPGPATH - 1, LOG_DIR_FMT, gausslog_dir, process_name); + securec_check_ss_c(nRet, "\0", "\0"); +} + +auditConfig audit_cfg; + +static void audit_report() { + if (!audit_cfg.has_init) { + return; + } + // init log + char log_dir[MAXPGPATH] = {0}; + get_log_dir(audit_cfg.process_name, log_dir); + if ('\0' == log_dir[0]) { + return; + } + + if (0 != pg_mkdir_p(log_dir, S_IRWXU)) { + if (EEXIST != errno) { + printf(_("could not create directory %s: %m\n"), log_dir); + return; + } + } + // create audit file + FILE *fp = create_audit_file("audit", log_dir); + if (fp == NULL) { + printf(_("Warning: create_audit_file failed!\n")); + return; + } + + // audit report + report_command(fp, &audit_cfg); + + //close fd + fclose(fp); +} + +void init_audit(const char* process_name, int argc, char** argv) { + audit_cfg.has_init = true; + audit_cfg.is_success = false; + audit_cfg.process_name = process_name; + audit_cfg.argc = argc; + audit_cfg.argv = argv; + atexit(audit_report); +} + +void audit_success() { + audit_cfg.is_success = true; +} -- Gitee From aa98836587f870a46c3d2cde848a0b814adfd9e3 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 29 Jul 2024 11:40:07 +0800 Subject: [PATCH 113/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8DB=E5=BA=93=E5=B9=B6?= =?UTF-8?q?=E8=A1=8C=E9=80=BB=E8=BE=91=E8=A7=A3=E7=A0=81=E7=9A=84core?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/init/postinit.cpp | 2 
++ src/gausskernel/process/tcop/postgres.cpp | 5 +++++ .../storage/replication/logical/parallel_decode_worker.cpp | 4 ++++ 3 files changed, 11 insertions(+) diff --git a/src/common/backend/utils/init/postinit.cpp b/src/common/backend/utils/init/postinit.cpp index fa9642a3ab..ee1cc69217 100644 --- a/src/common/backend/utils/init/postinit.cpp +++ b/src/common/backend/utils/init/postinit.cpp @@ -1465,6 +1465,8 @@ void PostgresInitializer::InitParallelDecode() InitSettings(); + InitExtensionVariable(); + FinishInit(); } diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp index 1a8ea3221d..feef906f28 100755 --- a/src/gausskernel/process/tcop/postgres.cpp +++ b/src/gausskernel/process/tcop/postgres.cpp @@ -7703,6 +7703,11 @@ void LoadSqlPlugin() ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("Please use the original role to connect B-compatibility database first, to load extension dolphin"))); } + + /* Creating extension dolphin must init mask_password_mem_cxt before */ + Assert(t_thrd.mem_cxt.mask_password_mem_cxt != NULL); + Assert(IsNormalProcessingMode()); + /* recheck and load dolphin within lock */ t_thrd.utils_cxt.holdLoadPluginLock[DB_CMPT_B] = true; pthread_mutex_lock(&g_instance.loadPluginLock[DB_CMPT_B]); diff --git a/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp b/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp index 301f9e908b..075203fa96 100644 --- a/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp +++ b/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp @@ -1107,6 +1107,10 @@ void ParallelDecodeWorkerMain(void* point) pgstat_report_appname("LogicalDecodeWorker"); pgstat_report_activity(STATE_IDLE, NULL); +#if (!defined(ENABLE_MULTIPLE_NODES)) && (!defined(ENABLE_PRIVATEGAUSS)) + LoadSqlPlugin(); +#endif + t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel decoder resource owner", 
THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); -- Gitee From ab44cdeea802d04557c4113aedc8a8d2838e313e Mon Sep 17 00:00:00 2001 From: "arcoalien@qq.com" Date: Tue, 30 Jul 2024 17:24:02 +0800 Subject: [PATCH 114/347] =?UTF-8?q?=E5=9B=9E=E9=80=80repl=5Fscanner.l?= =?UTF-8?q?=E7=9A=84=E4=BF=AE=E6=94=B9=EF=BC=8C=E5=9B=A0=E4=B8=BA=E9=A9=B1?= =?UTF-8?q?=E5=8A=A8=E4=B8=AD=E5=9C=A8=E5=BB=BA=E7=AB=8B=E8=BF=9E=E6=8E=A5?= =?UTF-8?q?=E6=97=B6=E4=BC=9A=E5=8F=91sql?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/replication/repl_scanner.l | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/gausskernel/storage/replication/repl_scanner.l b/src/gausskernel/storage/replication/repl_scanner.l index 46107a39c6..bcfc125d8a 100755 --- a/src/gausskernel/storage/replication/repl_scanner.l +++ b/src/gausskernel/storage/replication/repl_scanner.l @@ -202,13 +202,7 @@ WAIT { return K_WAIT; } } . { - if (u_sess->proc_cxt.clientIsSubscription) { - return T_WORD; - } - - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("syntax error: unexpected character \"%s\"", yytext))); + return T_WORD; } %% -- Gitee From b753ac26c4f10f5da274732bf71f6651adb9498e Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Sat, 27 Jul 2024 18:08:50 +0800 Subject: [PATCH 115/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E7=AC=AC=E4=BA=8C?= =?UTF-8?q?=E4=B8=AA=E5=8F=82=E6=95=B0=E4=B8=BA=E5=88=97=E5=BC=95=E7=94=A8?= =?UTF-8?q?=E6=97=B6=E7=9A=84=E5=A4=84=E7=90=86=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/builtin_funcs.ini | 12 +- src/common/backend/parser/gram.y | 6 +- src/common/backend/utils/adt/float.cpp | 29 ++++- .../rollback-post_catalog_maindb_92_940.sql | 8 +- .../rollback-post_catalog_otherdb_92_940.sql | 8 +- .../upgrade-post_catalog_maindb_92_940.sql | 30 ++--- 
.../upgrade-post_catalog_otherdb_92_940.sql | 30 ++--- .../regress/expected/func_to_binary_float.out | 122 +++++++++++++++++- src/test/regress/sql/func_to_binary_float.sql | 21 ++- 9 files changed, 209 insertions(+), 57 deletions(-) diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 76322cf17b..40be2370af 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -12117,12 +12117,12 @@ AddFuncGroup( ), AddFuncGroup( "to_binary_float", 6, - AddBuiltinFunc(_0(7012), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_text), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 25, 25, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_text"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(7013), _1("to_binary_float"), _2(1), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, ' ', false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(7014), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_number), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), 
_12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 701, 701, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_number"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(7015), _1("to_binary_float"), _2(1), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, 0, false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(7016), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 701, 25, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, 0, false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(7017), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_text_number), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 25, 701, 16), 
_21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_text_number"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(7012), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_text), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(4, 25, 25, 16, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_text"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7013), _1("to_binary_float"), _2(1), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, ' ', false, false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a single precision floating-point number"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7014), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_number), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(4, 701, 701, 16, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_number"), _26(NULL), _27(NULL), 
_28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7015), _1("to_binary_float"), _2(1), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, 0, false, false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7016), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(NULL), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(4, 701, 25, 16, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select pg_catalog.to_binary_float($1, 0, false, false)"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert float8 to a single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(7017), _1("to_binary_float"), _2(3), _3(false), _4(false), _5(to_binary_float_text_number), _6(700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(4, 25, 701, 16, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("to_binary_float_text_number"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("convert text to a 
single precision floating-point number, with default return expr on convert error"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "to_char", 11, diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index a75f413607..3a00caa610 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -27770,16 +27770,12 @@ func_application_special: func_name '(' ')' (errcode(ERRCODE_SYNTAX_ERROR), errmsg("The syntax or function is not supported. \"%s\"", $4))); } - if (IsA($5, ColumnRef)) { - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("Default param can't be ColumnRef"))); - } FuncCall *n = makeNode(FuncCall); n->funcname = $1; n->args = lappend($3, $5); n->args = lappend(n->args, makeBoolAConst(TRUE, -1)); + n->args = lappend(n->args, makeBoolAConst(IsA($5, ColumnRef), -1)); n->agg_order = $9; n->agg_star = FALSE; n->agg_distinct = FALSE; diff --git a/src/common/backend/utils/adt/float.cpp b/src/common/backend/utils/adt/float.cpp index 70f5bf05c1..9e84c0a5ae 100644 --- a/src/common/backend/utils/adt/float.cpp +++ b/src/common/backend/utils/adt/float.cpp @@ -3035,6 +3035,11 @@ static double to_binary_float_internal(char* origin_num, bool *err) /* * to_binary_float_text() - convert to a single precision floating-point number. + * + * arg[0]: input arg; + * arg[1]: default arg; + * arg[2]: has default arg; + * arg[3]: default is column ref. 
*/ Datum to_binary_float_text(PG_FUNCTION_ARGS) { @@ -3046,6 +3051,13 @@ Datum to_binary_float_text(PG_FUNCTION_ARGS) double result, r1, r2; bool err1, err2; + // if default arg is col, report error + if (with_default && PG_GETARG_BOOL(3)) { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("default argument must be a literal or bind"))); + } + err1 = true; if (!str1_null) { num1 = TextDatumGetCString(PG_GETARG_TEXT_P(0)); @@ -3111,8 +3123,9 @@ static double handle_float4_overflow(double val) */ Datum to_binary_float_number(PG_FUNCTION_ARGS) { - if (PG_ARGISNULL(0)) + if (PG_ARGISNULL(0)) { PG_RETURN_NULL(); + } float8 val = handle_float4_overflow(PG_GETARG_FLOAT8(0)); @@ -3121,15 +3134,21 @@ Datum to_binary_float_number(PG_FUNCTION_ARGS) Datum to_binary_float_text_number(PG_FUNCTION_ARGS) { - if (PG_ARGISNULL(0)) - PG_RETURN_NULL(); - bool with_default = PG_GETARG_BOOL(2); - char *num; double result; bool err; + if (with_default && PG_GETARG_BOOL(3)) { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("default argument must be a literal or bind"))); + } + + if (PG_ARGISNULL(0)) { + PG_RETURN_NULL(); + } + err = false; num = TextDatumGetCString(PG_GETARG_TEXT_P(0)); result = to_binary_float_internal(num, &err); diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql index 0759064022..eb515df38b 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_940.sql @@ -2,10 +2,10 @@ DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, 
bool, bool) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool, bool) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool, bool) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool, bool) CASCADE; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql index 0759064022..eb515df38b 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_940.sql @@ -2,10 +2,10 @@ DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool, bool) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool, bool) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool, bool) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool, bool) CASCADE; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql 
b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql index 634287732d..f0a1921de3 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_940.sql @@ -1,11 +1,11 @@ -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7012; -CREATE FUNCTION pg_catalog.to_binary_float(text, text, bool) +CREATE FUNCTION pg_catalog.to_binary_float(text, text, bool, bool) RETURNS float4 as 'to_binary_float_text' LANGUAGE INTERNAL IMMUTABLE; -COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; @@ -14,21 +14,21 @@ CREATE FUNCTION pg_catalog.to_binary_float(text) RETURNS float4 AS $$ BEGIN - RETURN (select pg_catalog.to_binary_float($1, ' ', false)); + RETURN (select pg_catalog.to_binary_float($1, ' ', false, false)); END; $$ LANGUAGE plpgsql; COMMENT ON FUNCTION pg_catalog.to_binary_float(text) IS 'convert text to a single precision floating-point number'; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7014; -CREATE FUNCTION pg_catalog.to_binary_float(float8, float8, bool) +CREATE FUNCTION pg_catalog.to_binary_float(float8, float8, bool, bool) RETURNS float4 as 'to_binary_float_number' LANGUAGE 
INTERNAL IMMUTABLE; -COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; @@ -37,31 +37,31 @@ CREATE FUNCTION pg_catalog.to_binary_float(float8) RETURNS float4 AS $$ BEGIN - RETURN (select pg_catalog.to_binary_float($1, 0, false)); + RETURN (select pg_catalog.to_binary_float($1, 0, false, false)); END; $$ LANGUAGE plpgsql; COMMENT ON FUNCTION pg_catalog.to_binary_float(float8) IS 'convert float8 to a single precision floating-point number'; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7016; -CREATE FUNCTION pg_catalog.to_binary_float(float8, text, bool) +CREATE FUNCTION pg_catalog.to_binary_float(float8, text, bool, bool) RETURNS float4 AS $$ BEGIN - RETURN (select pg_catalog.to_binary_float($1, 0, false)); + RETURN (select pg_catalog.to_binary_float($1, 0, false, false)); END; $$ LANGUAGE plpgsql; -COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7017; -CREATE FUNCTION 
pg_catalog.to_binary_float(text, float8, bool) +CREATE FUNCTION pg_catalog.to_binary_float(text, float8, bool, bool) RETURNS float4 as 'to_binary_float_text_number' LANGUAGE INTERNAL IMMUTABLE; -COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql index 634287732d..f0a1921de3 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_940.sql @@ -1,11 +1,11 @@ -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, text, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7012; -CREATE FUNCTION pg_catalog.to_binary_float(text, text, bool) +CREATE FUNCTION pg_catalog.to_binary_float(text, text, bool, bool) RETURNS float4 as 'to_binary_float_text' LANGUAGE INTERNAL IMMUTABLE; -COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text) CASCADE; @@ -14,21 +14,21 @@ CREATE FUNCTION pg_catalog.to_binary_float(text) RETURNS float4 AS $$ BEGIN - RETURN (select pg_catalog.to_binary_float($1, 
' ', false)); + RETURN (select pg_catalog.to_binary_float($1, ' ', false, false)); END; $$ LANGUAGE plpgsql; COMMENT ON FUNCTION pg_catalog.to_binary_float(text) IS 'convert text to a single precision floating-point number'; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, float8, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7014; -CREATE FUNCTION pg_catalog.to_binary_float(float8, float8, bool) +CREATE FUNCTION pg_catalog.to_binary_float(float8, float8, bool, bool) RETURNS float4 as 'to_binary_float_number' LANGUAGE INTERNAL IMMUTABLE; -COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8) CASCADE; @@ -37,31 +37,31 @@ CREATE FUNCTION pg_catalog.to_binary_float(float8) RETURNS float4 AS $$ BEGIN - RETURN (select pg_catalog.to_binary_float($1, 0, false)); + RETURN (select pg_catalog.to_binary_float($1, 0, false, false)); END; $$ LANGUAGE plpgsql; COMMENT ON FUNCTION pg_catalog.to_binary_float(float8) IS 'convert float8 to a single precision floating-point number'; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(float8, text, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7016; -CREATE FUNCTION pg_catalog.to_binary_float(float8, text, bool) +CREATE FUNCTION pg_catalog.to_binary_float(float8, text, bool, bool) RETURNS float4 AS $$ BEGIN - RETURN (select pg_catalog.to_binary_float($1, 0, false)); + RETURN (select pg_catalog.to_binary_float($1, 0, false, false)); 
END; $$ LANGUAGE plpgsql; -COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(float8, float8, bool, bool) IS 'convert float8 to a single precision floating-point number, with default return expr on convert error'; -DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.to_binary_float(text, float8, bool, bool) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 7017; -CREATE FUNCTION pg_catalog.to_binary_float(text, float8, bool) +CREATE FUNCTION pg_catalog.to_binary_float(text, float8, bool, bool) RETURNS float4 as 'to_binary_float_text_number' LANGUAGE INTERNAL IMMUTABLE; -COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; +COMMENT ON FUNCTION pg_catalog.to_binary_float(text, text, bool, bool) IS 'convert text to a single precision floating-point number, with default return expr on convert error'; diff --git a/src/test/regress/expected/func_to_binary_float.out b/src/test/regress/expected/func_to_binary_float.out index 5732a29c4f..d11c3ad1b7 100644 --- a/src/test/regress/expected/func_to_binary_float.out +++ b/src/test/regress/expected/func_to_binary_float.out @@ -175,8 +175,19 @@ SELECT TO_BINARY_FLOAT(c1 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1 8 (8 rows) -SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- errorя╝М column can't be default param -ERROR: Default param can't be ColumnRef +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error, column can't be default param + to_binary_float +----------------- + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 +(8 rows) + SELECT TO_BINARY_FLOAT(c2 DEFAULT 3.14 ON 
CONVERSION ERROR) FROM tbf ORDER By c1; to_binary_float ----------------- @@ -434,6 +445,7 @@ SELECT TO_BINARY_FLOAT(' 6.66 ' DEFAULT 3.14 ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT('today' DEFAULT 'roll' ON CONVERSION ERROR); -- error ERROR: invalid input syntax for type real CONTEXT: referenced column: to_binary_float +<<<<<<< HEAD SELECT TO_BINARY_FLOAT(3.402E+100); to_binary_float ----------------- @@ -446,6 +458,112 @@ SELECT TO_BINARY_FLOAT('test' DEFAULT 3.04E+100 ON CONVERSION ERROR); Infinity (1 row) +======= +-- test default column +SELECT TO_BINARY_FLOAT(1.79E+309 DEFAULT y ON CONVERSION ERROR); +ERROR: column "y" does not exist +LINE 1: SELECT TO_BINARY_FLOAT(1.79E+309 DEFAULT y ON CONVERSION ERR... + ^ +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(c3 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 1.23 + 3.14157 + 2.02405e+11 + + + NaN + Infinity + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(c4 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 +(8 rows) + +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 + 3.14 +(8 rows) + +SELECT TO_BINARY_FLOAT(3.14E+100 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + Infinity + Infinity + Infinity + Infinity + Infinity + Infinity + Infinity + Infinity +(8 rows) + +SELECT TO_BINARY_FLOAT(3.14E+100 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; + to_binary_float +----------------- + Infinity + Infinity + Infinity + Infinity + Infinity + Infinity + Infinity + Infinity +(8 rows) + +SELECT 
TO_BINARY_FLOAT(3.14E+400 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- overflow +ERROR: "31400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- overflow +ERROR: "31400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('3.14' DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('3.14' DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('3.14E+100' DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('3.14E+100' DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('3.14E+400' 
DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('3.14E+400' DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(NULL DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(NULL DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error +ERROR: default argument must be a literal or bind +CONTEXT: referenced column: to_binary_float +>>>>>>> 1073a6efb (修改第二个参数为列引用时的处理逻辑) -- test overflow and null SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); to_binary_float diff --git a/src/test/regress/sql/func_to_binary_float.sql b/src/test/regress/sql/func_to_binary_float.sql index 23416193b6..625f527cc6 100644 --- a/src/test/regress/sql/func_to_binary_float.sql +++ b/src/test/regress/sql/func_to_binary_float.sql @@ -43,7 +43,7 @@ SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By SELECT TO_BINARY_FLOAT(c1 DEFAULT '3.14FDW' ON CONVERSION ERROR) FROM tbf ORDER By c1; SELECT TO_BINARY_FLOAT(c1 DEFAULT NULL ON CONVERSION ERROR) FROM tbf ORDER By c1; -SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- errorя╝М column can't be default param +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c1 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error, column can't be default param SELECT TO_BINARY_FLOAT(c2 DEFAULT 3.14 ON CONVERSION ERROR) FROM tbf ORDER By c1; SELECT TO_BINARY_FLOAT(c2 DEFAULT '3.14' ON CONVERSION ERROR) FROM tbf ORDER By c1; @@ -78,6 +78,25 @@ SELECT TO_BINARY_FLOAT('today' DEFAULT 'roll' ON CONVERSION ERROR); -- error SELECT TO_BINARY_FLOAT(3.402E+100); SELECT TO_BINARY_FLOAT('test' DEFAULT 3.04E+100 
ON CONVERSION ERROR); +-- test default column +SELECT TO_BINARY_FLOAT(1.79E+309 DEFAULT y ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(c3 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(c4 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(3.14 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(3.14E+100 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(3.14E+100 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; +SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- overflow +SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- overflow +SELECT TO_BINARY_FLOAT('3.14' DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT('3.14' DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT('3.14E+100' DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT('3.14E+100' DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT('3.14E+400' DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT('3.14E+400' DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error +SELECT TO_BINARY_FLOAT(NULL DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error +SELECT TO_BINARY_FLOAT(NULL DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error + -- test overflow and null SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT(2.22507485850720E-100 DEFAULT 3.14 ON CONVERSION ERROR); -- Gitee From f503dfa1da20164bdc228d90e0cbfed2d8263ceb Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Tue, 30 Jul 2024 19:27:38 +0800 Subject: [PATCH 116/347] bugfix: error code --- 
src/test/regress/expected/func_to_binary_float.out | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/test/regress/expected/func_to_binary_float.out b/src/test/regress/expected/func_to_binary_float.out index d11c3ad1b7..3fcd424021 100644 --- a/src/test/regress/expected/func_to_binary_float.out +++ b/src/test/regress/expected/func_to_binary_float.out @@ -445,7 +445,6 @@ SELECT TO_BINARY_FLOAT(' 6.66 ' DEFAULT 3.14 ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT('today' DEFAULT 'roll' ON CONVERSION ERROR); -- error ERROR: invalid input syntax for type real CONTEXT: referenced column: to_binary_float -<<<<<<< HEAD SELECT TO_BINARY_FLOAT(3.402E+100); to_binary_float ----------------- @@ -458,7 +457,6 @@ SELECT TO_BINARY_FLOAT('test' DEFAULT 3.04E+100 ON CONVERSION ERROR); Infinity (1 row) -======= -- test default column SELECT TO_BINARY_FLOAT(1.79E+309 DEFAULT y ON CONVERSION ERROR); ERROR: column "y" does not exist @@ -563,7 +561,6 @@ CONTEXT: referenced column: to_binary_float SELECT TO_BINARY_FLOAT(NULL DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error ERROR: default argument must be a literal or bind CONTEXT: referenced column: to_binary_float ->>>>>>> 1073a6efb (修改第二个参数为列引用时的处理逻辑) -- test overflow and null SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); to_binary_float -- Gitee From 0a32e3913ba48a434699822752a28f3091e5cb86 Mon Sep 17 00:00:00 2001 From: lukeman Date: Mon, 29 Jul 2024 21:14:36 +0800 Subject: [PATCH 117/347] =?UTF-8?q?=E6=94=AF=E6=8C=81mysql=E4=B8=AD?= =?UTF-8?q?=E7=9A=84select=20@@IDENTITY=20=E8=AF=AD=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index 12ababad01..17de120b48 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ 
-9076,6 +9076,11 @@ void ExecSetVariableStmt(VariableSetStmt* stmt, ParamListInfo paramInfo) process_set_names_collate(stmt, action); break; } + if (strcasecmp(stmt->name, "identity") == 0 || + strcasecmp(stmt->name, "last_insert_id") == 0) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("identity and last_insert_id is not supported for setting"))); + } (void)set_config_option(stmt->name, ExtractSetVariableArgs(stmt), ((superuser() || (isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) ? @@ -10617,6 +10622,10 @@ static char* _ShowOption(struct config_generic* record, bool use_units, bool is_ if (conf->show_hook && is_show) val = (*conf->show_hook)(); + else if (strcasecmp(record->name, "identity") == 0 || + strcasecmp(record->name, "last_insert_id") == 0) { + val = (*conf->show_hook)(); + } else if (*conf->variable && **conf->variable) val = *conf->variable; else -- Gitee From 2e02f74c3a26d3a1723f25d9d01b1b3fdca93a9e Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Wed, 5 Jun 2024 11:02:25 +0800 Subject: [PATCH 118/347] =?UTF-8?q?=E3=80=90feature=E3=80=91=E6=8C=89?= =?UTF-8?q?=E9=9C=80=E5=9B=9E=E6=94=BEredo=E9=98=B6=E6=AE=B5=E9=80=82?= =?UTF-8?q?=E9=85=8Dddl?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/process/tcop/utility.cpp | 644 +++++++++++++++++- .../ondemand_extreme_rto/page_redo.cpp | 20 +- .../ondemand_extreme_rto/redo_utils.cpp | 113 ++- .../storage/access/transam/xlog.cpp | 1 - src/gausskernel/storage/lmgr/lwlock.cpp | 10 +- .../access/ondemand_extreme_rto/batch_redo.h | 2 + src/include/storage/buf/bufmgr.h | 2 + src/include/storage/lock/lwlock.h | 9 +- 8 files changed, 784 insertions(+), 17 deletions(-) diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index 811ccec86e..12154a8aca 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -26,6 +26,7 
@@ #include "access/transam.h" #include "access/twophase.h" #include "access/xact.h" +#include "access/multi_redo_api.h" #include "access/xlog.h" #include "catalog/catalog.h" #include "catalog/index.h" @@ -123,6 +124,7 @@ #include "tsdb/optimizer/policy.h" #include "tsdb/time_bucket.h" #include "streaming/streaming_catalog.h" +#include "storage/buf/bufmgr.h" #endif /* ENABLE_MULTIPLE_NODES */ #ifdef PGXC #include "pgxc/barrier.h" @@ -249,6 +251,9 @@ extern void ts_check_feature_disable(); static bool IsAllTempObjectsInVacuumStmt(Node* parsetree); static int64 getCopySequenceMaxval(const char *nspname, const char *relname, const char *colname); static int64 getCopySequenceCountval(const char *nspname, const char *relname); +static void PreRedoInOndemandRecovery(Node* parse_tree); +static void PreRedoIndexInOndemandRecovery(Oid indexId); +static void PreRedoTableInOndemandRecovery(Oid relId); /* the hash value of extension script */ #define POSTGIS_VERSION_NUM 2 @@ -647,19 +652,36 @@ void PreventCommandDuringRecovery(const char* cmd_name) void PreventCommandDuringSSOndemandRedo(Node* parseTree) { switch(nodeTag(parseTree)) { + case T_AlterSchemaStmt: case T_InsertStmt: case T_DeleteStmt: case T_UpdateStmt: case T_SelectStmt: + case T_AlterTableStmt: + case T_CreateStmt: /* no need to adapt */ + case T_DropStmt: + case T_IndexStmt: + case T_CreateFunctionStmt: /* no need to adapt */ + case T_AlterFunctionStmt: /* no need to adapt */ + case T_CompileStmt: + case T_RenameStmt: case T_TransactionStmt: + case T_ViewStmt: /* no need to adapt */ + case T_CreateTableAsStmt: case T_VariableSetStmt: case T_VariableShowStmt: + case T_ReindexStmt: + case T_CreateSchemaStmt: + case T_AlterDatabaseStmt: /* no need to adapt */ + case T_AlterDatabaseSetStmt: /* no need to adapt */ + case T_AlterObjectSchemaStmt: + case T_AlterOwnerStmt: break; default: if (SS_IN_ONDEMAND_RECOVERY) { ereport(ERROR, (errcode(ERRCODE_RUN_TRANSACTION_DURING_RECOVERY), - errmsg("only support 
INSERT/UPDATE/DELETE/SELECT/SET/SHOW during SS on-demand recovery, " + errmsg("only support INSERT/UPDATE/DELETE/SELECT/SET/SHOW/CALL, and ALTER/CREATE/DROP on Table/Index/View/Procedure/Schema, and ALTER on Database during SS on-demand recovery, " "command %d", nodeTag(parseTree)))); } break; @@ -2665,6 +2687,8 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, if (completion_tag != NULL) completion_tag[0] = '\0'; + PreRedoInOndemandRecovery(parse_tree); + errno_t errorno = EOK; switch (nodeTag(parse_tree)) { /* @@ -13879,3 +13903,621 @@ static int64 getCopySequenceMaxval(const char *nspname, const char *relname, con SPI_finish(); return DatumGetInt64(attval); } + +struct OndemandParseInfo { + NodeTag parseType; + ObjectType objectType; + RangeVar* relationRangeVar; +}; + +static List* AppendItemToOndemandParseList(List* ondemandParseList, NodeTag parseType, ObjectType objectType, RangeVar* relationRangeVar) { + OndemandParseInfo* info = (OndemandParseInfo*)palloc(sizeof(OndemandParseInfo)); + info->parseType = parseType; + info->objectType = objectType; + info->relationRangeVar = relationRangeVar; + ondemandParseList = lappend(ondemandParseList, info); + return ondemandParseList; +} + +/** + * @brief Get the table by index rangeVar, where the index is on, + * and pre redo the table. 
+ * + * @param ondemandParseInfo + */ +static void PreRedoRelationByIndexRangeVar(OndemandParseInfo* ondemandParseInfo) { + Oid relId = RangeVarGetRelid(ondemandParseInfo->relationRangeVar, NoLock, true); + if (!OidIsValid(relId)) { + ereport(LOG, + (errmsg("[On-demand] The relation doesn't exists, no need to redo, relname: %s.", + ondemandParseInfo->relationRangeVar->relname))); + return; + } + Oid relationOid = IndexGetRelation(relId, true); + if (OidIsValid(relationOid)) { + PreRedoTableInOndemandRecovery(relationOid); + } else { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", relId))); + } +} + +/** + * @brief pre redo in ondemand redo phase before execute ddl. + * + * @param parseTree + */ +static void PreRedoInOndemandRecovery(Node* parseTree) { + if (!ENABLE_ONDEMAND_RECOVERY || !SS_IN_ONDEMAND_RECOVERY) { + return; + } + + List* ondemandParseList = NULL; + switch (nodeTag(parseTree)) { + /* ALTER PROCEDURE XXX COMPLIE */ + case T_CompileStmt: { + CompileStmt* compileStmt = (CompileStmt*) parseTree; + switch (compileStmt->compileItem) { + case COMPILE_PROCEDURE: { + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + parseTree->type, compileStmt->compileItem))); + break; + } + } + break; + } + /* ALTER TABLE */ + case T_AlterTableStmt: { + AlterTableStmt* alterTableStmt = (AlterTableStmt*) parseTree; + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_AlterTableStmt, + alterTableStmt->relkind, alterTableStmt->relation); + break; + } + /* DROP INDEX/TABLE/PROCEDURE/VIEW/SCHEMA */ + case T_DropStmt: { + DropStmt* dropStmt = (DropStmt*) parseTree; + ListCell* cell = NULL; + foreach (cell, dropStmt->objects) { + RangeVar* relationRangeVar = makeRangeVarFromNameList((List*)lfirst(cell)); + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, 
T_DropStmt, + dropStmt->removeType, relationRangeVar); + } + break; + } + /* CREATE INDEX */ + case T_IndexStmt: { + IndexStmt* indexStmt = (IndexStmt*) parseTree; + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_IndexStmt, + OBJECT_INDEX, indexStmt->relation); + break; + } + /* ALTER DATABASE/PROCEDURE/TABLE/INDEX/VIEW RENAME TO */ + case T_RenameStmt: { + RenameStmt* renameStmt = (RenameStmt*) parseTree; + if (renameStmt->renameType == OBJECT_PARTITION || renameStmt->renameType == OBJECT_COLUMN) { + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_RenameStmt, + renameStmt->relationType, renameStmt->relation); + } else { + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_RenameStmt, + renameStmt->renameType, renameStmt->relation); + } + + break; + } + /*ALTER TABLE/INDEX REBUILD */ + case T_ReindexStmt: { + ReindexStmt* reindexStmt = (ReindexStmt*) parseTree; + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_ReindexStmt, + reindexStmt->kind, reindexStmt->relation); + break; + } + /* ALTER TABLE/INDEX/PROCEDURE/ SET SCHEMA */ + case T_AlterObjectSchemaStmt: { + AlterObjectSchemaStmt* alterObjectSchemaStmt = (AlterObjectSchemaStmt*) parseTree; + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_AlterObjectSchemaStmt, + alterObjectSchemaStmt->objectType, alterObjectSchemaStmt->relation); + break; + } + case T_AlterOwnerStmt: { + AlterOwnerStmt* alterOwnerStmt = (AlterOwnerStmt*) parseTree; + ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_AlterObjectSchemaStmt, + alterOwnerStmt->objectType, NULL); + break; + } + /** + * Some syntax types no need to adapt, in ondemand reovery phase. + * See PreventCommandDuringSSOndemandRedo to get all the support syntax type. 
+ */ + default: + break; + } + + ListCell* cell = NULL; + foreach (cell, ondemandParseList) { + OndemandParseInfo* ondemandParseInfo = (OndemandParseInfo*)lfirst(cell); + switch (ondemandParseInfo->objectType) { + case OBJECT_DATABASE: { + switch (ondemandParseInfo->parseType) { + case T_RenameStmt: + case T_AlterOwnerStmt: { + break; + } + case T_ReindexStmt: + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_FUNCTION: { + switch (ondemandParseInfo->parseType) { + case T_DropStmt: + case T_RenameStmt: + case T_AlterObjectSchemaStmt: + case T_AlterOwnerStmt: { + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_INDEX: { + switch (ondemandParseInfo->parseType) { + case T_AlterTableStmt: + case T_DropStmt: { + Oid relId = RangeVarGetRelid(ondemandParseInfo->relationRangeVar, NoLock, true); + if (!OidIsValid(relId)) { + ereport(LOG, + (errmsg("[On-demand] The relation doesn't exists, no need to redo, relname: %s.", ondemandParseInfo->relationRangeVar->relname))); + break; + } + PreRedoIndexInOndemandRecovery(relId); + break; + } + case T_IndexStmt: { + RangeVar* relationRangeVar = ondemandParseInfo->relationRangeVar; + char* relname = relationRangeVar->relname; + Oid namespaceId = RangeVarGetCreationNamespace(relationRangeVar); + HeapTuple tuple = SearchSysCache2(RELNAMENSP, PointerGetDatum(relationRangeVar->relname), ObjectIdGetDatum(namespaceId)); + if (!HeapTupleIsValid(tuple)) { + ereport(LOG, + (errmsg("[On-demand] The relation doesn't exists, no need to redo, relname: %s.", + relname))); + break; + } + Oid 
relId = HeapTupleGetOid(tuple); + ReleaseSysCache(tuple); + /* + * Pre-redo the relation which this index is on before create index. + * We will pre-redo all the relation and partition associated with the relation. + */ + PreRedoTableInOndemandRecovery(relId); + break; + } + case T_ReindexStmt: + case T_RenameStmt: { + PreRedoRelationByIndexRangeVar(ondemandParseInfo); + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_INDEX_PARTITION: { + switch (ondemandParseInfo->parseType) { + case T_ReindexStmt: { + PreRedoRelationByIndexRangeVar(ondemandParseInfo); + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_SCHEMA: { + switch (ondemandParseInfo->parseType) { + case T_DropStmt: { + RangeVar* schemaRangeVar = ondemandParseInfo->relationRangeVar; + char* schemaName = schemaRangeVar->relname; + Oid schemaOid = get_namespace_oid(schemaName, true); + if (!OidIsValid(schemaOid)) { + ereport(LOG, + (errmsg("[On-demand] The schema doesn't exists, no need to redo, schemaname: %s.", schemaName))); + break; + } + RedoDatabaseForOndemandExtremeRTO(u_sess->proc_cxt.MyDatabaseId); + break; + } + case T_RenameStmt: + case T_AlterOwnerStmt: { + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_PARTITION_INDEX: { + switch (ondemandParseInfo->parseType) { + case T_RenameStmt: { + Oid 
relId = RangeVarGetRelid(ondemandParseInfo->relationRangeVar, NoLock, true); + if (!OidIsValid(relId)) { + ereport(LOG, + (errmsg("[On-demand] The relation doesn't exists, no need to redo, relname: %s.", ondemandParseInfo->relationRangeVar->relname))); + break; + } + Oid relationOid = IndexGetRelation(relId, true); + if (OidIsValid(relationOid)) { + PreRedoTableInOndemandRecovery(relationOid); + } else { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", relId))); + } + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_TABLE: { + switch (ondemandParseInfo->parseType) { + case T_AlterTableStmt: + case T_DropStmt: + case T_RenameStmt: + case T_ReindexStmt: + case T_AlterObjectSchemaStmt: { + Oid relId = RangeVarGetRelid(ondemandParseInfo->relationRangeVar, NoLock, true); + if (!OidIsValid(relId)) { + ereport(LOG, + (errmsg("[On-demand] The relation doesn't exists, no need to redo, relname: %s.", ondemandParseInfo->relationRangeVar->relname))); + break; + } + PreRedoTableInOndemandRecovery(relId); + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_TABLE_PARTITION: { + switch (ondemandParseInfo->parseType) { + case T_ReindexStmt: { + Oid relId = RangeVarGetRelid(ondemandParseInfo->relationRangeVar, NoLock, true); + if (!OidIsValid(relId)) { + ereport(LOG, + (errmsg("[On-demand] The relation doesn't exists, no need to redo, relname: %s.", ondemandParseInfo->relationRangeVar->relname))); + break; + } + PreRedoTableInOndemandRecovery(relId); + } + default: 
{ + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + case OBJECT_VIEW: { + switch (ondemandParseInfo->parseType) { + case T_AlterTableStmt: + case T_DropStmt: + case T_RenameStmt: + case T_AlterObjectSchemaStmt: { + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + break; + } + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", + ondemandParseInfo->parseType, ondemandParseInfo->objectType))); + break; + } + } + } + list_free_deep(ondemandParseList); +} + +/** + * @brief Find out all the index and the partition of inex on of target relation, and pre + * redo the before execute ddl. 
+ * + * @param relation target relation + * @return int + */ +int PreRedoIndexByRelation(Relation relation) { + Assert(RelationIsValid(relation)); + int indexNum = 0; + List* indexList = RelationGetIndexList(relation); + ListCell* indexCell = NULL; + + if (indexList == NULL || indexList->length == 0) { + return 0; + } + + foreach (indexCell, indexList) { + Oid indexOid = lfirst_oid(indexCell); + Relation indexRelation; + indexRelation = index_open(indexOid, ExclusiveLock); + + if (indexRelation->rd_rel->relkind != RELKIND_INDEX || RelationIsNonpartitioned(indexRelation)) { + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(indexRelation); + } + indexNum++; + } else { + List *partitionTupleList = NULL; + Partition partition = NULL; + partitionTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_INDEX_PARTITION, indexRelation->rd_id); + + Assert(PointerIsValid(partitionTupleList)); + ListCell *partitionCell = NULL; + foreach (partitionCell, partitionTupleList) { + HeapTuple tuple = (HeapTuple)lfirst(partitionCell); + Oid partitionId = HeapTupleGetOid(tuple); + partition = partitionOpen(indexRelation, partitionId, ExclusiveLock); + Relation partitionRelation = partitionGetRelation(indexRelation, partition); + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(partitionRelation); + } + releaseDummyRelation(&partitionRelation); + partitionClose(indexRelation, partition, ExclusiveLock); + } + list_free_deep(partitionTupleList); + } + + index_close(indexRelation, ExclusiveLock); + } + list_free(indexList); + + return indexNum; +} + +/** + * @brief Find out all the partition or relation of target relation, and pre + * redo the before execute ddl. 
+ * + * @param relation + * @return int + */ +int PreRedoRelationByRelation(Relation relation) { + Assert(RelationIsValid(relation)); + int partitionNum = 0; + + if (!RELATION_IS_PARTITIONED(relation)) { + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(relation); + } + partitionNum++; + return partitionNum; + } + + List* partitionList = NULL; + if (RelationIsCommonPartitioned(relation)) { + partitionList = relationGetPartitionList(relation, ExclusiveLock); + } else { + partitionList = RelationGetSubPartitionList(relation, ExclusiveLock); + } + + ListCell* partitionCell = NULL; + if (partitionList == NULL) { + return partitionNum; + } + + foreach (partitionCell, partitionList) { + Partition partition = (Partition)lfirst(partitionCell); + Relation partitionRelation = partitionGetRelation(relation, partition); + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(partitionRelation); + } + releaseDummyRelation(&partitionRelation); + partitionNum++; + } + releasePartitionList(relation, &partitionList, ExclusiveLock); + + return partitionNum; +} + + +/** + * @brief Find out all the toast table of taget relation, and pre redo them before execute ddl. 
+ * + * @param relation target relation + * @return int the num of toast table + */ +int PreRedoToastByRelation(Relation relation) { + Assert(RelationIsValid(relation)); + int toastNum = 0; + if (!RELATION_IS_PARTITIONED(relation)) { + Relation toastRelation = try_relation_open(relation->rd_rel->reltoastrelid, ExclusiveLock); + if (RelationIsValid(toastRelation)) { + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(toastRelation); + } + toastNum++; + heap_close(toastRelation, ExclusiveLock); + } + return toastNum; + } + + List* partitionList = NULL; + /* Get partition or subpartition list by relation */ + if (RelationIsCommonPartitioned(relation)) { + partitionList = relationGetPartitionList(relation, ExclusiveLock); + } else { + partitionList = RelationGetSubPartitionList(relation, ExclusiveLock); + } + + if (partitionList == NULL) { + return toastNum; + } + + ListCell* partitionCell = NULL; + foreach (partitionCell, partitionList) { + Partition partition = (Partition)lfirst(partitionCell); + Relation partitionRelation = partitionGetRelation(relation, partition); + if (!RelationIsValid(partitionRelation) || !OidIsValid(partitionRelation->rd_rel->reltoastrelid)) { + releaseDummyRelation(&partitionRelation); + continue; + } + + Relation toastRelation = heap_open(partitionRelation->rd_rel->reltoastrelid, ExclusiveLock); + if (!RelationIsValid(toastRelation)) { + releaseDummyRelation(&toastRelation); + continue; + } + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(toastRelation); + } + heap_close(toastRelation, ExclusiveLock); + releaseDummyRelation(&partitionRelation); + toastNum++; + } + releasePartitionList(relation, &partitionList, ExclusiveLock); + + return toastNum; +} + +/** + * @brief Redo for index In ondemand recvery by relId, before execute ddl. + * + * @param indexId oid of index. 
+ */ +static void PreRedoIndexInOndemandRecovery(Oid indexId) { + int indexNum = 0; + + if (!SS_IN_ONDEMAND_RECOVERY) { + return; + } + + Relation indexRelation = index_open(indexId, ExclusiveLock); + if (!RelationIsValid(indexRelation)) { + return; + } + + /* If the index has no partition. */ + if (indexRelation->rd_rel->relkind != RELKIND_INDEX || RelationIsNonpartitioned(indexRelation)) { + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(indexRelation); + } + indexNum++; + /* If the index has partitions. */ + } else { + List *partitionTupleList = NULL; + Partition partition = NULL; + partitionTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_INDEX_PARTITION, indexRelation->rd_id); + + Assert(PointerIsValid(partitionTupleList)); + ListCell *partitionCell = NULL; + foreach (partitionCell, partitionTupleList) { + HeapTuple tuple = (HeapTuple)lfirst(partitionCell); + Oid partitionId = HeapTupleGetOid(tuple); + partition = partitionOpen(indexRelation, partitionId, ExclusiveLock); + Relation partitionRelation = partitionGetRelation(indexRelation, partition); + if (SS_IN_ONDEMAND_RECOVERY) { + RedoRelationForOndemandExtremeRTO(partitionRelation); + } + releaseDummyRelation(&partitionRelation); + partitionClose(indexRelation, partition, ExclusiveLock); + } + list_free_deep(partitionTupleList); + } + + index_close(indexRelation, ExclusiveLock); +} + +/** + * @brief Redo for table In ondemand recvery by relId, before execute ddl. + * + * @param relId Oid of relation + */ +static void PreRedoTableInOndemandRecovery(Oid relId) { + + if (!SS_IN_ONDEMAND_RECOVERY) { + return; + } + + Relation relation = heap_open(relId, ExclusiveLock); + if (!RelationIsValid(relation)) { + return; + } + + /* Check if support target relation type when enable DMS. If not, no need to redo. 
*/ + if ((relation->rd_rel->relkind == RELKIND_RELATION && IsSegmentPhysicalRelNode(relation->rd_node)) || + relation->rd_rel->relkind == RELKIND_MATVIEW || + relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE || + RelationIsTableAccessMethodUStoreType(relation->rd_options) || + RelationIsCUFormat(relation) || + relation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED || + relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP || + relation->rd_rel->relpersistence == RELPERSISTENCE_GLOBAL_TEMP || + RowRelationIsCompressed(relation)) { + if (SS_IN_ONDEMAND_RECOVERY) { + heap_close(relation, ExclusiveLock); + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("[On-demand]Only support segment storage type and ASTORE while DMS and DSS enable.\n" + "Foreign table, matview, temp table or unlogged table is not supported.\nCompression is not " + "supported."))); + } + } + + /* redo for indexs on relation */ + int indexNum = 0; + indexNum = PreRedoIndexByRelation(relation); + + /* redo for partitions on relation */ + int partitionNum = 0; + partitionNum = PreRedoRelationByRelation(relation); + + int toastNum = 0; + /* redo for toasts on relation */ + toastNum = PreRedoToastByRelation(relation); + + ereport(DEBUG1, + (errmsg("[On-demand] pre redo relation befor execute ddl, relId: %u, indexNum: %d, partitionNum/relationNum: %d, toastNum: %d.", + relId, indexNum, partitionNum, toastNum))); + + heap_close(relation, ExclusiveLock); +} \ No newline at end of file diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index e690e16c74..2086e5de24 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -755,6 +755,7 @@ void ReleaseRecParseState(HTAB *redoItemHash, RedoItemHashEntry *redoItemEntry) unsigned int del_from_hash_item_num = 0; 
unsigned int new_hash; LWLock *xlog_partition_lock; + LWLock *scanningXLogTrackLock; /* Items that have been replayed(refcount == 0) can be released */ while (cur_state != NULL) { @@ -772,23 +773,36 @@ void ReleaseRecParseState(HTAB *redoItemHash, RedoItemHashEntry *redoItemEntry) new_hash = XlogTrackTableHashCode(&redoItemEntry->redoItemTag); xlog_partition_lock = XlogTrackMappingPartitionLock(new_hash); + scanningXLogTrackLock = XLogTrackMappingScanningLock(g_redoWorker->slotId); if (del_from_hash_item_num > 0) { Assert(releaseTailState != NULL); + if (SS_IN_ONDEMAND_RECOVERY) { + LWLockAcquire(scanningXLogTrackLock, LW_EXCLUSIVE); + } (void)LWLockAcquire(xlog_partition_lock, LW_EXCLUSIVE); redoItemEntry->head = (XLogRecParseState *)releaseTailState->nextrecord; releaseTailState->nextrecord = NULL; XLogBlockParseStateRelease(releaseHeadState); redoItemEntry->redoItemNum -= del_from_hash_item_num; LWLockRelease(xlog_partition_lock); + if (SS_IN_ONDEMAND_RECOVERY) { + LWLockRelease(scanningXLogTrackLock); + } } if (redoItemEntry->redoItemNum == 0) { + if (SS_IN_ONDEMAND_RECOVERY) { + LWLockAcquire(scanningXLogTrackLock, LW_EXCLUSIVE); + } (void)LWLockAcquire(xlog_partition_lock, LW_EXCLUSIVE); if (hash_search(redoItemHash, (void *)&redoItemEntry->redoItemTag, HASH_REMOVE, NULL) == NULL) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("redo item hash table corrupted"))); } LWLockRelease(xlog_partition_lock); + if (SS_IN_ONDEMAND_RECOVERY) { + LWLockRelease(scanningXLogTrackLock); + } } return; @@ -4056,7 +4070,7 @@ static void OndemandPauseRedoAndRequestPrimaryDoCkpt(OndemandCheckPauseCB activa static void OndemandLogHashMapUsedStatus() { ereport(LOG, (errcode(ERRCODE_LOG), - errmsg("[On-demand] hashmap usedblknum %lu, totalblknum %lu, pause value %lu, continue value %lu", + errmsg("[On-demand] hashmap usedblknum %u, totalblknum %u, pause value %u, continue value %u", pg_atomic_read_u32(&g_dispatcher->parseManager.memctl.usedblknum), 
g_dispatcher->parseManager.memctl.totalblknum, g_ondemandXLogParseMemFullValue, g_ondemandXLogParseMemCancelPauseVaule))); @@ -4065,7 +4079,7 @@ static void OndemandLogHashMapUsedStatus() static void OndemandLogTrxnQueueUsedStatus() { ereport(LOG, (errcode(ERRCODE_LOG), - errmsg("[On-demand] trxn queue usedblknum %lu, totalblknum %lu, pause value %lu", + errmsg("[On-demand] trxn queue usedblknum %u, totalblknum %u, pause value %u", SPSCGetQueueCount(g_dispatcher->trxnQueue), REALTIME_BUILD_RECORD_QUEUE_SIZE, g_ondemandRealtimeBuildQueueFullValue))); } @@ -4073,7 +4087,7 @@ static void OndemandLogTrxnQueueUsedStatus() static void OndemandLogSegQueueUsedStatus() { ereport(LOG, (errcode(ERRCODE_LOG), - errmsg("[On-demand] seg queue usedblknum %lu, totalblknum %lu, pause value %lu", + errmsg("[On-demand] seg queue usedblknum %u, totalblknum %u, pause value %u", SPSCGetQueueCount(g_dispatcher->segQueue), REALTIME_BUILD_RECORD_QUEUE_SIZE, g_ondemandRealtimeBuildQueueFullValue))); } diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp index 801f5d65c1..b72af7fc53 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp @@ -279,6 +279,93 @@ void OndemandXLogParseBufferRelease(XLogRecParseState *recordstate) OndemandXLogMemRelease(memctl, descstate->buff_id); } +/** + * @brief scanning the hashmap by relationTag, to find out all the + * redoItem of target relation and read the buffer of each redoItem. + * The blockStates will be redone at RedoForOndemandExtremeRTOQuery if need. 
+ * + * @param relation the relation need to be redone + * @return the redoEntry num of target relation + */ +long RedoRelationForOndemandExtremeRTO(Relation relation) { + long entryNum = 0; + Assert(RelationIsValid(relation)); + + RelFileNode relfilenode = relation->rd_node; + ondemand_extreme_rto::RedoItemTag relationTag; + INIT_REDO_ITEM_TAG(relationTag, relfilenode, 0, 0); + uint32 slotId = ondemand_extreme_rto::GetSlotId(relationTag.rNode, 0, 0, ondemand_extreme_rto::GetBatchCount()); + ondemand_extreme_rto::RedoItemHashEntry *redoItemEntry = NULL; + HASH_SEQ_STATUS status; + + HTAB *hashMap = g_instance.comm_cxt.predo_cxt.redoItemHashCtrl[slotId]->hTab; + if (hashMap == NULL) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("redo item hash table corrupted, there has invalid hashtable."))); + } + + hash_seq_init(&status, hashMap); + LWLock* scanningXLogTrackLock = XLogTrackMappingScanningLock(slotId); + LWLockAcquire(scanningXLogTrackLock, LW_SHARED); + + while ((redoItemEntry = (ondemand_extreme_rto::RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { + ondemand_extreme_rto::RedoItemTag redoItemTag = redoItemEntry->redoItemTag; + // Check if this redoItemEntry belong to the target relation. + if (!RelFileNodeRelEquals(redoItemTag.rNode, relationTag.rNode)) { + continue; + } + + Buffer buf = ReadBufferExtended(relation, redoItemTag.forkNum, redoItemTag.blockNum, RBM_NORMAL, NULL); + + ReleaseBuffer(buf); + entryNum++; + } + LWLockRelease(scanningXLogTrackLock); + return entryNum; +} + +/** + * @brief scanning all the hashmap of each pipline, to find out all the + * redoItem of target database and read the buffer of each redoItem. + * The blockState will be redone at RedoForOndemandExtremeRTOQuery if need. 
+ * + * @param dbId the dbNode of database + * @return the redoEntry num of target database + */ +long RedoDatabaseForOndemandExtremeRTO(Oid dbNode) { + long entryNum = 0; + Assert(OidIsValid(dbNode)); + + // Search the hashmap of each piplines. + uint32 batchCount = ondemand_extreme_rto::GetBatchCount(); + ondemand_extreme_rto::RedoItemHashEntry *redoItemEntry = NULL; + for (uint32 slotId = 0; slotId < batchCount; slotId++) { + HASH_SEQ_STATUS status; + HTAB *hashMap = g_instance.comm_cxt.predo_cxt.redoItemHashCtrl[slotId]->hTab; + if (hashMap == NULL) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("redo item hash table corrupted, there has invalid hashtable."))); + } + + hash_seq_init(&status, hashMap); + LWLock* scanningXLogTrackLock = XLogTrackMappingScanningLock(slotId); + LWLockAcquire(scanningXLogTrackLock, LW_SHARED); + + while ((redoItemEntry = (ondemand_extreme_rto::RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { + ondemand_extreme_rto::RedoItemTag redoItemTag = redoItemEntry->redoItemTag; + // Check if this redoItemEntry belong to the target namespace. + if (redoItemEntry->redoDone || redoItemTag.rNode.dbNode != dbNode) { + continue; + } + Buffer buffer = ReadBufferWithoutRelcache(redoItemTag.rNode, redoItemTag.forkNum, redoItemTag.blockNum, RBM_NORMAL, NULL, NULL); + ReleaseBuffer(buffer); + entryNum++; + } + LWLockRelease(scanningXLogTrackLock); + } + return entryNum; +} + BufferDesc *RedoForOndemandExtremeRTOQuery(BufferDesc *bufHdr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode) { @@ -467,8 +554,14 @@ static XLogRecParseState *OndemandFindTargetBlockStateInOndemandRedo(XLogRecPars return targetState; } -// only used in ondemand redo stage -XLogRecParseState *OndemandRedoReloadXLogRecord(XLogRecParseState *redoblockstate) +/** + * @brief Reload redoItem according to XLogRecParseState, the redoItem has been released before + * push into hashmap, so reload it before redo. 
+ * + * @param hashmapBlockState the blockState got from hashmap + * @return XLogRecParseState* blockState reload from disk + */ +XLogRecParseState *OndemandRedoReloadXLogRecord(XLogRecParseState *hashmapBlockState) { uint32 blockNum = 0; char *errormsg = NULL; @@ -481,18 +574,18 @@ XLogRecParseState *OndemandRedoReloadXLogRecord(XLogRecParseState *redoblockstat XLogReaderState *xlogreader = XLogReaderAllocate(&SimpleXLogPageReadInFdCache, &readPrivate); // do not use pre-read // step1: read record - XLogRecord *record = XLogReadRecord(xlogreader, redoblockstate->blockparse.blockhead.start_ptr, &errormsg, + XLogRecord *record = XLogReadRecord(xlogreader, hashmapBlockState->blockparse.blockhead.start_ptr, &errormsg, true, g_instance.dms_cxt.SSRecoveryInfo.recovery_xlog_dir); if (record == NULL) { ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("[On-demand] reload xlog record failed at %X/%X, spc/db/rel/bucket " "fork-block: %u/%u/%u/%d %d-%u, errormsg: %s", - (uint32)(recordBlockState->blockparse.blockhead.start_ptr >> 32), - (uint32)recordBlockState->blockparse.blockhead.start_ptr, - recordBlockState->blockparse.blockhead.spcNode, recordBlockState->blockparse.blockhead.dbNode, - recordBlockState->blockparse.blockhead.relNode, - recordBlockState->blockparse.blockhead.bucketNode, - recordBlockState->blockparse.blockhead.forknum, recordBlockState->blockparse.blockhead.blkno, + (uint32)(hashmapBlockState->blockparse.blockhead.start_ptr >> 32), + (uint32)hashmapBlockState->blockparse.blockhead.start_ptr, + hashmapBlockState->blockparse.blockhead.spcNode, hashmapBlockState->blockparse.blockhead.dbNode, + hashmapBlockState->blockparse.blockhead.relNode, + hashmapBlockState->blockparse.blockhead.bucketNode, + hashmapBlockState->blockparse.blockhead.forknum, hashmapBlockState->blockparse.blockhead.blkno, errormsg))); } @@ -506,7 +599,7 @@ XLogRecParseState *OndemandRedoReloadXLogRecord(XLogRecParseState *redoblockstat } while (true); // step3: find target 
parse state - XLogRecParseState *targetState = OndemandFindTargetBlockStateInOndemandRedo(recordBlockState, redoblockstate); + XLogRecParseState *targetState = OndemandFindTargetBlockStateInOndemandRedo(recordBlockState, hashmapBlockState); if (targetState == NULL) { ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("[On-demand] reload xlog record failed at %X/%X, spc/db/rel/bucket " diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 60855f7992..31b2b8ab9f 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -20016,7 +20016,6 @@ static int SSReadXLog(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int /* Read the requested page */ t_thrd.xlog_cxt.readOff = targetPageOff; - if (xlogreader->preReadBuf == NULL) { actualBytes = (uint32)pread(t_thrd.xlog_cxt.readFile, readBuf, t_thrd.xlog_cxt.readLen, t_thrd.xlog_cxt.readOff); } else { diff --git a/src/gausskernel/storage/lmgr/lwlock.cpp b/src/gausskernel/storage/lmgr/lwlock.cpp index 14722b4ad9..a8733d3808 100644 --- a/src/gausskernel/storage/lmgr/lwlock.cpp +++ b/src/gausskernel/storage/lmgr/lwlock.cpp @@ -204,7 +204,8 @@ static const char *BuiltinTrancheNames[] = { "SSTxnStatusCachePartLock", "SSSnapshotXminCachePartLock", "DmsBufCtrlLock", - "WalSyncRepWaitLock" + "WalSyncRepWaitLock", + "ScaningXLogTrackLock" }; static void RegisterLWLockTranches(void); @@ -454,6 +455,9 @@ int NumLWLocks(void) /* for ss txnstatus hash table */ numLocks += NUM_TXNSTATUS_CACHE_PARTITIONS; + /* for scanning xlog track hash table */ + numLocks += NUM_SCANNING_XLOG_TRACK_PARTITIONS; + /* * Add any requested by loadable modules; for backwards-compatibility * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if @@ -676,6 +680,10 @@ static void InitializeLWLocks(int numLocks) LWLockInitialize(&lock->lock, LWTRANCHE_SS_SNAPSHOT_XMIN_PARTITION); } + for (id = 0; id < 
NUM_SCANNING_XLOG_TRACK_PARTITIONS; id++, lock++) { + LWLockInitialize(&lock->lock, LWTRANCHE_SCANNING_XLOG_TRACK); + } + Assert((lock - t_thrd.shemem_ptr_cxt.mainLWLockArray) == NumFixedLWLocks); for (id = NumFixedLWLocks; id < numLocks; id++, lock++) { diff --git a/src/include/access/ondemand_extreme_rto/batch_redo.h b/src/include/access/ondemand_extreme_rto/batch_redo.h index b995c26f52..78eee53dd3 100644 --- a/src/include/access/ondemand_extreme_rto/batch_redo.h +++ b/src/include/access/ondemand_extreme_rto/batch_redo.h @@ -49,6 +49,8 @@ namespace ondemand_extreme_rto { #define XlogTrackTableHashPartition(hashcode) ((hashcode) % NUM_XLOG_TRACK_PARTITIONS) #define XlogTrackMappingPartitionLock(hashcode) \ (&t_thrd.shemem_ptr_cxt.mainLWLockArray[FirstXlogTrackLock + XlogTrackTableHashPartition(hashcode)].lock) +#define XLogTrackMappingScanningLock(slotId) \ + (&t_thrd.shemem_ptr_cxt.mainLWLockArray[FirstScanningXLOGTrackLock + (slotId)].lock) /* * Note: if there are any pad bytes in the struct, INIT_RedoItemTag have diff --git a/src/include/storage/buf/bufmgr.h b/src/include/storage/buf/bufmgr.h index 7962002131..63a8f9bceb 100644 --- a/src/include/storage/buf/bufmgr.h +++ b/src/include/storage/buf/bufmgr.h @@ -429,6 +429,8 @@ extern bool StartBufferIO(BufferDesc* buf, bool forInput); extern Buffer ReadBuffer_common_for_dms(ReadBufferMode readmode, BufferDesc *bufDesc, const XLogPhyBlock *pblk); extern void ReadBuffer_common_for_check(ReadBufferMode readmode, BufferDesc* buf_desc, const XLogPhyBlock *pblk, Block bufBlock); +extern long RedoRelationForOndemandExtremeRTO(Relation relation); +extern long RedoDatabaseForOndemandExtremeRTO(Oid dbId); extern BufferDesc *RedoForOndemandExtremeRTOQuery(BufferDesc *bufHdr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode); extern Buffer standby_read_buf(Relation reln, ForkNumber fork_num, BlockNumber block_num, ReadBufferMode mode, diff --git a/src/include/storage/lock/lwlock.h 
b/src/include/storage/lock/lwlock.h index 633f9e713f..63972fd490 100644 --- a/src/include/storage/lock/lwlock.h +++ b/src/include/storage/lock/lwlock.h @@ -154,6 +154,9 @@ const struct LWLOCK_PARTITION_DESC LWLockPartInfo[] = { /* Number of partitions of the snapshot xmin cache hashtable */ #define NUM_SS_SNAPSHOT_XMIN_CACHE_PARTITIONS 32 +/* Number of xlog track hashmap, each ondemand extro pipline has one */ +#define NUM_SCANNING_XLOG_TRACK_PARTITIONS (g_instance.attr.attr_storage.recovery_parse_workers) + /* * WARNING---Please keep the order of LWLockTrunkOffset and BuiltinTrancheIds consistent!!! */ @@ -205,8 +208,11 @@ const struct LWLOCK_PARTITION_DESC LWLockPartInfo[] = { #define FirstTxnStatusCacheLock (FirstXlogTrackLock + NUM_XLOG_TRACK_PARTITIONS) /* shared-storage snapshot xmin cache*/ #define FirstSSSnapshotXminCacheLock (FirstTxnStatusCacheLock + NUM_TXNSTATUS_CACHE_PARTITIONS) +/* xlog track hashmap */ +#define FirstScanningXLOGTrackLock (FirstSSSnapshotXminCacheLock + NUM_SS_SNAPSHOT_XMIN_CACHE_PARTITIONS) + /* must be last: */ -#define NumFixedLWLocks (FirstSSSnapshotXminCacheLock + NUM_SS_SNAPSHOT_XMIN_CACHE_PARTITIONS) +#define NumFixedLWLocks (FirstScanningXLOGTrackLock + NUM_SCANNING_XLOG_TRACK_PARTITIONS) /* * WARNING----Please keep BuiltinTrancheIds and BuiltinTrancheNames consistent!!! 
* @@ -290,6 +296,7 @@ enum BuiltinTrancheIds LWTRANCHE_SS_SNAPSHOT_XMIN_PARTITION, LWTRANCHE_DMS_BUF_CTRL, LWTRANCHE_SYNCREP_WAIT, + LWTRANCHE_SCANNING_XLOG_TRACK, /* * Each trancheId above should have a corresponding item in BuiltinTrancheNames; */ -- Gitee From 68db8a7947c3f39baca601b6f2ff8aaab874d463 Mon Sep 17 00:00:00 2001 From: wangpingyun <2418191738@qq.com> Date: Wed, 31 Jul 2024 09:49:25 +0800 Subject: [PATCH 119/347] =?UTF-8?q?default=E6=B7=BB=E5=8A=A0=E4=BB=A5f/d?= =?UTF-8?q?=E7=BB=93=E5=B0=BE=E7=9A=84=E6=B5=AE=E7=82=B9=E6=95=B0=E6=94=AF?= =?UTF-8?q?=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/gram.y | 18 +++++++++++ .../interfaces/libpq/frontend_parser/gram.y | 18 +++++++++++ .../regress/expected/test_binary_suffix.out | 31 +++++++++++++++++++ src/test/regress/sql/test_binary_suffix.sql | 21 +++++++++++++ 4 files changed, 88 insertions(+) diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 3a00caa610..f31301087f 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -8078,6 +8078,24 @@ ColConstraintElem: n->cooked_expr = NULL; $$ = (Node *)n; } + | DEFAULT FCONST_F + { + Constraint *n = makeNode(Constraint); + n->contype = CONSTR_DEFAULT; + n->location = @1; + n->raw_expr = makeFloatConst($2, @2); + n->cooked_expr = NULL; + $$ = (Node *)n; + } + | DEFAULT FCONST_D + { + Constraint *n = makeNode(Constraint); + n->contype = CONSTR_DEFAULT; + n->location = @1; + n->raw_expr = makeFloatConst($2, @2);; + n->cooked_expr = NULL; + $$ = (Node *)n; + } | ON_UPDATE_TIME UPDATE b_expr { #ifndef ENABLE_MULTIPLE_NODES diff --git a/src/common/interfaces/libpq/frontend_parser/gram.y b/src/common/interfaces/libpq/frontend_parser/gram.y index 8d8fa45252..ad545adee1 100755 --- a/src/common/interfaces/libpq/frontend_parser/gram.y +++ b/src/common/interfaces/libpq/frontend_parser/gram.y @@ -5560,6 +5560,24 @@ 
ColConstraintElem: n->cooked_expr = NULL; $$ = (Node *)n; } + | DEFAULT FCONST_F + { + Constraint *n = makeNode(Constraint); + n->contype = CONSTR_DEFAULT; + n->location = @1; + n->raw_expr = makeFloatConst($2, @2); + n->cooked_expr = NULL; + $$ = (Node *)n; + } + | DEFAULT FCONST_D + { + Constraint *n = makeNode(Constraint); + n->contype = CONSTR_DEFAULT; + n->location = @1; + n->raw_expr = makeFloatConst($2, @2);; + n->cooked_expr = NULL; + $$ = (Node *)n; + } | GENERATED ALWAYS AS '(' a_expr ')' STORED { #ifdef ENABLE_MULTIPLE_NODES diff --git a/src/test/regress/expected/test_binary_suffix.out b/src/test/regress/expected/test_binary_suffix.out index 829f3d54d5..c222e3eec3 100644 --- a/src/test/regress/expected/test_binary_suffix.out +++ b/src/test/regress/expected/test_binary_suffix.out @@ -950,5 +950,36 @@ create type binary_double_nan AS ( ); drop type binary_double_nan; set disable_keyword_options = ''; +set float_suffix_acceptance = on; +CREATE TABLE employees ( +emp_id INT PRIMARY KEY, +emp_name VARCHAR(100) NOT NULL, +emp_salary DECIMAL(10, 2) DEFAULT 3.14f, +hire_date DATE DEFAULT CURRENT_DATE +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "employees_pkey" for table "employees" +INSERT INTO employees (emp_id, emp_name, hire_date) VALUES (1, 'John Doe', '2024-01-01'); +SELECT * FROM employees; + emp_id | emp_name | emp_salary | hire_date +--------+----------+------------+-------------------------- + 1 | John Doe | 3.14 | Mon Jan 01 00:00:00 2024 +(1 row) + +DROP TABLE employees; +CREATE TABLE employees ( +emp_id INT PRIMARY KEY, +emp_name VARCHAR(100) NOT NULL, +emp_salary DECIMAL(10, 2) DEFAULT 3.14d, +hire_date DATE DEFAULT CURRENT_DATE +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "employees_pkey" for table "employees" +INSERT INTO employees (emp_id, emp_name, hire_date) VALUES (1, 'John Doe', '2024-01-01'); +SELECT * FROM employees; + emp_id | emp_name | emp_salary | hire_date 
+--------+----------+------------+-------------------------- + 1 | John Doe | 3.14 | Mon Jan 01 00:00:00 2024 +(1 row) + +DROP TABLE employees; drop schema if exists test_binary cascade; NOTICE: drop cascades to type "binary_double_infinity" diff --git a/src/test/regress/sql/test_binary_suffix.sql b/src/test/regress/sql/test_binary_suffix.sql index b253461562..f09853aefc 100644 --- a/src/test/regress/sql/test_binary_suffix.sql +++ b/src/test/regress/sql/test_binary_suffix.sql @@ -362,4 +362,25 @@ create type binary_double_nan AS ( drop type binary_double_nan; set disable_keyword_options = ''; +set float_suffix_acceptance = on; +CREATE TABLE employees ( +emp_id INT PRIMARY KEY, +emp_name VARCHAR(100) NOT NULL, +emp_salary DECIMAL(10, 2) DEFAULT 3.14f, +hire_date DATE DEFAULT CURRENT_DATE +); +INSERT INTO employees (emp_id, emp_name, hire_date) VALUES (1, 'John Doe', '2024-01-01'); +SELECT * FROM employees; +DROP TABLE employees; + +CREATE TABLE employees ( +emp_id INT PRIMARY KEY, +emp_name VARCHAR(100) NOT NULL, +emp_salary DECIMAL(10, 2) DEFAULT 3.14d, +hire_date DATE DEFAULT CURRENT_DATE +); +INSERT INTO employees (emp_id, emp_name, hire_date) VALUES (1, 'John Doe', '2024-01-01'); +SELECT * FROM employees; +DROP TABLE employees; + drop schema if exists test_binary cascade; -- Gitee From 4d2cfad94d200bffea4d619a1ddd3f437a07a291 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=82=AE=E5=82=A8-=E7=8E=8B=E5=BB=BA=E8=BE=BE?= Date: Tue, 30 Jul 2024 15:08:03 +0800 Subject: [PATCH 120/347] pg_audit 6.0 --- src/common/backend/catalog/builtin_funcs.ini | 4 +- src/common/backend/utils/init/globals.cpp | 3 +- .../process/postmaster/pgaudit.cpp | 255 +++++++++++++++--- .../rollback-post_catalog_maindb_92_946.sql | 39 +++ .../rollback-post_catalog_otherdb_92_946.sql | 39 +++ .../upgrade-post_catalog_maindb_92_946.sql | 43 +++ .../upgrade-post_catalog_otherdb_92_946.sql | 43 +++ src/include/miscadmin.h | 1 + src/include/pgaudit.h | 4 +- 9 files changed, 396 insertions(+), 35 
deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 76322cf17b..8f0408791e 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -8583,8 +8583,8 @@ ), AddFuncGroup( "pg_query_audit", 2, - AddBuiltinFunc(_0(3780), _1("pg_query_audit"), _2(2), _3(false), _4(true), _5(pg_query_audit), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 1184, 1184), _21(15, 1184, 1184, 1184, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25), _22(15, 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(15, "begin", "end", "time", "type", "result", "userid", "username", "database", "client_conninfo", "object_name", "detail_info", "node_name", "thread_id", "local_port", "remote_port"), _24(NULL), _25("pg_query_audit"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(3782), _1("pg_query_audit"), _2(3), _3(false), _4(true), _5(pg_query_audit), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, 1184, 1184, 25), _21(16, 1184, 1184, 25, 1184, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 
25, 25), _22(16, 'i', 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(16, "begin", "end", "directory", "time", "type", "result", "userid", "username", "database", "client_conninfo", "object_name", "detail_info", "node_name", "thread_id", "local_port", "remote_port"), _24(NULL), _25("pg_query_audit"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(3780), _1("pg_query_audit"), _2(2), _3(false), _4(true), _5(pg_query_audit), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 1184, 1184), _21(17, 1184, 1184, 1184, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 16), _22(17, 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(17, "begin", "end", "time", "type", "result", "userid", "username", "database", "client_conninfo", "object_name", "detail_info", "node_name", "thread_id", "local_port", "remote_port", "sha_code", "verify_result"), _24(NULL), _25("pg_query_audit"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(3782), _1("pg_query_audit"), _2(3), _3(false), _4(true), _5(pg_query_audit), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, 1184, 1184, 25), _21(18, 1184, 1184, 25, 1184, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 16), _22(18, 'i', 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(18, "begin", "end", "directory", "time", "type", "result", "userid", "username", "database", "client_conninfo", 
"object_name", "detail_info", "node_name", "thread_id", "local_port", "remote_port", "sha_code", "verify_result"), _24(NULL), _25("pg_query_audit"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "pg_read_binary_file", 2, diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index d1244a9b8a..dda1df83ac 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,12 +76,13 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92945; +const uint32 GRAND_VERSION_NUM = 92946; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 AUDIT_SHA_VERSION_NUM = 92946; const uint32 NETTIME_TRACE_VERSION_NUM = 92945; const uint32 HBA_CONF_VERSION_NUM = 92944; const uint32 PARALLEL_ENABLE_VERSION_NUM = 92941; diff --git a/src/gausskernel/process/postmaster/pgaudit.cpp b/src/gausskernel/process/postmaster/pgaudit.cpp index f223a42830..1c32aa2a0a 100755 --- a/src/gausskernel/process/postmaster/pgaudit.cpp +++ b/src/gausskernel/process/postmaster/pgaudit.cpp @@ -30,6 +30,7 @@ #include "lib/stringinfo.h" #include "libpq/libpq-be.h" #include "libpq/pqsignal.h" +#include "libpq/sha2.h" #include "funcapi.h" #include "miscadmin.h" #include "nodes/pg_list.h" @@ -57,6 +58,25 @@ #include #include +#include +#include + +#ifdef HAVE_STDIO_H +#include +#endif + +#ifdef HAVE_STDLIB_H +#include +#endif + +#ifdef HVE_STRING_H +#include +#endif + +#ifdef HAVE_OPENSSL_SHA_H +#include +#endif + #ifdef ENABLE_UT #define static #endif @@ -333,6 +353,7 @@ typedef struct AuditData { #define PGAUDIT_RESTART_INTERVAL 60 #define PGAUDIT_QUERY_COLS 13 +#define PGAUDIT_QUERY_COLS_NEW 15 #define MAXNUMLEN 16 @@ 
-340,6 +361,11 @@ typedef struct AuditData { #define WRITE_TO_STDAUDITFILE(ctype) (t_thrd.role == AUDITOR && ctype == STD_AUDIT_TYPE) #define WRITE_TO_UNIAUDITFILE(ctype) (t_thrd.role == AUDITOR && ctype == UNIFIED_AUDIT_TYPE) +#define MAX_DATA_LEN 1024 /*sha data len*/ +#define SHA256_LENTH 32 /*sha length*/ +#define SHA256_HEX_LENTH 512 /*sha hex length*/ +#define SHA_LOG_MAX_TIMELEN 80 /*sha date length*/ + struct AuditEventInfo { AuditEventInfo() : userid{0}, username(NULL), @@ -406,13 +432,15 @@ static void pgaudit_rewrite_indexfile(void); static void pgaudit_indextbl_init_new(void); static void pgaudit_reset_indexfile(); static const char* pgaudit_string_field(AuditData* adata, int num); -static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS], +static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS_NEW], AuditData *adata, - const AuditMsgHdr &header); + const AuditMsgHdr &header, + bool nulls[PGAUDIT_QUERY_COLS_NEW], + bool newVersion); static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 fnum, TimestampTz begtime, - TimestampTz endtime, const char *audit_directory); + TimestampTz endtime, const char *audit_directory, bool newVersion); static TimestampTz pgaudit_headertime(uint32 fnum, const char *audit_directory); -static void pgaudit_query_valid_check(const ReturnSetInfo *rsinfo, FunctionCallInfoData *fcinfo, TupleDesc &tupdesc); +static void pgaudit_query_valid_check(const ReturnSetInfo *rsinfo, FunctionCallInfoData *fcinfo, TupleDesc &tupdesc, bool newVersion); static uint32 pgaudit_get_auditfile_num(); static void pgaudit_update_auditfile_time(pg_time_t timestamp, bool exist); @@ -438,11 +466,15 @@ inline bool pgaudit_need_check_size_rotation() /********** toughness *********/ static void CheckAuditFile(void); -static bool pgaudit_invalid_header(const AuditMsgHdr* header); +static bool pgaudit_invalid_header(const AuditMsgHdr* header, bool newVersion); static void 
pgaudit_mark_corrupt_info(uint32 fnum); static void audit_append_xid_info(const char *detail_info, char *detail_info_xid, uint32 len); static bool audit_status_check_ok(); - +/*audit sha code*/ +static bool pgaudit_need_sha_code(); +static void generate_audit_sha_code(pg_time_t time, const char* type, const char* result, char *userid, const char* username, const char* dbname, char* client_info, \ + const char *object_name, const char *detail_info, const char* nodename, char* threadid, char* localport, \ + char* remoteport, unsigned char* shacode); static void init_audit_signal_handlers() { (void)gspqsignal(SIGHUP, sigHupHandler); /* set flag to read config file */ @@ -564,6 +596,53 @@ static void sig_thread_config_handler(int ¤tAuditRotationAge, int ¤t t_thrd.audit.rotation_requested = true; } } + +/* audit sha code version: finished upgrade*/ +static bool pgaudit_need_sha_code() +{ + if (t_thrd.proc == NULL) { + return false; + } + return t_thrd.proc->workingVersionNum >= AUDIT_SHA_VERSION_NUM; +} + +/* + * Brief : audit sha code + * Description : audit sha code + * the fileds are arraged as below sequence, Note it's not liable to modify them as to keep compatibility of version + * time|type|result|userid|username|dbname|client_info|object_name|detail_info|nodename|threadid|localport|remoteport + */ +static void generate_audit_sha_code(pg_time_t time, AuditType type, AuditResult result, char* userid, const char* username, const char* dbname, char* client_info, + const char* object_name, const char* detail_info, const char* nodename, char* threadid, char* localport, + char* remoteport, unsigned char* shacode) +{ + char timeTzLocaltime[SHA_LOG_MAX_TIMELEN] = {0}; + struct tm* system; + system = localtime(&time); + if (system != nullptr) { + (void)strftime(timeTzLocaltime, SHA_LOG_MAX_TIMELEN, "%Y-%m-%d_%H%M%S", system); + } + userid = (userid != NULL && userid[0] != '\0') ? userid : NULL; + username = (username != NULL && username[0] != '\0') ? 
username : NULL; + dbname = (dbname != NULL && dbname[0] != '\0') ? dbname : NULL; + client_info = (client_info != NULL && client_info[0] != '\0') ? client_info : NULL; + object_name = (object_name != NULL && object_name[0] != '\0') ? object_name : NULL; + detail_info = (detail_info != NULL && detail_info[0] != '\0') ? detail_info : NULL; + nodename = (nodename != NULL && nodename[0] != '\0') ? nodename : NULL; + threadid = (threadid != NULL && threadid[0] != '\0') ? threadid : NULL; + localport = (localport != NULL && localport[0] != '\0') ? localport : NULL; + remoteport = (remoteport != NULL && remoteport[0] != '\0') ? remoteport : NULL; + // TimestampTz timeTz = time_t_to_timestamptz(time); + StringInfoData str; + initStringInfo(&str); + appendStringInfo(&str, "%s | %d | %d | %s | %s | %s | %s | %s | %s | %s | %s | %s | %s", + timeTzLocaltime, type, result, userid, username, dbname, client_info, object_name, + detail_info, nodename, threadid, localport, remoteport); + SHA256((const unsigned char*)str.data , str.len, shacode); + pfree_ext(str.data); + return; +} + /* * Main entry point for auditor process * argc/argv parameters are valid only in EXEC_BACKEND case. 
@@ -1992,6 +2071,7 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c StringInfoData buf; AuditData adata; AuditEventInfo event_info; + unsigned char shacode[SHA256_HEX_LENTH] = {0}; if (!audit_get_clientinfo(type, object_name, event_info)) { return; } @@ -2020,12 +2100,26 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c adata.header.signature[0] = 'A'; adata.header.signature[1] = 'U'; adata.header.version = 0; - adata.header.fields = PGAUDIT_QUERY_COLS; + if (pgaudit_need_sha_code()) { + adata.header.fields = PGAUDIT_QUERY_COLS_NEW; + } else { + adata.header.fields = PGAUDIT_QUERY_COLS; + } adata.header.flags = AUDIT_TUPLE_NORMAL; adata.header.time = current_timestamp(); adata.header.size = 0; adata.type = type; adata.result = result; + char hexbuf[SHA256_HEX_LENTH]={0}; + /*type result format*/ + if (pgaudit_need_sha_code()) { + /*sha code for audit*/ + generate_audit_sha_code(adata.header.time / 1000, type, result, userid, username, dbname, client_info, object_name, + detail_info, g_instance.attr.attr_common.PGXCNodeName, threadid, localport, + remoteport, shacode); + /*sha code convert to hex*/ + sha_bytes_to_hex64((uint8*)shacode, hexbuf); + } initStringInfo(&buf); appendBinaryStringInfo(&buf, (char*)&adata, AUDIT_HEADER_SIZE); @@ -2040,6 +2134,9 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c appendStringField(&buf, (threadid[0] != '\0') ? threadid : NULL); appendStringField(&buf, (localport[0] != '\0') ? localport : NULL); appendStringField(&buf, (remoteport[0] != '\0') ? remoteport : NULL); + if (pgaudit_need_sha_code()) { + appendStringField(&buf, (shacode[0] != '\0') ? 
(const char*)hexbuf : NULL); + } /* * Use the chunking protocol if we know the syslogger should be @@ -2651,6 +2748,8 @@ static char* serialize_event_to_json(AuditData *adata, long long eventTime) WRITE_JSON_STRING(localPortInfo); event.remotePortInfo = pgaudit_string_field(adata, AUDIT_REMOTEPORT_INFO); WRITE_JSON_STRING(remotePortInfo); + event.shaCode = pgaudit_string_field(adata, AUDIT_SHACODE); + WRITE_JSON_STRING(shaCode); event.eventTime = eventTime; WRITE_JSON_INT(eventTime); WRITE_JSON_END(); @@ -2721,7 +2820,7 @@ static void pgaudit_query_file_for_elastic() if (header.signature[0] != 'A' || header.signature[1] != 'U' || header.version != 0 || - header.fields != PGAUDIT_QUERY_COLS || + (header.fields != PGAUDIT_QUERY_COLS && header.fields != PGAUDIT_QUERY_COLS_NEW) || (header.size <= sizeof(AuditMsgHdr))) { ereport(LOG, (errmsg("invalid data in audit file \"%s\"", file_path))); break; @@ -2799,18 +2898,36 @@ static void pgaudit_query_file_for_elastic() /* * Brief : scan the specified audit file into tuple * Description : Note we use old/new version to differ whether there is user_id field in the file. 
- * for expanding new field later, maybe we will depend on version id to implement + * for expanding new field later, maybe we will depend on version id to implement * backward compatibility but not bool variable */ -static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS], - AuditData *adata, - const AuditMsgHdr &header) -{ +static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS_NEW], + AuditData *adata, + const AuditMsgHdr &header, + bool nulls[PGAUDIT_QUERY_COLS_NEW], + bool newVersion) +{ + /*sha param*/ + char* userid = NULL; + const char* username =NULL; + const char* dbname = NULL; + char* client_info = NULL; + const char* object_name = NULL; + const char* detail_info =NULL; + const char* nodename = NULL; + char* threadid = NULL; + char* localport = NULL; + char* remoteport = NULL; + unsigned char shacode[SHA256_HEX_LENTH] = {0}; + const char* saved_hexbuf = NULL; + char hexbuf[SHA256_HEX_LENTH]={0}; + /* append timestamp info to data tuple */ int i = 0; values[i++] = TimestampTzGetDatum(time_t_to_timestamptz(adata->header.time)); values[i++] = CStringGetTextDatum(AuditTypeDesc(adata->type)); values[i++] = CStringGetTextDatum(AuditResultDesc(adata->result)); + // values[i++] = CStringGetTextDatum((const char*)adata->shacode); /* * new format of the audit file under correct record @@ -2818,33 +2935,89 @@ static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS], */ int index_field = 0; const char* field = NULL; - bool new_version = (header.fields == PGAUDIT_QUERY_COLS); + + bool new_version = (header.fields == PGAUDIT_QUERY_COLS || header.fields == PGAUDIT_QUERY_COLS_NEW); field = new_version ? 
pgaudit_string_field(adata, index_field++) : NULL; + userid = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* user id */ + field = pgaudit_string_field(adata, index_field++); + username = (const char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* user name */ + field = pgaudit_string_field(adata, index_field++); + dbname = (const char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* dbname */ + field = pgaudit_string_field(adata, index_field++); + client_info = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* client info */ + field = pgaudit_string_field(adata, index_field++); + if (field != NULL) { + object_name = (const char*)field; + } values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* object name */ + field = pgaudit_string_field(adata, index_field++); + detail_info = (const char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* detail info */ + field = pgaudit_string_field(adata, index_field++); + nodename = (const char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* node name */ + field = pgaudit_string_field(adata, index_field++); + threadid = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* thread id */ + field = pgaudit_string_field(adata, index_field++); + localport = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* local port */ + field = pgaudit_string_field(adata, index_field++); + remoteport = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* remote port */ - Assert(i == PGAUDIT_QUERY_COLS); + if (header.fields == PGAUDIT_QUERY_COLS_NEW) { + field = pgaudit_string_field(adata, index_field++); /*old version audit data, not sha_code*/ + } else { + field = NULL; + } + values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* sha_code hex data*/ + if (pgaudit_need_sha_code()) { + saved_hexbuf = field; + if (header.fields == 
PGAUDIT_QUERY_COLS_NEW) { + if (saved_hexbuf != NULL && saved_hexbuf[0] != '\0') { + bool verifyResult = false; + /*sha code for audit*/ + generate_audit_sha_code(adata->header.time, adata->type, adata->result, userid, username, dbname, client_info, object_name, + detail_info, nodename, threadid, localport, remoteport, shacode); + /*sha code convert to hex*/ + sha_bytes_to_hex64((uint8*)shacode, hexbuf); + if (strcmp((const char*)hexbuf, (const char*)saved_hexbuf) == 0) { + verifyResult = true; + } + values[i++] = BoolGetDatum(verifyResult); /* verify_result */ + } else { + values[i++] = CStringGetTextDatum(FILED_NULLABLE(NULL)); /* verify_result*/ + nulls[i] = true; + } + } else { + values[i++] = CStringGetTextDatum(FILED_NULLABLE(NULL)); /* verify_result*/ + nulls[i] = true; + } + } + if (newVersion) { + Assert(i == PGAUDIT_QUERY_COLS_NEW); + } else { + Assert(i == PGAUDIT_QUERY_COLS); + } } + static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 fnum, TimestampTz begtime, - TimestampTz endtime, const char *audit_directory) + TimestampTz endtime, const char *audit_directory, bool newVersion) { FILE* fp = NULL; size_t nread = 0; @@ -2867,8 +3040,9 @@ static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 f } do { - Datum values[PGAUDIT_QUERY_COLS] = {0}; - bool nulls[PGAUDIT_QUERY_COLS] = {0}; + Datum values[PGAUDIT_QUERY_COLS_NEW] = {0}; + bool nulls[PGAUDIT_QUERY_COLS_NEW] = {0}; + errno_t errorno = EOK; /* * two scenarios tell that the audit file corrupt @@ -2880,7 +3054,7 @@ static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 f } (void)fseek(fp, -1, SEEK_CUR); size_t header_available = fread(&header, sizeof(AuditMsgHdr), 1, fp); - if (header_available != 1 || pgaudit_invalid_header(&header)) { + if (header_available != 1 || pgaudit_invalid_header(&header, newVersion)) { ereport(LOG, (errmsg("invalid data in audit file \"%s\"", t_thrd.audit.pgaudit_filepath))); /* label the currupt file 
num, then it may be reinit in audit thread but not here. */ pgaudit_mark_corrupt_info(fnum); @@ -2905,7 +3079,7 @@ static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 f /* filt and assemble audit info into tuplestore */ datetime = time_t_to_timestamptz(adata->header.time); if (datetime >= begtime && datetime < endtime && header.flags == AUDIT_TUPLE_NORMAL) { - deserialization_to_tuple(values, adata, header); + deserialization_to_tuple(values, adata, header, nulls, newVersion); tuplestore_putvalues(state, tdesc, values, nulls); } @@ -2952,7 +3126,8 @@ static void pgaudit_delete_file(uint32 fnum, TimestampTz begtime, TimestampTz en header.signature[1] != 'U' || header.version != 0 || !(header.fields == (PGAUDIT_QUERY_COLS - 1) || - header.fields == PGAUDIT_QUERY_COLS)) { + header.fields == PGAUDIT_QUERY_COLS || + header.fields == PGAUDIT_QUERY_COLS_NEW)) { /* make sure we are compatible with the older version audit file */ ereport(LOG, (errmsg("invalid data in audit file \"%s\"", t_thrd.audit.pgaudit_filepath))); break; @@ -3066,7 +3241,7 @@ static TimestampTz pgaudit_headertime(uint32 fnum, const char *audit_directory) * Brief : whether the invoke is allowed for query audit. 
* Description : */ -static void pgaudit_query_valid_check(const ReturnSetInfo *rsinfo, FunctionCallInfoData *fcinfo, TupleDesc &tupdesc) +static void pgaudit_query_valid_check(const ReturnSetInfo *rsinfo, FunctionCallInfoData *fcinfo, TupleDesc &tupdesc, bool newVersion) { Oid roleid = InvalidOid; /* Check some permissions first */ @@ -3090,8 +3265,14 @@ static void pgaudit_query_valid_check(const ReturnSetInfo *rsinfo, FunctionCallI ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("return type must be a row type"))); } - if (tupdesc->natts != PGAUDIT_QUERY_COLS) { - ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("attribute count of the return row type not matched"))); + if (newVersion) { + if (tupdesc->natts != PGAUDIT_QUERY_COLS_NEW) { + ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("attribute count of the return row type not matched"))); + } + } else { + if (tupdesc->natts != PGAUDIT_QUERY_COLS) { + ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("attribute count of the return row type not matched"))); + } } } @@ -3111,8 +3292,12 @@ Datum pg_query_audit(PG_FUNCTION_ARGS) TimestampTz endtime = PG_GETARG_TIMESTAMPTZ(1); char* audit_dir = NULL; char real_audit_dir[PATH_MAX] = {0}; + bool newVersion = false; + if (pgaudit_need_sha_code()) { + newVersion = true; + } - pgaudit_query_valid_check(rsinfo, fcinfo, tupdesc); + pgaudit_query_valid_check(rsinfo, fcinfo, tupdesc, newVersion); /* * When g_instance.audit_cxt.audit_indextbl is not NULL, @@ -3167,7 +3352,7 @@ Datum pg_query_audit(PG_FUNCTION_ARGS) satisfied = pgaudit_check_system(begtime, endtime, index, real_audit_dir); if (satisfied) { oldcontext = MemoryContextSwitchTo(query_audit_ctx); - pgaudit_query_file(tupstore, tupdesc, fnum, begtime, endtime, real_audit_dir); + pgaudit_query_file(tupstore, tupdesc, fnum, begtime, endtime, real_audit_dir, newVersion); MemoryContextSwitchTo(oldcontext); MemoryContextReset(query_audit_ctx); satisfied = false; @@ -3333,14 +3518,22 @@ static void 
CheckAuditFile(void) auditfile_init(true); } -static bool pgaudit_invalid_header(const AuditMsgHdr* header) +static bool pgaudit_invalid_header(const AuditMsgHdr* header, bool newVersion) { - return ((header->signature[0]) != 'A' || header->signature[1] != 'U' || header->version != 0 || - !(header->fields == (PGAUDIT_QUERY_COLS - 1) || header->fields == PGAUDIT_QUERY_COLS) || - (header->size <= sizeof(AuditMsgHdr)) || - (header->size >= (uint32)u_sess->attr.attr_security.Audit_RotationSize * 1024L)); + if (newVersion) { + return ((header->signature[0]) != 'A' || header->signature[1] != 'U' || header->version != 0 || + !(header->fields == (PGAUDIT_QUERY_COLS - 1) || header->fields == PGAUDIT_QUERY_COLS || header->fields == PGAUDIT_QUERY_COLS_NEW) || + (header->size <= sizeof(AuditMsgHdr)) || + (header->size >= (uint32)u_sess->attr.attr_security.Audit_RotationSize * 1024L)); + } else { + return ((header->signature[0]) != 'A' || header->signature[1] != 'U' || header->version != 0 || + !(header->fields == (PGAUDIT_QUERY_COLS - 1) || header->fields == PGAUDIT_QUERY_COLS) || + (header->size <= sizeof(AuditMsgHdr)) || + (header->size >= (uint32)u_sess->attr.attr_security.Audit_RotationSize * 1024L)); + } } + /* * mark corrupt fnum by postgres thread * used for reinit audit files in audit thread diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql new file mode 100644 index 0000000000..6e9aa41f79 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql @@ -0,0 +1,39 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT 
userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + TEXT, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql new file mode 100644 index 0000000000..6e9aa41f79 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql @@ -0,0 +1,39 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, 
timestamptz, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + TEXT, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql new file mode 100644 index 0000000000..33db65f6b5 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql @@ -0,0 +1,43 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT, + OUT sha_code TEXT, + OUT verify_result BOOLEAN +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + TEXT, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name 
TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT, + OUT sha_code TEXT, + OUT verify_result BOOLEAN +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql new file mode 100644 index 0000000000..ae02b572e8 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql @@ -0,0 +1,43 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT, + OUT sha_code TEXT, + OUT verify_result BOOLEAN +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; +CREATE FUNCTION pg_catalog.pg_query_audit( + TIMESTAMPTZ, + TIMESTAMPTZ, + TEXT, + OUT "time" TIMESTAMPTZ, + OUT type TEXT, + OUT result TEXT, + OUT userid TEXT, + OUT username TEXT, + OUT database TEXT, + OUT client_conninfo TEXT, + OUT object_name TEXT, + OUT detail_info TEXT, + OUT node_name TEXT, + OUT thread_id TEXT, + OUT local_port TEXT, + OUT remote_port TEXT, + OUT sha_code TEXT, + OUT verify_result BOOLEAN +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; \ No newline at end of file diff 
--git a/src/include/miscadmin.h b/src/include/miscadmin.h index 824e2bf321..205fe670bb 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -151,6 +151,7 @@ extern const uint32 ROTATE_UNROTATE_VERSION_NUM; extern const uint32 FLOAT_VERSION_NUMBER; extern const uint32 STRAIGHT_JOIN_VERSION_NUMBER; extern const uint32 PARALLEL_ENABLE_VERSION_NUM; +extern const uint32 AUDIT_SHA_VERSION_NUM; extern void register_backend_version(uint32 backend_version); extern bool contain_backend_version(uint32 version_number); diff --git a/src/include/pgaudit.h b/src/include/pgaudit.h index e3aee317d0..60e7c3f35b 100644 --- a/src/include/pgaudit.h +++ b/src/include/pgaudit.h @@ -166,7 +166,8 @@ typedef enum { AUDIT_NODENAME_INFO, AUDIT_THREADID_INFO, AUDIT_LOCALPORT_INFO, - AUDIT_REMOTEPORT_INFO + AUDIT_REMOTEPORT_INFO, + AUDIT_SHACODE, } AuditStringFieldNum; struct AuditElasticEvent { @@ -182,6 +183,7 @@ struct AuditElasticEvent { const char* threadIdInfo; const char* localPortInfo; const char* remotePortInfo; + const char* shaCode; long long eventTime; }; -- Gitee From 89703f802fc010e7087dc2eb8393ccd5faf4bafb Mon Sep 17 00:00:00 2001 From: hwhbj Date: Wed, 31 Jul 2024 14:42:28 +0800 Subject: [PATCH 121/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Djdbc=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=8A=A5=E6=96=87=E5=90=8E=EF=BC=8C=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E5=A4=8D=E5=88=B6=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/replication/datasender.cpp | 2 ++ src/gausskernel/storage/replication/walsender.cpp | 1 + 2 files changed, 3 insertions(+) diff --git a/src/gausskernel/storage/replication/datasender.cpp b/src/gausskernel/storage/replication/datasender.cpp index 55544bd51e..9065dbdf4e 100755 --- a/src/gausskernel/storage/replication/datasender.cpp +++ b/src/gausskernel/storage/replication/datasender.cpp @@ -292,6 +292,8 @@ static void DataSndHandshake(void) } } break; + 
case 'V': /* client conn driver support trace info*/ + break; case 'X': /* standby is closing the connection */ proc_exit(0); diff --git a/src/gausskernel/storage/replication/walsender.cpp b/src/gausskernel/storage/replication/walsender.cpp index a0f17456ea..e597629832 100755 --- a/src/gausskernel/storage/replication/walsender.cpp +++ b/src/gausskernel/storage/replication/walsender.cpp @@ -639,6 +639,7 @@ static void WalSndHandshake(void) /* standby is closing the connection */ proc_exit(0); /* fall-through */ + case 'V': /* client conn driver support trace info*/ case 'P': /* standby is closing the connection */ break; -- Gitee From 826fa98ecdcf4d15fc70c0c3c2354795f59f7dc0 Mon Sep 17 00:00:00 2001 From: hwhbj Date: Wed, 31 Jul 2024 16:10:15 +0800 Subject: [PATCH 122/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dmysql=20jdbc=E8=BF=9E?= =?UTF-8?q?=E6=8E=A5openGauss=E5=A4=B1=E8=B4=A5=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index 782bd6771a..4263d5e41c 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -6689,7 +6689,10 @@ void BeginReportingGUCOptions(void) if (conf->flags & GUC_REPORT) ReportGUCOption(conf); } - ReportTraceOption(); + if (u_sess->attr.attr_common.enable_record_nettime) { + ReportTraceOption(); + } + } /* -- Gitee From f9b28194eb1229f467a9a889dd186e1ae5ea8eaf Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Wed, 31 Jul 2024 10:19:49 +0800 Subject: [PATCH 123/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=8C=89=E9=9C=80=E5=9B=9E=E6=94=BE=E4=B8=8D=E6=94=AF?= =?UTF-8?q?=E6=8C=81reindex=20database=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
src/gausskernel/process/tcop/utility.cpp | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index 12154a8aca..ea05f3c765 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -13908,13 +13908,15 @@ struct OndemandParseInfo { NodeTag parseType; ObjectType objectType; RangeVar* relationRangeVar; + const char* dbName; }; -static List* AppendItemToOndemandParseList(List* ondemandParseList, NodeTag parseType, ObjectType objectType, RangeVar* relationRangeVar) { +static List* AppendItemToOndemandParseList(List* ondemandParseList, NodeTag parseType, ObjectType objectType, RangeVar* relationRangeVar, const char* dbName = NULL) { OndemandParseInfo* info = (OndemandParseInfo*)palloc(sizeof(OndemandParseInfo)); info->parseType = parseType; info->objectType = objectType; info->relationRangeVar = relationRangeVar; + info->dbName = dbName; ondemandParseList = lappend(ondemandParseList, info); return ondemandParseList; } @@ -14011,7 +14013,8 @@ static void PreRedoInOndemandRecovery(Node* parseTree) { case T_ReindexStmt: { ReindexStmt* reindexStmt = (ReindexStmt*) parseTree; ondemandParseList = AppendItemToOndemandParseList(ondemandParseList, T_ReindexStmt, - reindexStmt->kind, reindexStmt->relation); + reindexStmt->kind, reindexStmt->relation, + reindexStmt->kind == OBJECT_DATABASE ? 
reindexStmt->name : NULL); break; } /* ALTER TABLE/INDEX/PROCEDURE/ SET SCHEMA */ @@ -14045,7 +14048,17 @@ static void PreRedoInOndemandRecovery(Node* parseTree) { case T_AlterOwnerStmt: { break; } - case T_ReindexStmt: + case T_ReindexStmt: { + const char* dbName = ondemandParseInfo->dbName; + Oid dbOid = get_database_oid_by_name(dbName); + if (!OidIsValid(dbOid)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("Database \"%s\" does not exist.", dbName))); + break; + } + RedoDatabaseForOndemandExtremeRTO(dbOid); + break; + } default: { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("[On-demand] Not support this sql in ondemand redo phase, nodeType: %d, relKind: %d.", -- Gitee From 58c4878c4fefe68afd7421025355f0dea8e04c76 Mon Sep 17 00:00:00 2001 From: humengyao Date: Mon, 1 Jan 2024 22:04:12 -0800 Subject: [PATCH 124/347] =?UTF-8?q?=E3=80=90=E5=9B=9E=E5=90=886.0.0?= =?UTF-8?q?=E3=80=91=E8=A7=A3=E5=86=B3=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B?= =?UTF-8?q?hw=5Faudit=5Ffull=E9=9A=8F=E6=9C=BA=E6=8C=82=E5=BA=93=E5=AF=BC?= =?UTF-8?q?=E8=87=B4=E5=90=8E=E7=BB=AD=E6=89=80=E6=9C=89=E7=94=A8=E4=BE=8B?= =?UTF-8?q?=E5=A4=B1=E8=B4=A5=EF=BC=8Caudit=E7=89=88=E5=9D=97=E5=AD=98?= =?UTF-8?q?=E5=9C=A8=E6=AE=B5=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/process/postmaster/pgaudit.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/gausskernel/process/postmaster/pgaudit.cpp b/src/gausskernel/process/postmaster/pgaudit.cpp index f223a42830..7c430fcf91 100755 --- a/src/gausskernel/process/postmaster/pgaudit.cpp +++ b/src/gausskernel/process/postmaster/pgaudit.cpp @@ -1081,11 +1081,15 @@ static void pgaudit_write_file(char* buffer, int count) /* if record time is earlier than current file's create time, * create a new audit file to avoid the confusion caused by system clock change */ FILE* fh = NULL; + LWLockAcquire(AuditIndexFileLock, LW_SHARED); + bool haslock 
= true; if (g_instance.audit_cxt.audit_indextbl) { AuditIndexItem *cur_item = g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[t_thrd.audit.cur_thread_idx]; if (curtime < cur_item->ctime) { + LWLockRelease(AuditIndexFileLock); + haslock = false; auditfile_close(SYSAUDITFILE_TYPE); fh = auditfile_open((pg_time_t)time(NULL), "a", true); if (fh != NULL) { @@ -1095,6 +1099,9 @@ static void pgaudit_write_file(char* buffer, int count) } uint32 retry_cnt = 0; + if (haslock) { + LWLockRelease(AuditIndexFileLock); + } retry1: rc = fwrite(buffer, 1, count, t_thrd.audit.sysauditFile); -- Gitee From 0a179f7f76ea824b2b5d48e42c5e2eb3832eb3a7 Mon Sep 17 00:00:00 2001 From: "arcoalien@qq.com" Date: Tue, 30 Jul 2024 15:03:31 +0800 Subject: [PATCH 125/347] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E9=80=82=E9=85=8Ddss?= =?UTF-8?q?=20aio=20post=20pwrite=E6=8E=A5=E5=8F=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_aio.cpp | 1 + .../process/postmaster/pagewriter.cpp | 21 ++++++++++++++++++- src/gausskernel/storage/buffer/bufmgr.cpp | 6 ++++-- src/gausskernel/storage/dss/dss_adaptor.cpp | 4 ++-- src/gausskernel/storage/dss/fio_dss.cpp | 5 +++++ .../storage/smgr/segment/data_file.cpp | 6 +++++- .../storage/smgr/segment/segbuffer.cpp | 6 ++++-- .../storage/smgr/segment/space.cpp | 5 +++-- src/gausskernel/storage/smgr/segstore.cpp | 4 ++-- src/include/ddes/dms/ss_aio.h | 5 +++++ src/include/postmaster/pagewriter.h | 1 + src/include/storage/dss/dss_adaptor.h | 5 ++++- src/include/storage/dss/fio_dss.h | 2 +- src/include/storage/smgr/segment.h | 2 +- src/include/storage/smgr/segment_internal.h | 2 +- 15 files changed, 59 insertions(+), 16 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_aio.cpp b/src/gausskernel/ddes/adapter/ss_aio.cpp index d0067a2b27..e903c83aa5 100644 --- a/src/gausskernel/ddes/adapter/ss_aio.cpp +++ b/src/gausskernel/ddes/adapter/ss_aio.cpp @@ -49,6 
+49,7 @@ static void WaitDSSAioComplete(DSSAioCxt *aio_cxt, int index) for (int i = 0; i < num; i++) { aio_cxt->aiocb(&aio->events[i]); } + event_num -= num; } diff --git a/src/gausskernel/process/postmaster/pagewriter.cpp b/src/gausskernel/process/postmaster/pagewriter.cpp index 96836e373c..4c4f0dfc22 100755 --- a/src/gausskernel/process/postmaster/pagewriter.cpp +++ b/src/gausskernel/process/postmaster/pagewriter.cpp @@ -47,6 +47,7 @@ #include "gstrace/gstrace_infra.h" #include "gstrace/postmaster_gstrace.h" #include "ddes/dms/ss_dms_bufmgr.h" +#include "storage/dss/fio_dss.h" #include @@ -323,6 +324,8 @@ void incre_ckpt_pagewriter_cxt_init() /* 2M AIO buffer */ char *unaligned_buf = (char *)palloc0(DSS_AIO_BATCH_SIZE * DSS_AIO_UTIL_NUM * BLCKSZ + BLCKSZ); pgwr->aio_buf = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); + pgwr->aio_extra = + (PgwrAioExtraData *)palloc0(DSS_AIO_BATCH_SIZE * DSS_AIO_UTIL_NUM * sizeof(PgwrAioExtraData)); } } @@ -1643,7 +1646,8 @@ void crps_destory_ctxs() static void incre_ckpt_aio_callback(struct io_event *event) { - BufferDesc *buf_desc = (BufferDesc *)(event->data); + PgwrAioExtraData *tempAioExtra = (PgwrAioExtraData *)(event->data); + BufferDesc *buf_desc = (BufferDesc *)(tempAioExtra->aio_bufdesc); uint32 written_size = event->obj->u.c.nbytes; if (written_size != event->res) { ereport(WARNING, (errmsg("aio write failed (errno = %d), buffer: %d/%d/%d/%d/%d %d-%d", -(int32)(event->res), @@ -1687,6 +1691,21 @@ static void incre_ckpt_aio_callback(struct io_event *event) pfree(origin_buf); #endif + off_t roffset = 0; + if (IsSegmentBufferID(buf_desc->buf_id)) { + roffset = ((buf_desc->tag.blockNum) % RELSEG_SIZE) * BLCKSZ; + } else { + roffset = ((buf_desc->extra->seg_blockno) % RELSEG_SIZE) * BLCKSZ; + } + + int aioRet = dss_aio_post_pwrite(event->obj->data, tempAioExtra->aio_fd, event->obj->u.c.nbytes, roffset); + if (aioRet != 0) { + ereport(PANIC, (errmsg("failed to post write by asnyc io (errno = %d), buffer: %d/%d/%d/%d/%d %d-%d", 
errno, + buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, + (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum))); + } + buf_desc->extra->aio_in_progress = false; UnpinBuffer(buf_desc, true); } diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index 9ab157778c..e062afafde 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -5147,8 +5147,10 @@ void FlushBuffer(void *buf, SMgrRelation reln, ReadBufferMethod flushmethod, boo securec_check(ret, "\0", "\0"); struct iocb *iocb_ptr = DSSAioGetIOCB(aio_cxt); + PgwrAioExtraData* tempAioExtra = &(pgwr->aio_extra[aiobuf_id]); int32 io_ret = seg_physical_aio_prep_pwrite(spc, fakenode, bufferinfo.blockinfo.forknum, - bufdesc->extra->seg_blockno, tempBuf, (void *)iocb_ptr); + bufdesc->extra->seg_blockno, tempBuf, (void *)iocb_ptr, (void *)tempAioExtra); + tempAioExtra->aio_bufdesc = (void *)bufdesc; if (io_ret != DSS_SUCCESS) { ereport(PANIC, (errmsg("dss aio failed, buffer: %d/%d/%d/%d/%d %d-%u", fakenode.spcNode, fakenode.dbNode, fakenode.relNode, (int)fakenode.bucketNode, @@ -5164,7 +5166,7 @@ void FlushBuffer(void *buf, SMgrRelation reln, ReadBufferMethod flushmethod, boo t_thrd.dms_cxt.buf_in_aio = true; bufdesc->extra->aio_in_progress = true; /* should be after io_prep_pwrite, because io_prep_pwrite will memset iocb struct */ - iocb_ptr->data = (void *)bufdesc; + iocb_ptr->data = (void *)tempAioExtra; DSSAioAppendIOCB(aio_cxt, iocb_ptr); } else { seg_physical_write(spc, fakenode, bufferinfo.blockinfo.forknum, bufdesc->extra->seg_blockno, bufToWrite, diff --git a/src/gausskernel/storage/dss/dss_adaptor.cpp b/src/gausskernel/storage/dss/dss_adaptor.cpp index 0b1ce5c671..5af7d11f12 100644 --- a/src/gausskernel/storage/dss/dss_adaptor.cpp +++ b/src/gausskernel/storage/dss/dss_adaptor.cpp @@ -121,8 +121,7 @@ int 
dss_device_init(const char *conn_path, bool enable_dss) SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_stat", (void **)&device_op.dss_stat)); SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_lstat", (void **)&device_op.dss_lstat)); SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_fstat", (void **)&device_op.dss_fstat)); - SS_RETURN_IFERR( - dss_load_symbol(device_op.handle, "dss_set_main_inst", (void **)&device_op.dss_set_main_inst)); + SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_set_main_inst", (void **)&device_op.dss_set_main_inst)); SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_set_svr_path", (void **)&device_op.dss_set_svr_path)); SS_RETURN_IFERR( dss_load_symbol(device_op.handle, "dss_register_log_callback", (void **)&device_op.dss_register_log_callback)); @@ -131,6 +130,7 @@ int dss_device_init(const char *conn_path, bool enable_dss) SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_compare_size_equal", (void **)&device_op.dss_compare_size)); SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_aio_prep_pwrite", (void **)&device_op.dss_aio_pwrite)); SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_aio_prep_pread", (void **)&device_op.dss_aio_pread)); + SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_aio_post_pwrite", (void **)&device_op.dss_aio_post_pwrite)); SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_init_logger", (void **)&device_op.dss_init_logger)); SS_RETURN_IFERR(dss_load_symbol(device_op.handle, "dss_refresh_logger", (void **)&device_op.dss_refresh_logger)); diff --git a/src/gausskernel/storage/dss/fio_dss.cpp b/src/gausskernel/storage/dss/fio_dss.cpp index afbee1add9..8471f21e9b 100644 --- a/src/gausskernel/storage/dss/fio_dss.cpp +++ b/src/gausskernel/storage/dss/fio_dss.cpp @@ -721,3 +721,8 @@ int dss_aio_prep_pread(void *iocb, int fd, void *buf, size_t count, long long of { return g_dss_device_op.dss_aio_pread(iocb, fd, buf, count, offset); } + +int dss_aio_post_pwrite(void 
*iocb, int fd, size_t count, long long offset) +{ + return g_dss_device_op.dss_aio_post_pwrite(iocb, fd, count, offset); +} \ No newline at end of file diff --git a/src/gausskernel/storage/smgr/segment/data_file.cpp b/src/gausskernel/storage/smgr/segment/data_file.cpp index 593be40bb3..16253c6399 100644 --- a/src/gausskernel/storage/smgr/segment/data_file.cpp +++ b/src/gausskernel/storage/smgr/segment/data_file.cpp @@ -503,7 +503,11 @@ void df_extend_internal(SegLogicFile *sf) df_extend_file_vector(sf); } int new_fd; - new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); + if (ENABLE_DSS) { + new_fd = dv_open_file(filename, O_RDWR | O_CREAT | DSS_FT_NODE_FLAG_INNER_INITED, SEGMENT_FILE_MODE); + } else { + new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); + } if (new_fd < 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("[segpage] could not create file \"%s\": %m", filename))); } diff --git a/src/gausskernel/storage/smgr/segment/segbuffer.cpp b/src/gausskernel/storage/smgr/segment/segbuffer.cpp index 448f6acb6e..85bbe77e10 100644 --- a/src/gausskernel/storage/smgr/segment/segbuffer.cpp +++ b/src/gausskernel/storage/smgr/segment/segbuffer.cpp @@ -420,8 +420,10 @@ void SegFlushBuffer(BufferDesc *buf, SMgrRelation reln) securec_check(ret, "\0", "\0"); struct iocb *iocb_ptr = DSSAioGetIOCB(aio_cxt); + PgwrAioExtraData* tempAioExtra = &(pgwr->aio_extra[aiobuf_id]); int32 io_ret = seg_physical_aio_prep_pwrite(spc, buf->tag.rnode, buf->tag.forkNum, - buf->tag.blockNum, tempBuf, (void *)iocb_ptr); + buf->tag.blockNum, tempBuf, (void *)iocb_ptr, (void *)tempAioExtra); + tempAioExtra->aio_bufdesc = (void *)buf; if (io_ret != DSS_SUCCESS) { ereport(PANIC, (errmsg("dss aio failed, buffer: %d/%d/%d/%d/%d %d-%u", buf->tag.rnode.spcNode, buf->tag.rnode.dbNode, buf->tag.rnode.relNode, (int)buf->tag.rnode.bucketNode, @@ -437,7 +439,7 @@ void SegFlushBuffer(BufferDesc *buf, SMgrRelation reln) buf->extra->aio_in_progress = true; 
t_thrd.dms_cxt.buf_in_aio = true; /* should be after io_prep_pwrite, because io_prep_pwrite will memset iocb struct */ - iocb_ptr->data = (void *)buf; + iocb_ptr->data = (void *)tempAioExtra; DSSAioAppendIOCB(aio_cxt, iocb_ptr); } else { seg_physical_write(spc, buf->tag.rnode, buf->tag.forkNum, buf->tag.blockNum, (char *)buf_to_write, false); diff --git a/src/gausskernel/storage/smgr/segment/space.cpp b/src/gausskernel/storage/smgr/segment/space.cpp index 3843de05a4..dff60bdc46 100644 --- a/src/gausskernel/storage/smgr/segment/space.cpp +++ b/src/gausskernel/storage/smgr/segment/space.cpp @@ -40,8 +40,8 @@ #include "utils/relfilenodemap.h" #include "pgxc/execRemote.h" #include "ddes/dms/ss_transaction.h" +#include "ddes/dms/ss_aio.h" #include "storage/file/fio_device.h" -#include "libaio.h" static void SSInitSegLogicFile(SegSpace *spc); @@ -108,7 +108,7 @@ void spc_write_block(SegSpace *spc, RelFileNode relNode, ForkNumber forknum, con } int32 spc_aio_prep_pwrite(SegSpace *spc, RelFileNode relNode, ForkNumber forknum, BlockNumber blocknum, - const char *buffer, void *iocb_ptr) + const char *buffer, void *iocb_ptr, void *tempAioExtra) { int egid = EXTENT_TYPE_TO_GROUPID(relNode.relNode); SegExtentGroup *seg = &spc->extent_group[egid][forknum]; @@ -120,6 +120,7 @@ int32 spc_aio_prep_pwrite(SegSpace *spc, RelFileNode relNode, ForkNumber forknum SegPhysicalFile spf = df_get_physical_file(seg->segfile, sliceno, blocknum); int32 ret; if (is_dss_fd(spf.fd)) { + ((PgwrAioExtraData *)tempAioExtra)->aio_fd = spf.fd; ret = dss_aio_prep_pwrite(iocb_ptr, spf.fd, (void *)buffer, BLCKSZ, roffset); } else { io_prep_pwrite((struct iocb *)iocb_ptr, spf.fd, (void *)buffer, BLCKSZ, roffset); diff --git a/src/gausskernel/storage/smgr/segstore.cpp b/src/gausskernel/storage/smgr/segstore.cpp index 6102cce6a0..dfe77382ff 100755 --- a/src/gausskernel/storage/smgr/segstore.cpp +++ b/src/gausskernel/storage/smgr/segstore.cpp @@ -1922,12 +1922,12 @@ void seg_physical_write(SegSpace *spc, 
RelFileNode &rNode, ForkNumber forknum, B } int32 seg_physical_aio_prep_pwrite(SegSpace *spc, RelFileNode &rNode, ForkNumber forknum, BlockNumber blocknum, - const char *buffer, void *iocb_ptr) + const char *buffer, void *iocb_ptr, void *tempAioExtra) { SegmentCheck(IsSegmentPhysicalRelNode(rNode)); SegmentCheck(spc != NULL); - return spc_aio_prep_pwrite(spc, rNode, forknum, blocknum, buffer, iocb_ptr); + return spc_aio_prep_pwrite(spc, rNode, forknum, blocknum, buffer, iocb_ptr, tempAioExtra); } static bool check_meta_data(BlockNumber extent, uint32 extent_size, uint32* offset_block) { diff --git a/src/include/ddes/dms/ss_aio.h b/src/include/ddes/dms/ss_aio.h index db41149e12..58d3c2b970 100644 --- a/src/include/ddes/dms/ss_aio.h +++ b/src/include/ddes/dms/ss_aio.h @@ -49,6 +49,11 @@ typedef struct DSSAioCxt { AioUtil aio[DSS_AIO_UTIL_NUM]; } DSSAioCxt; +typedef struct PgwrAioExtraData { + int aio_fd; + void* aio_bufdesc; +} PgwrAioExtraData; + void DSSAioInitialize(DSSAioCxt *aio_cxt, aio_callback callback); void DSSAioDestroy(DSSAioCxt *aio_cxt); struct iocb* DSSAioGetIOCB(DSSAioCxt *aio_cxt); diff --git a/src/include/postmaster/pagewriter.h b/src/include/postmaster/pagewriter.h index 6d458f6cd2..c55f428e9e 100644 --- a/src/include/postmaster/pagewriter.h +++ b/src/include/postmaster/pagewriter.h @@ -87,6 +87,7 @@ typedef struct PageWriterProc { /* auxiluary structs for implementing AIO in DSS */ DSSAioCxt aio_cxt; char *aio_buf; + PgwrAioExtraData* aio_extra; } PageWriterProc; typedef struct PageWriterProcs { diff --git a/src/include/storage/dss/dss_adaptor.h b/src/include/storage/dss/dss_adaptor.h index 481d3fe912..e81b0ff59c 100644 --- a/src/include/storage/dss/dss_adaptor.h +++ b/src/include/storage/dss/dss_adaptor.h @@ -69,7 +69,9 @@ typedef int (*dss_get_storage_addr)(int handle, long long offset, char *poolname typedef int (*dss_compare_size_equal)(const char *vg_name, long long *au_size); typedef int (*dss_aio_prep_pwrite_device)(void *iocb, int handle, 
void *buf, size_t count, long long offset); typedef int (*dss_aio_prep_pread_device)(void *iocb, int handle, void *buf, size_t count, long long offset); -typedef int (*dss_init_logger_t)(char *log_home, unsigned int log_level, unsigned int log_backup_file_count, unsigned long long log_max_file_size); +typedef int (*dss_aio_post_pwrite_device)(void *iocb, int handle, size_t count, long long offset); +typedef int (*dss_init_logger_t)(char *log_home, unsigned int log_level, unsigned int log_backup_file_count, + unsigned long long log_max_file_size); typedef void (*dss_refresh_logger_t)(char *log_field, unsigned long long *value); typedef int (*dss_set_main)(void); typedef struct st_dss_device_op_t { @@ -109,6 +111,7 @@ typedef struct st_dss_device_op_t { dss_compare_size_equal dss_compare_size; dss_aio_prep_pwrite_device dss_aio_pwrite; dss_aio_prep_pread_device dss_aio_pread; + dss_aio_post_pwrite_device dss_aio_post_pwrite; dss_init_logger_t dss_init_logger; dss_refresh_logger_t dss_refresh_logger; dss_set_main dss_set_main_inst; diff --git a/src/include/storage/dss/fio_dss.h b/src/include/storage/dss/fio_dss.h index f179f90f27..ccea0e1514 100644 --- a/src/include/storage/dss/fio_dss.h +++ b/src/include/storage/dss/fio_dss.h @@ -80,5 +80,5 @@ int dss_get_addr(int handle, long long offset, char *poolname, char *imagename, int dss_compare_size(const char *vg_name, long long *au_size); int dss_aio_prep_pwrite(void *iocb, int fd, void *buf, size_t count, long long offset); int dss_aio_prep_pread(void *iocb, int fd, void *buf, size_t count, long long offset); - +int dss_aio_post_pwrite(void *iocb, int fd, size_t count, long long offset); #endif // FIO_DSS_H diff --git a/src/include/storage/smgr/segment.h b/src/include/storage/smgr/segment.h index 4e4c2caeb2..a87f7aa502 100644 --- a/src/include/storage/smgr/segment.h +++ b/src/include/storage/smgr/segment.h @@ -60,7 +60,7 @@ void seg_physical_write(SegSpace *spc, RelFileNode &rNode, ForkNumber forknum, B XLogRecPtr 
seg_get_headlsn(SegSpace *spc, BlockNumber blockNum, bool isbucket); int32 seg_physical_aio_prep_pwrite(SegSpace *spc, RelFileNode &rNode, ForkNumber forknum, BlockNumber blocknum, - const char *buffer, void *iocb_ptr); + const char *buffer, void *iocb_ptr, void *tempAioExtra); /* segment sync callback */ void forget_space_fsync_request(SegSpace *spc); diff --git a/src/include/storage/smgr/segment_internal.h b/src/include/storage/smgr/segment_internal.h index a29a415c59..eb19438110 100644 --- a/src/include/storage/smgr/segment_internal.h +++ b/src/include/storage/smgr/segment_internal.h @@ -416,7 +416,7 @@ void spc_datafile_create(SegSpace *spc, BlockNumber egRelNode, ForkNumber forknu void spc_extend_file(SegSpace *spc, BlockNumber egRelNode, ForkNumber forknum, BlockNumber blkno); bool spc_datafile_exist(SegSpace *spc, BlockNumber egRelNode, ForkNumber forknum); int32 spc_aio_prep_pwrite(SegSpace *spc, RelFileNode relNode, ForkNumber forknum, BlockNumber blocknum, - const char *buffer, void *iocb_ptr); + const char *buffer, void *iocb_ptr, void *tempAioExtra); extern void spc_shrink_files(SegExtentGroup *seg, BlockNumber target_size, bool redo); -- Gitee From c71c3d044bbbfd5eb1d405124baf5c7780607cc0 Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Thu, 1 Aug 2024 18:40:58 +0800 Subject: [PATCH 126/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=88=97numeric?= =?UTF-8?q?=E3=80=81float=E6=95=B0=E6=8D=AE=E7=B1=BB=E5=9E=8B=E6=97=B6?= =?UTF-8?q?=EF=BC=8C=E6=B7=BB=E5=8A=A0=E9=AA=8C=E8=AF=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/tablecmds.cpp | 82 +++++++++++++++++++ src/include/commands/tablecmds.h | 10 +++ src/include/utils/numeric.h | 1 + src/test/regress/expected/test_float.out | 57 +++++++++++++ .../expected/test_numeric_with_neg_scale.out | 20 +++++ src/test/regress/sql/test_float.sql | 20 ++++- .../sql/test_numeric_with_neg_scale.sql | 9 ++ 7 files changed, 198 insertions(+), 
1 deletion(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 3f4606f6fc..f80a412c80 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -10123,6 +10123,16 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat } else { ((HeapScanDesc) scan)->rs_tupdesc = oldTupDesc; while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { + if (tab->check_pass_with_relempty == AT_FASN_FAIL_PRECISION) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("column to be modified must be empty to decrease precision or scale"))); + } else if (tab->check_pass_with_relempty == AT_FASN_FAIL_TYPE) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("column to be modified must be empty to change datatype"))); + } + if (tab->rewrite > 0) { Oid tupOid = InvalidOid; int newvals_num = 0; @@ -16160,6 +16170,76 @@ void CheckHugeToast(AlteredTableInfo *tab, Relation rel, AttrNumber attnum) } } +/** + * Check numeric type modify. + * column to be modified must be empty to decrease precision or scale; + * column to be modified must be empty to change datatype. + * + * Only valid numeric scale decrease, And when set behavior_compat_options = 'float_as_numeric' + * (float use numeric indicate), valid float(p) can't decrease precision and can't change to other datatype. 
+*/ +static int CheckFloatIllegalTypeConversion(Relation rel, Oid origintype, int32 origintypmod, Oid targettype, int32 targettypmod) +{ + int32 oriprecision; + int16 oriscale; + int32 tarprecision; + int16 tarscale; + bool compatibilityversion; + int result = AT_FASN_PASS; + + if (RelationIsCUFormat(rel) || !(u_sess->attr.attr_sql.sql_compatibility == A_FORMAT)) { + return result; + } + + compatibilityversion = FLOAT_AS_NUMERIC && t_thrd.proc->workingVersionNum >= FLOAT_VERSION_NUMBER; + + if (origintype == NUMERICOID) { + oriprecision = (int32) ((((uint32)(origintypmod - VARHDRSZ)) >> 16) & 0xffff); + oriscale = (int16) (((uint32)(origintypmod - VARHDRSZ)) & 0xffff); + } + + if (targettype == NUMERICOID) { + tarprecision = (int32) ((((uint32)(targettypmod - VARHDRSZ)) >> 16) & 0xffff); + tarscale = (int16) (((uint32)(targettypmod - VARHDRSZ)) & 0xffff); + + if (origintype == NUMERICOID) { + if (compatibilityversion) { + // float decrease precision + if (IS_FLOAT_AS_NUMERIC(oriscale) && IS_FLOAT_AS_NUMERIC(tarscale) && tarprecision < oriprecision) { + result = AT_FASN_FAIL_PRECISION; + } + // can't change datatype between float and numeric + else if (IS_FLOAT_AS_NUMERIC(oriscale) != IS_FLOAT_AS_NUMERIC(tarscale)) { + result = AT_FASN_FAIL_PRECISION; + } + } + + // numeric decrease scale + if (!IS_FLOAT_AS_NUMERIC(oriscale) && !IS_FLOAT_AS_NUMERIC(tarscale) && tarscale < oriscale) { + result = AT_FASN_FAIL_PRECISION; + } + + return result; + } else if (compatibilityversion && IS_FLOAT_AS_NUMERIC(tarscale) + && (origintype == INT4OID || origintype == INT2OID || origintype ==INT1OID)) { + // O* has no int type, so all use NUMBER(38) instead. + if (ceil(log10(2) * tarprecision) < 38) { + result = AT_FASN_FAIL_PRECISION; + + } + // convert int to numeric + return result; + } + } + + if (compatibilityversion && origintype == NUMERICOID && IS_FLOAT_AS_NUMERIC(oriscale) && targettype != origintype) { + // can't change other float to other datatype. 
+ result = AT_FASN_FAIL_TYPE; + } + + return result; +} + /* * ALTER COLUMN TYPE */ @@ -16313,6 +16393,8 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation transform = (Node*)makeVar(1, attnum, attTup->atttypid, attTup->atttypmod, attTup->attcollation, 0); } + tab->check_pass_with_relempty = CheckFloatIllegalTypeConversion(rel, attTup->atttypid, attTup->atttypmod, targettype, targettypmod); + transform = coerce_to_target_type(pstate, transform, exprType(transform), diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h index fdcccfea74..10aa1e0255 100644 --- a/src/include/commands/tablecmds.h +++ b/src/include/commands/tablecmds.h @@ -65,6 +65,15 @@ #define AT_NUM_PASSES 10 #endif +/** + * state information of alter float datatype result (at float_as_numeric). + * + * Phase 3 scan if table not empty and 'check_floatasnumeric' > 0, go err. + */ +#define AT_FASN_PASS 0 +#define AT_FASN_FAIL_PRECISION 1 +#define AT_FASN_FAIL_TYPE 2 + typedef struct AlteredTableInfo { /* Information saved before any work commences: */ Oid relid; /* Relation to work on */ @@ -78,6 +87,7 @@ typedef struct AlteredTableInfo { List* newvals; /* List of NewColumnValue */ bool new_notnull; /* T if we added new NOT NULL constraints */ int rewrite; /* Reason if a rewrite is forced */ + int check_pass_with_relempty; /* alter column check condition, require table empty condition in phase 3 */ Oid newTableSpace; /* new tablespace; 0 means no change */ /* Objects to rebuild after completing ALTER TYPE operations */ List* changedConstraintOids; /* OIDs of constraints to rebuild */ diff --git a/src/include/utils/numeric.h b/src/include/utils/numeric.h index c7c33cb0ba..3c82cec89f 100644 --- a/src/include/utils/numeric.h +++ b/src/include/utils/numeric.h @@ -156,6 +156,7 @@ typedef struct NumericData* Numeric; #define NUMERIC_MAX_PRECISION 1000 #define NUMERIC_MAX_SCALE 1000 #define NUMERIC_MIN_SCALE -84 +#define IS_FLOAT_AS_NUMERIC(scale) 
((scale) == INT16_MIN) /* * Internal limits on the scales chosen for calculation results diff --git a/src/test/regress/expected/test_float.out b/src/test/regress/expected/test_float.out index 99aa7c424d..edea5c4657 100644 --- a/src/test/regress/expected/test_float.out +++ b/src/test/regress/expected/test_float.out @@ -123,6 +123,63 @@ SELECT * FROM t1; NaN | NaN | NaN (2 rows) +DROP TABLE t1; +-- alter table test +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +CREATE TABLE t1(col1 int, col2 float(5), col3 float(44), col4 float(20)); +INSERT INTO t1 VALUES (1, 12345678901234.567890123456789, 123456789123456789123456789123456789123456789.123456789123456789123456789123456789, 123.123); +INSERT INTO t1 VALUES (12, 0.12345678901234567890123456789, 0.123456789123456789123456789123456789123456789123456789123456789123456789123456789,123.123); +INSERT INTO t1 VALUES (21, 123456789.12345,123456789123456789123456789.12345678912, 123.123); +SELECT pg_get_tabledef('t1'); + pg_get_tabledef +----------------------------------------- + SET search_path = test_float; + + CREATE TABLE t1 ( + + col1 integer, + + col2 float(5), + + col3 float(44), + + col4 float(20) + + ) + + WITH (orientation=row, compression=no); +(1 row) + +SELECT * FROM t1 ORDER BY col1; + col1 | col2 | col3 | col4 +------+----------------+-----------------------------------------------+--------- + 1 | 12000000000000 | 123456789123460000000000000000000000000000000 | 123.123 + 12 | .12 | .12345678912346 | 123.123 + 21 | 120000000 | 123456789123460000000000000 | 123.123 +(3 rows) + +-- alter table not empty +ALTER TABLE t1 MODIFY (col1 float(1)); -- error +ERROR: column to be modified must be empty to decrease precision or scale +-- alter table empty +DELETE FROM t1; +SELECT * FROM t1 ORDER BY col1; + col1 | col2 | col3 | col4 +------+------+------+------ +(0 rows) + +ALTER TABLE t1 MODIFY (col1 float(1)); -- success +SELECT pg_get_tabledef('t1'); + pg_get_tabledef 
+----------------------------------------- + SET search_path = test_float; + + CREATE TABLE t1 ( + + col1 float(1), + + col2 float(5), + + col3 float(44), + + col4 float(20) + + ) + + WITH (orientation=row, compression=no); +(1 row) + +SELECT * FROM t1 ORDER BY col1; + col1 | col2 | col3 | col4 +------+------+------+------ +(0 rows) + DROP TABLE t1; -- PL/SQL test CREATE OR REPLACE PACKAGE pak1 as diff --git a/src/test/regress/expected/test_numeric_with_neg_scale.out b/src/test/regress/expected/test_numeric_with_neg_scale.out index 3b795b769a..73f7125b11 100644 --- a/src/test/regress/expected/test_numeric_with_neg_scale.out +++ b/src/test/regress/expected/test_numeric_with_neg_scale.out @@ -187,6 +187,26 @@ Table "numeric_negative_scale_test.t2" DROP TABLE t1; DROP TABLE t2; +-- test alter table to decrease scale +CREATE TABLE t1 (c1 int, c2 numeric(5, 2), c3 numeric(5, -2)); +INSERT INTO t1 VALUES (1, 546.12, 456135.12); +SELECT * FROM t1; + c1 | c2 | c3 +----+--------+-------- + 1 | 546.12 | 456100 +(1 row) + +ALTER TABLE t1 MODIFY (c2 numeric(5, 1)); -- error +ERROR: column to be modified must be empty to decrease precision or scale +ALTER TABLE t1 MODIFY (c3 numeric(5, -3)); -- error +ERROR: column to be modified must be empty to decrease precision or scale +SELECT * FROM t1; + c1 | c2 | c3 +----+--------+-------- + 1 | 546.12 | 456100 +(1 row) + +DROP TABLE t1; CREATE TABLE t3(a numeric(1,1001)); ERROR: NUMERIC scale must be between -84 and 1000 LINE 1: CREATE TABLE t3(a numeric(1,1001)); diff --git a/src/test/regress/sql/test_float.sql b/src/test/regress/sql/test_float.sql index 7d43409ced..80cca41339 100644 --- a/src/test/regress/sql/test_float.sql +++ b/src/test/regress/sql/test_float.sql @@ -56,6 +56,24 @@ INSERT INTO t1 SELECT 'NaN', 'NaN', 'NaN'; SELECT * FROM t1; DROP TABLE t1; +-- alter table test +set behavior_compat_options = 'float_as_numeric, truncate_numeric_tail_zero'; +CREATE TABLE t1(col1 int, col2 float(5), col3 float(44), col4 float(20)); 
+INSERT INTO t1 VALUES (1, 12345678901234.567890123456789, 123456789123456789123456789123456789123456789.123456789123456789123456789123456789, 123.123); +INSERT INTO t1 VALUES (12, 0.12345678901234567890123456789, 0.123456789123456789123456789123456789123456789123456789123456789123456789123456789,123.123); +INSERT INTO t1 VALUES (21, 123456789.12345,123456789123456789123456789.12345678912, 123.123); +SELECT pg_get_tabledef('t1'); +SELECT * FROM t1 ORDER BY col1; +-- alter table not empty +ALTER TABLE t1 MODIFY (col1 float(1)); -- error +-- alter table empty +DELETE FROM t1; +SELECT * FROM t1 ORDER BY col1; +ALTER TABLE t1 MODIFY (col1 float(1)); -- success +SELECT pg_get_tabledef('t1'); +SELECT * FROM t1 ORDER BY col1; +DROP TABLE t1; + -- PL/SQL test CREATE OR REPLACE PACKAGE pak1 as @@ -83,4 +101,4 @@ DROP PACKAGE pak1; reset behavior_compat_options; reset current_schema; -drop schema test_float cascade; \ No newline at end of file +drop schema test_float cascade; diff --git a/src/test/regress/sql/test_numeric_with_neg_scale.sql b/src/test/regress/sql/test_numeric_with_neg_scale.sql index 7b57dae6f0..aeb9245e03 100644 --- a/src/test/regress/sql/test_numeric_with_neg_scale.sql +++ b/src/test/regress/sql/test_numeric_with_neg_scale.sql @@ -71,6 +71,15 @@ CREATE TABLE t2(a numeric(1,-84)); DROP TABLE t1; DROP TABLE t2; +-- test alter table to decrease scale +CREATE TABLE t1 (c1 int, c2 numeric(5, 2), c3 numeric(5, -2)); +INSERT INTO t1 VALUES (1, 546.12, 456135.12); +SELECT * FROM t1; +ALTER TABLE t1 MODIFY (c2 numeric(5, 1)); -- error +ALTER TABLE t1 MODIFY (c3 numeric(5, -3)); -- error +SELECT * FROM t1; +DROP TABLE t1; + CREATE TABLE t3(a numeric(1,1001)); CREATE TABLE t3(a numeric(1,-85)); CREATE TABLE t3(a numeric(1,1001)); -- Gitee From 3809d9261c2ca8f2c8de422a06228f3b20617ca4 Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 1 Aug 2024 10:06:20 +0800 Subject: [PATCH 127/347] =?UTF-8?q?=E3=80=90=E5=A4=84=E7=90=868.0=20core?= 
=?UTF-8?q?=E9=97=AE=E9=A2=98=E3=80=91blob=E5=AD=97=E7=AC=A6=E5=BA=8F?= =?UTF-8?q?=E6=96=AD=E8=A8=80=E5=AF=BC=E8=87=B4core?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/nodeFuncs.cpp | 15 +++++++++++++-- src/gausskernel/optimizer/commands/view.cpp | 4 +++- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/common/backend/nodes/nodeFuncs.cpp b/src/common/backend/nodes/nodeFuncs.cpp index a1c04ce1a8..17a9d95120 100644 --- a/src/common/backend/nodes/nodeFuncs.cpp +++ b/src/common/backend/nodes/nodeFuncs.cpp @@ -34,12 +34,14 @@ #endif /* FRONTEND_PARSER */ #include "storage/tcap.h" #include "parser/parse_utilcmd.h" +#include "parser/parse_type.h" static bool query_check_no_flt_walker(Node* node, void* context); static bool query_check_srf_walker(Node* node, void* context); static bool expression_returns_set_walker(Node* node, void* context); static bool expression_rownum_walker(Node* node, void* context); static int leftmostLoc(int loc1, int loc2); +static void AssertExprCollation(const Node* expr, Oid collation); Oid userSetElemTypeCollInfo(const Node* expr, Oid (*exprFunc)(const Node*)); /* @@ -1070,6 +1072,15 @@ Oid exprInputCollation(const Node* expr) return coll; } +static void AssertExprCollation(const Node* expr, Oid collation) +{ + Oid expr_collation = exprCollation(expr); + if (DB_IS_CMPT(B_FORMAT) && ENABLE_MULTI_CHARSET && IsBinaryType(exprType(expr))) { + expr_collation = BINARY_COLLATION_OID; + } + Assert(collation == expr_collation); +} + /* * exprSetCollation - * Assign collation information to an expression tree node. 
@@ -1114,7 +1125,7 @@ void exprSetCollation(Node* expr, Oid collation) ((FuncExpr*)expr)->funccollid = collation; break; case T_NamedArgExpr: - Assert(collation == exprCollation((Node*)((NamedArgExpr*)expr)->arg)); + AssertExprCollation((Node*)((NamedArgExpr*)expr)->arg, collation); break; case T_OpExpr: ((OpExpr*)expr)->opcollid = collation; @@ -1148,7 +1159,7 @@ void exprSetCollation(Node* expr, Oid collation) tent = (TargetEntry*)linitial(qtree->targetList); Assert(IsA(tent, TargetEntry)); Assert(!tent->resjunk); - Assert(collation == exprCollation((Node*)tent->expr)); + AssertExprCollation((Node*)tent->expr, collation); } else { /* for all other sublink types, result is boolean */ Assert(!OidIsValid(collation)); diff --git a/src/gausskernel/optimizer/commands/view.cpp b/src/gausskernel/optimizer/commands/view.cpp index d0a2b9aadc..1d2f171fe3 100644 --- a/src/gausskernel/optimizer/commands/view.cpp +++ b/src/gausskernel/optimizer/commands/view.cpp @@ -46,6 +46,7 @@ #include "utils/rel_gs.h" #include "utils/syscache.h" #include "foreign/foreign.h" +#include "parser/parse_type.h" #ifdef PGXC #include "pgxc/execRemote.h" #include "tcop/utility.h" @@ -158,8 +159,9 @@ static ObjectAddress DefineVirtualRelation(RangeVar* relation, List* tlist, bool (errcode(ERRCODE_INDETERMINATE_COLLATION), errmsg("could not determine which collation to use for view column \"%s\"", def->colname), errhint("Use the COLLATE clause to set the collation explicitly."))); - } else + } else if (!(DB_IS_CMPT(B_FORMAT) && ENABLE_MULTI_CHARSET && IsBinaryType(exprType((Node*)tle->expr)))) { Assert(!OidIsValid(def->collOid)); + } def->constraints = NIL; attrList = lappend(attrList, def); -- Gitee From 763c7dc6d016638b85f252ead8b58da223bad475 Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Thu, 1 Aug 2024 19:35:55 +0800 Subject: [PATCH 128/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dswitchover=E5=8D=87=E4=B8=BB=E5=AE=8C=E6=88=90?= 
=?UTF-8?q?=E5=89=8D=E6=9B=B4=E6=96=B0=E6=8E=A7=E5=88=B6=E6=96=87=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/transam/xlog.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 31b2b8ab9f..9fc74f6e00 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -10152,7 +10152,14 @@ void StartupXLOG(void) } t_thrd.shemem_ptr_cxt.ControlFile->time = (pg_time_t)time(NULL); /* No need to hold ControlFileLock yet, we aren't up far enough */ - if (!SS_STANDBY_FAILOVER && SS_ONDEMAND_REALTIME_BUILD_DISABLED) { + /** + * When enable_dms, the following conditions shouldn't update control file here, + * because standby node has read the control file from primary node. + * 1. standby node failover promoting. + * 2. standby node switchover promoting. + * 3. standby node start ondemand realtime build. 
+ */ + if (!SS_STANDBY_FAILOVER && !SS_STANDBY_PROMOTING && SS_ONDEMAND_REALTIME_BUILD_DISABLED) { UpdateControlFile(); } -- Gitee From fe9b5d971e84cf07278ae385a6ae50a47bdf6536 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 1 Aug 2024 20:49:35 +0800 Subject: [PATCH 129/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=B9=B6=E8=A1=8C?= =?UTF-8?q?=E6=B8=B8=E6=A0=87=E7=9A=84=E8=8B=A5=E5=B9=B2=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/copyfuncs.cpp | 8 +- src/common/backend/nodes/outfuncs.cpp | 8 +- src/common/backend/nodes/readfuncs.cpp | 12 +- src/common/backend/parser/parse_expr.cpp | 82 +++++++------ src/common/backend/parser/parse_relation.cpp | 2 +- src/common/backend/utils/misc/guc/guc_sql.cpp | 3 +- src/gausskernel/optimizer/plan/createplan.cpp | 95 +++++++++++---- .../optimizer/plan/streamplan_utils.cpp | 52 ++++++++- src/gausskernel/optimizer/plan/subselect.cpp | 2 + src/gausskernel/process/stream/execStream.cpp | 18 +-- .../process/threadpool/knl_session.cpp | 1 + src/gausskernel/runtime/executor/spi.cpp | 1 + src/include/executor/executor.h | 12 ++ src/include/knl/knl_session.h | 1 + src/include/nodes/plannodes.h | 2 + src/include/optimizer/stream_cost.h | 2 - src/include/parser/parse_expr.h | 2 +- .../regress/expected/cursor_expression.out | 63 ++++++++++ .../expected/parallel_enable_function.out | 109 ++++++++++++++++++ src/test/regress/expected/smp_cursor.out | 50 ++++++++ src/test/regress/sql/cursor_expression.sql | 23 ++++ .../regress/sql/parallel_enable_function.sql | 25 ++++ src/test/regress/sql/smp_cursor.sql | 19 +++ 23 files changed, 503 insertions(+), 89 deletions(-) diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 1741812f27..b7c011adc0 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -303,6 +303,10 @@ static void CopyPlanFields(const 
Plan* from, Plan* newnode) #ifdef USE_SPQ COPY_SCALAR_FIELD(spq_scan_partial); #endif + if (t_thrd.proc->workingVersionNum >= PARALLEL_ENABLE_VERSION_NUM) { + COPY_SCALAR_FIELD(cursor_expr_level); + COPY_SCALAR_FIELD(cursor_owner_node_id); + } newnode->rightRefState = CopyRightRefState(from->rightRefState); } @@ -2281,10 +2285,6 @@ static Stream* _copyStream(const Stream* from) #ifdef USE_SPQ COPY_SCALAR_FIELD(streamID); #endif - if (t_thrd.proc->workingVersionNum >= PARALLEL_ENABLE_VERSION_NUM) { - COPY_SCALAR_FIELD(cursor_expr_level); - COPY_SCALAR_FIELD(cursor_owner_node_id); - } return newnode; } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index d59fb43fd6..d5eef469f6 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -716,6 +716,10 @@ static void _outPlanInfo(StringInfo str, Plan* node) WRITE_BOOL_FIELD(spq_scan_partial); } #endif + if (t_thrd.proc->workingVersionNum >= PARALLEL_ENABLE_VERSION_NUM) { + WRITE_INT_FIELD(cursor_expr_level); + WRITE_INT_FIELD(cursor_owner_node_id); + } } static void _outPruningResult(StringInfo str, PruningResult* node) @@ -1210,10 +1214,6 @@ static void _outStream(StringInfo str, Stream* node) #ifdef USE_SPQ WRITE_INT_FIELD(streamID); #endif - if (t_thrd.proc->workingVersionNum >= PARALLEL_ENABLE_VERSION_NUM) { - WRITE_INT_FIELD(cursor_expr_level); - WRITE_INT_FIELD(cursor_owner_node_id); - } } /* diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 5b818c43b0..2d04188946 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -767,12 +767,6 @@ THR_LOCAL bool skip_read_extern_fields = false; READ_INT_FIELD(stream_level); \ READ_NODE_FIELD(origin_consumer_nodes); \ READ_BOOL_FIELD(is_recursive_local); \ - IF_EXIST(cursor_expr_level) { \ - READ_INT_FIELD(cursor_expr_level); \ - } \ - IF_EXIST(cursor_owner_node_id) { \ - 
READ_INT_FIELD(cursor_owner_node_id); \ - } \ READ_STREAM_ID(); \ \ READ_DONE(); \ @@ -3530,6 +3524,12 @@ static Plan* _readPlan(Plan* local_node) READ_BOOL_FIELD(spq_scan_partial); } #endif + IF_EXIST(cursor_expr_level) { + READ_INT_FIELD(cursor_expr_level); + } + IF_EXIST(cursor_owner_node_id) { + READ_INT_FIELD(cursor_owner_node_id); + } READ_DONE(); } diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index b85da6f35a..b7b881b98d 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -3894,6 +3894,7 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur CursorExpression* newm = makeNode(CursorExpression); char* queryString; List* raw_parsetree_list = NIL; + List* plantree_list = NIL; PlannedStmt* plan_tree; ListCell* raw_parsetree_cell = NULL; List* stmt_list = NIL; @@ -3903,6 +3904,7 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur if (!smp) { dopControl.CloseSmp(); + dopControl.UnderCursor(); } ParseState* parse_state_parent = pstate; @@ -3915,48 +3917,57 @@ static Node* transformCursorExpression(ParseState* pstate, CursorExpression* cur parse_state_temp = parse_state_temp->parentParseState; } - queryString = pstrdup(cursor_expression->raw_query_str); - raw_parsetree_list = pg_parse_query(queryString); - foreach (raw_parsetree_cell, raw_parsetree_list) { - Node* parsetree = (Node*)lfirst(raw_parsetree_cell); - List* querytree_list = pg_analyze_and_rewrite(parsetree, queryString, NULL, 0, parse_state_parent); - stmt_list = list_concat(stmt_list, querytree_list); - } - - Query* query = castNode(Query, linitial(stmt_list)); + PG_TRY(); + { + queryString = pstrdup(cursor_expression->raw_query_str); + raw_parsetree_list = pg_parse_query(queryString); + foreach (raw_parsetree_cell, raw_parsetree_list) { + Node* parsetree = (Node*)lfirst(raw_parsetree_cell); + List* querytree_list = 
pg_analyze_and_rewrite(parsetree, queryString, NULL, 0, parse_state_parent); + stmt_list = list_concat(stmt_list, querytree_list); + } - plan_tree = pg_plan_query(query, 0, NULL); + plantree_list = pg_plan_queries(stmt_list, 0, NULL); - if (IsA(plan_tree->planTree, Stream)) { - ((Stream*)plan_tree->planTree)->cursor_expr_level = level; + plan_tree = castNode(PlannedStmt, linitial(plantree_list)); + plan_tree->planTree->cursor_expr_level = level; /* reset cursor_expr_level */ if (level == 1) { u_sess->parser_cxt.cursor_expr_level = 0; } - } - int nParamExec = 0; - parse_state_temp = parse_state_parent; - if (parse_state_temp != NULL) { - nParamExec = list_length(parse_state_temp->cursor_expression_para_var); - } - - plan_tree->nParamExec = nParamExec; - newm->plan = (Node*)plan_tree; - newm->options = cursor_expression->options; - newm->raw_query_str = queryString; - newm->param = (List*)copyObject(parse_state_parent->cursor_expression_para_var); - - if (pstate->p_pre_columnref_hook == NULL && pstate->p_post_columnref_hook == NULL && - pstate->p_expr_kind == EXPR_KIND_SELECT_TARGET && pstate->p_expr_transform_level == 1) { - newm->is_simple_select_target = true; - } else { - newm->is_simple_select_target = false; + int nParamExec = 0; + parse_state_temp = parse_state_parent; + if (parse_state_temp != NULL) { + nParamExec = list_length(parse_state_temp->cursor_expression_para_var); + } + + plan_tree->nParamExec = nParamExec; + newm->plan = (Node*)plan_tree; + newm->options = cursor_expression->options; + newm->raw_query_str = queryString; + newm->param = (List*)copyObject(parse_state_parent->cursor_expression_para_var); + + if (pstate->p_pre_columnref_hook == NULL && pstate->p_post_columnref_hook == NULL && + pstate->p_expr_kind == EXPR_KIND_SELECT_TARGET && pstate->p_expr_transform_level == 1) { + newm->is_simple_select_target = true; + } else { + newm->is_simple_select_target = false; + } + + list_free_ext(stmt_list); + list_free_ext(raw_parsetree_list); } + 
PG_CATCH(); + { + u_sess->parser_cxt.cursor_expr_level = 0; + /* restore smp */ + dopControl.ResetSmp(); - list_free_ext(stmt_list); - list_free_ext(raw_parsetree_list); + PG_RE_THROW(); + } + PG_END_TRY(); /* restore parent state */ parse_state_parent->transform_outer_columnref_as_param_hook = NULL; @@ -4145,7 +4156,7 @@ static Node *transformStartWithWhereClauseColumnRef(ParseState *pstate, ColumnRe return NULL; } -PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr) +PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr, CursorExpression** ce) { ListCell* lc = NULL; foreach (lc, funcexpr->args) { @@ -4153,7 +4164,10 @@ PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr) if (IsA(arg, CursorExpression)) { CursorExpression* cursorExpr = (CursorExpression*)arg; PlannedStmt* cursorPlan = (PlannedStmt*)cursorExpr->plan; - if (IsA(cursorPlan->planTree, Stream)) { + if (cursorPlan->num_streams > 0) { + if (ce != NULL) { + *ce = cursorExpr; + } return cursorPlan; } } diff --git a/src/common/backend/parser/parse_relation.cpp b/src/common/backend/parser/parse_relation.cpp index 28c578b0e4..f92c68b207 100755 --- a/src/common/backend/parser/parse_relation.cpp +++ b/src/common/backend/parser/parse_relation.cpp @@ -1794,7 +1794,7 @@ RangeTblEntry* addRangeTableEntryForFunction( */ if (IsA(funcexpr, FuncExpr)) { PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)funcexpr); - if (cursorPstmt != NULL) { + if (cursorPstmt != NULL && IsA(cursorPstmt->planTree, Stream)) { rte->cursorDop = cursorPstmt->planTree->lefttree->dop; } } diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index eeb0c890f5..7e4a4f608b 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -3354,7 +3354,8 @@ static void AssignQueryDop(int newval, void* extra) #ifndef ENABLE_MULTIPLE_NODES /* do not reset backend threads tag */ if (u_sess->opt_cxt.query_dop > 1 && 
- (t_thrd.role == WORKER || t_thrd.role == THREADPOOL_WORKER)) { + (t_thrd.role == WORKER || t_thrd.role == THREADPOOL_WORKER) && + !u_sess->opt_cxt.is_under_cursor) { u_sess->opt_cxt.smp_enabled = true; } #endif diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp index d39559fc98..26683e7460 100755 --- a/src/gausskernel/optimizer/plan/createplan.cpp +++ b/src/gausskernel/optimizer/plan/createplan.cpp @@ -6193,6 +6193,40 @@ static PartIterator* create_partIterator_plan( return partItr; } +PlannedStmt* ReBuildNonSmpPlanForCursorExpr(const char* queryString) +{ + List* raw_parsetree_list = NIL; + List* plantree_list = NIL; + List* stmt_list = NIL; + AutoDopControl dopControl; + ListCell* raw_parsetree_cell = NULL; + + dopControl.CloseSmp(); + dopControl.UnderCursor(); + + PG_TRY(); + { + raw_parsetree_list = pg_parse_query(queryString); + foreach (raw_parsetree_cell, raw_parsetree_list) { + Node* parsetree = (Node*)lfirst(raw_parsetree_cell); + List* querytree_list = pg_analyze_and_rewrite(parsetree, queryString, NULL, 0); + stmt_list = list_concat(stmt_list, querytree_list); + } + + plantree_list = pg_plan_queries(stmt_list, 0, NULL); + } + PG_CATCH(); + { + dopControl.ResetSmp(); + PG_RE_THROW(); + } + PG_END_TRY(); + /* restore smp */ + dopControl.ResetSmp(); + + return castNode(PlannedStmt, linitial(plantree_list)); +} + static FunctionScan* make_functionscan(List* qptlist, List* qpqual, Index scanrelid, Node* funcexpr, List* funccolnames, List* funccoltypes, List* funccoltypmods, List* funccolcollations) { @@ -6211,38 +6245,53 @@ static FunctionScan* make_functionscan(List* qptlist, List* qpqual, Index scanre node->funccoltypmods = funccoltypmods; node->funccolcollations = funccolcollations; + CursorExpression* ce = NULL; + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)funcexpr, &ce); + if (cursorPstmt == NULL) { + return node; + } + if (IS_STREAM_PLAN && u_sess->opt_cxt.query_dop > 1) { 
FunctionPartitionStrategy strategy; List* partkey = NIL; strategy = GetParallelStrategyAndKey(((FuncExpr*)funcexpr)->funcid, &partkey); - PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)funcexpr); - if (cursorPstmt != NULL) { - Plan* cursorPlan = cursorPstmt->planTree; - Stream* stream = (Stream*)cursorPlan; - - /* set plan->dop according to cursorplan */ - inherit_plan_locator_info(plan, cursorPlan->lefttree); - stream->smpDesc.consumerDop = plan->dop; - - /* if FUNC_PARTITION_HASH is specified, set distributed_keys and distriType */ - if (strategy == FUNC_PARTITION_HASH && partkey != NIL) { - ListCell* lc1 = NULL; - foreach (lc1, cursorPlan->targetlist) { - TargetEntry* entry = (TargetEntry*)lfirst(lc1); - ListCell* lc2 = NULL; - foreach (lc2, partkey) { - if (strcmp(entry->resname, (char*)lfirst(lc2)) == 0) { - stream->distribute_keys = lappend(stream->distribute_keys, entry->expr); - break; - } + Plan* cursorPlan = cursorPstmt->planTree; + + /* If top-plan is not stream, functionscan can not be parallel executed */ + if (!IsA(cursorPlan, Stream)) { + return node; + } + + Stream* stream = (Stream*)cursorPlan; + + /* set plan->dop according to cursorplan */ + inherit_plan_locator_info(plan, cursorPlan->lefttree); + stream->smpDesc.consumerDop = plan->dop; + + /* if FUNC_PARTITION_HASH is specified, set distributed_keys and distriType */ + if (strategy == FUNC_PARTITION_HASH && partkey != NIL) { + ListCell* lc1 = NULL; + foreach (lc1, cursorPlan->targetlist) { + TargetEntry* entry = (TargetEntry*)lfirst(lc1); + ListCell* lc2 = NULL; + foreach (lc2, partkey) { + if (strcmp(entry->resname, (char*)lfirst(lc2)) == 0) { + stream->distribute_keys = lappend(stream->distribute_keys, entry->expr); + break; } } - plan->distributed_keys = stream->distribute_keys; - stream->smpDesc.distriType = list_length(plan->distributed_keys) > 0 ? 
- LOCAL_DISTRIBUTE : stream->smpDesc.distriType; } + plan->distributed_keys = stream->distribute_keys; + stream->smpDesc.distriType = list_length(plan->distributed_keys) > 0 ? + LOCAL_DISTRIBUTE : stream->smpDesc.distriType; } + } else { + /* + * if functionscan is disallowed to smp, and cursorPlan has stream node, + * rebuild non-smp plan. For example, subplan is not support smp. + */ + ce->plan = (Node*)ReBuildNonSmpPlanForCursorExpr(pstrdup(ce->raw_query_str)); } return node; diff --git a/src/gausskernel/optimizer/plan/streamplan_utils.cpp b/src/gausskernel/optimizer/plan/streamplan_utils.cpp index fa2f3fa70d..50ca0f152c 100755 --- a/src/gausskernel/optimizer/plan/streamplan_utils.cpp +++ b/src/gausskernel/optimizer/plan/streamplan_utils.cpp @@ -22,6 +22,7 @@ #include "optimizer/planner.h" #include "optimizer/restrictinfo.h" #include "optimizer/tlist.h" +#include "optimizer/planmem_walker.h" #include "parser/parse_collate.h" #include "parser/parse_coerce.h" #include "parser/parse_clause.h" @@ -1565,6 +1566,44 @@ static void set_bucketmap_index(Plan* plan, NodeGroupInfoContext* node_group_inf } } +typedef struct SetStreamPlanCursorWalkerContext { + MethodPlanWalkerContext mpwc; + + int cursor_expr_level; + int cursor_owner_node_id; +} SetStreamPlanCursorWalkerContext; + +bool set_stream_plan_cursor(Node* node_plan, void* context) +{ + if (node_plan == NULL) { + return false; + } + + if (IsA(node_plan, Stream)) { + Plan* plan = (Plan*)node_plan; + plan->cursor_expr_level = ((SetStreamPlanCursorWalkerContext*)context)->cursor_expr_level; + plan->cursor_owner_node_id = ((SetStreamPlanCursorWalkerContext*)context)->cursor_owner_node_id; + } + + return plan_tree_walker(node_plan, (MethodWalker)set_stream_plan_cursor, (void*)context); +} + +/* + * walk through plan tree to set cursor_expr_level/cursor_owner_node_id + */ +void set_stream_plan_cursor_walker(Plan* node_plan) +{ + SetStreamPlanCursorWalkerContext context; + errno_t rc = memset_s(&context, 
sizeof(SetStreamPlanCursorWalkerContext), 0, + sizeof(SetStreamPlanCursorWalkerContext)); + securec_check(rc, "\0", "\0"); + + context.cursor_expr_level = node_plan->cursor_expr_level; + context.cursor_owner_node_id = node_plan->cursor_owner_node_id; + + (void)set_stream_plan_cursor((Node*)node_plan, &context); +} + /* * finalize_node_id * To finalize node id and parent node id for result plan. The sequence of plan node id doesn't @@ -1606,8 +1645,9 @@ void finalize_node_id(Plan* result_plan, int* plan_node_id, int* parent_node_id, /* set the index of bucketmap */ set_bucketmap_index(result_plan, node_group_info_context); } - if (is_under_stream) + if (is_under_stream) { subplan_ids[0] = *plan_node_id; + } *parent_node_id = *plan_node_id; @@ -1688,8 +1728,9 @@ void finalize_node_id(Plan* result_plan, int* plan_node_id, int* parent_node_id, total_num_streams, max_push_sql_num, gather_count, subplans, subroots, initplans, subplan_ids, false, is_under_ctescan, is_data_node_exec, is_read_only, node_group_info_context); - } else + } else { break; + } /* * Note, the recursive-cte processing (stream mode), RecursiveUnion * operator is processed in a way like SubPlan initialization, we just @@ -1799,8 +1840,9 @@ void finalize_node_id(Plan* result_plan, int* plan_node_id, int* parent_node_id, rq->exec_nodes = get_plan_max_ExecNodes(result_plan->lefttree, subplans); (*num_streams) = 0; } - if (!rq->is_simple) + if (!rq->is_simple) { (*max_push_sql_num)++; + } /* mark num_gather include scan_gather plan_router gather in all plan */ rq->num_gather = *gather_count; @@ -1858,8 +1900,8 @@ void finalize_node_id(Plan* result_plan, int* plan_node_id, int* parent_node_id, case T_FunctionScan: { PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)((FunctionScan*)result_plan)->funcexpr); if (cursorPstmt != NULL) { - Stream* stream = (Stream*)cursorPstmt->planTree; - stream->cursor_owner_node_id = result_plan->plan_node_id; + cursorPstmt->planTree->cursor_owner_node_id = 
result_plan->plan_node_id; + set_stream_plan_cursor_walker(cursorPstmt->planTree); } } break; default: diff --git a/src/gausskernel/optimizer/plan/subselect.cpp b/src/gausskernel/optimizer/plan/subselect.cpp index 8df04c225a..6fa6ecc992 100644 --- a/src/gausskernel/optimizer/plan/subselect.cpp +++ b/src/gausskernel/optimizer/plan/subselect.cpp @@ -711,6 +711,8 @@ static Node* make_subplan( /* Reset u_sess->opt_cxt.query_dop. */ u_sess->opt_cxt.query_dop = outerDop; + /* Reset is_stream/is_stream_support because cursorExpr in subquery would change them */ + set_default_stream(); /* Isolate the params needed by this specific subplan */ plan_params = root->plan_params; root->plan_params = NIL; diff --git a/src/gausskernel/process/stream/execStream.cpp b/src/gausskernel/process/stream/execStream.cpp index ec9d7a31ee..9416ca1351 100755 --- a/src/gausskernel/process/stream/execStream.cpp +++ b/src/gausskernel/process/stream/execStream.cpp @@ -620,8 +620,8 @@ static void InitStream(StreamFlowCtl* ctl, StreamTransType transType) key.queryId = pstmt->queryId; key.planNodeId = plan->plan_node_id; - key.cursorExprLevel = streamNode->cursor_expr_level; - key.cursorParentNodeId = streamNode->cursor_owner_node_id; + key.cursorExprLevel = plan->cursor_expr_level; + key.cursorParentNodeId = plan->cursor_owner_node_id; /* * MPPDB with-recursive support */ @@ -973,11 +973,13 @@ static void InitStreamFlow(StreamFlowCtl* ctl) case T_FunctionScan: { PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)((FunctionScan*)oldPlan)->funcexpr); if (cursorPstmt != NULL) { - Stream* cursorPlan = (Stream*)(cursorPstmt->planTree); - ctl->plan = (Plan*)cursorPlan; + ctl->plan = cursorPstmt->planTree; + + PlannedStmt* oldPlan = ctl->cursorPstmt; ctl->cursorPstmt = cursorPstmt; InitStreamFlow(ctl); + ctl->cursorPstmt = oldPlan; break; } } break; @@ -1191,8 +1193,8 @@ void SetupStreamRuntime(StreamState* node) key.queryId = node->ss.ps.state->es_plannedstmt->queryId; key.planNodeId = 
streamNode->scan.plan.plan_node_id; - key.cursorExprLevel = streamNode->cursor_expr_level; - key.cursorParentNodeId = streamNode->cursor_owner_node_id; + key.cursorExprLevel = node->ss.ps.plan->cursor_expr_level; + key.cursorParentNodeId = node->ss.ps.plan->cursor_owner_node_id; Assert(u_sess->stream_cxt.global_obj != NULL); pair = u_sess->stream_cxt.global_obj->popStreamPair(key); @@ -1231,8 +1233,8 @@ static void StartupStreamThread(StreamState* node) key.queryId = node->ss.ps.state->es_plannedstmt->queryId; key.planNodeId = node->ss.ps.plan->plan_node_id; - key.cursorExprLevel = ((Stream*)node->ss.ps.plan)->cursor_expr_level; - key.cursorParentNodeId = ((Stream*)node->ss.ps.plan)->cursor_owner_node_id; + key.cursorExprLevel = node->ss.ps.plan->cursor_expr_level; + key.cursorParentNodeId = node->ss.ps.plan->cursor_owner_node_id; Assert(u_sess->stream_cxt.global_obj != NULL); pair = u_sess->stream_cxt.global_obj->popStreamPair(key); Assert(pair->producerList != NULL); diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp index 42362a9387..0a5550a9f2 100755 --- a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -231,6 +231,7 @@ static void knl_u_optimizer_init(knl_u_optimizer_context* opt_cxt) opt_cxt->query_dop_store = 1; opt_cxt->query_dop = 1; opt_cxt->smp_enabled = true; + opt_cxt->is_under_cursor = false; opt_cxt->max_query_dop = -1; opt_cxt->parallel_debug_mode = 0; diff --git a/src/gausskernel/runtime/executor/spi.cpp b/src/gausskernel/runtime/executor/spi.cpp index ba85ce5472..8e58945b79 100644 --- a/src/gausskernel/runtime/executor/spi.cpp +++ b/src/gausskernel/runtime/executor/spi.cpp @@ -1667,6 +1667,7 @@ static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, ParamL #ifndef ENABLE_MULTIPLE_NODES AutoDopControl dopControl; dopControl.CloseSmp(); + dopControl.UnderCursor(); #endif NodeTag old_node_tag = 
t_thrd.postgres_cxt.cur_command_tag; diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 0889f858bf..66b3070078 100755 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -832,8 +832,10 @@ public: { if (likely(u_sess != NULL)) { m_smpEnabled = u_sess->opt_cxt.smp_enabled; + m_underCursor = u_sess->opt_cxt.is_under_cursor; } else { m_smpEnabled = true; + m_underCursor = false; } } @@ -841,6 +843,7 @@ public: { if (u_sess != NULL) { u_sess->opt_cxt.smp_enabled = m_smpEnabled; + u_sess->opt_cxt.is_under_cursor = m_underCursor; } } @@ -851,15 +854,24 @@ public: } } + void UnderCursor() + { + if (likely(u_sess != NULL)) { + u_sess->opt_cxt.is_under_cursor = true; + } + } + void ResetSmp() { if (u_sess != NULL) { u_sess->opt_cxt.smp_enabled = m_smpEnabled; + u_sess->opt_cxt.is_under_cursor = m_underCursor; } } private: bool m_smpEnabled; + bool m_underCursor; }; #ifdef USE_SPQ diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 058bd54e7b..7c5f779ddc 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -360,6 +360,7 @@ typedef struct knl_u_optimizer_context { /* Mark smp is enabled in procedure. 
*/ bool smp_enabled; + bool is_under_cursor; double smp_thread_cost; diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 707cefe68b..86aa0cc067 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -381,6 +381,8 @@ typedef struct Plan { #ifdef USE_SPQ bool spq_scan_partial; #endif + int cursor_expr_level; + int cursor_owner_node_id; } Plan; typedef struct NdpScanCondition { // for each scan node diff --git a/src/include/optimizer/stream_cost.h b/src/include/optimizer/stream_cost.h index cee93c4aa8..4316d40fcb 100644 --- a/src/include/optimizer/stream_cost.h +++ b/src/include/optimizer/stream_cost.h @@ -89,8 +89,6 @@ typedef struct Stream { #ifdef USE_SPQ int streamID; #endif - int cursor_expr_level; - int cursor_owner_node_id; } Stream; extern void compute_stream_cost(StreamType type, char locator_type, double subrows, double subgblrows, diff --git a/src/include/parser/parse_expr.h b/src/include/parser/parse_expr.h index 763ae33979..1b9c1a9298 100644 --- a/src/include/parser/parse_expr.h +++ b/src/include/parser/parse_expr.h @@ -39,6 +39,6 @@ extern bool IsQuerySWCBRewrite(Query *query); extern bool IsSWCBRewriteRTE(RangeTblEntry *rte); extern Datum GetTypeZeroValue(Form_pg_attribute att_tup); typedef Datum (*getTypeZeroValueFunc)(Form_pg_attribute att_tup); -extern PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr); +extern PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr, CursorExpression** ce = NULL); #endif /* PARSE_EXPR_H */ diff --git a/src/test/regress/expected/cursor_expression.out b/src/test/regress/expected/cursor_expression.out index faa4fa1661..a0b5eb6815 100644 --- a/src/test/regress/expected/cursor_expression.out +++ b/src/test/regress/expected/cursor_expression.out @@ -915,6 +915,69 @@ select * from test_insert; pro | (3 rows) +reset behavior_compat_options; +create index on employees(employees_id); +explain (costs off) SELECT e1.name FROM employees e1 where employees_id < 
10; + QUERY PLAN +------------------------------- + Seq Scan on employees e1 + Filter: (employees_id < 10) +(2 rows) + +set enable_auto_explain = on; +set auto_explain_level = notice; +-- test plan hint in cursor expression +DECLARE CURSOR c1 IS SELECT e.name, CURSOR(SELECT /*+ set(enable_seqscan off) */ e1.name FROM employees e1 where employees_id < 10) abc FROM employees e; + v_name VARCHAR2(10); + type emp_cur_type is ref cursor; + c2 emp_cur_type; + v_name2 VARCHAR2(10); +BEGIN + OPEN c1; + fetch c1 into v_name,c2; + raise notice 'company_name : % %',v_name, c2; + fetch c2 into v_name2; + raise notice 'employee_name : %',v_name2; + close c2; + CLOSE c1; +END; +/ +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: SELECT e.name, CURSOR(SELECT /*+ set(enable_seqscan off) */ e1.name FROM employees e1 where employees_id < 10)abc FROM employees e +Name: datanode1 +--?Seq Scan on cursor_expression.employees e.* + Output: name, CURSOR(SELECT /*+ set(enable_seqscan off) */ e1.name FROM employees e1 where employees_id < 10) + + +CONTEXT: PL/pgSQL function inline_code_block line 8 at FETCH +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +CONTEXT: PL/pgSQL function inline_code_block line 8 at FETCH +NOTICE: company_name : zhangsan +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: SELECT /*+ set(enable_seqscan off) */ e1.name FROM employees e1 where employees_id < 10 +Name: datanode1 +--?Index Scan using employees_employees_id_idx on cursor_expression.employees e1.* + Output: name + Index Cond: (e1.employees_id < 10) + + +CONTEXT: PL/pgSQL function inline_code_block line 10 at FETCH +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +CONTEXT: PL/pgSQL function inline_code_block line 10 at FETCH +NOTICE: employee_name : zhangsan +set enable_auto_explain = off; -- clean drop table 
test_insert; drop procedure pro_cursor_0011_02; diff --git a/src/test/regress/expected/parallel_enable_function.out b/src/test/regress/expected/parallel_enable_function.out index e192b9c95f..563eb3fbfe 100644 --- a/src/test/regress/expected/parallel_enable_function.out +++ b/src/test/regress/expected/parallel_enable_function.out @@ -307,6 +307,115 @@ select count(*) from multi_cursor_srf(cursor (select * from employees), cursor ( 100 (1 row) +-- query dop reset after error +explain (costs off) select count(*) from multi_cursor_srf(cursor (select * from multi_cursor_srf(cursor (select * from employees))), cursor (select * from employees)); +ERROR: function multi_cursor_srf(refcursor) does not exist +LINE 1: explain (costs off) select count(*) from multi_cursor_srf(cu... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +explain (costs off) select * from employees; + QUERY PLAN +---------------------------------------- + Streaming(type: LOCAL GATHER dop: 1/2) + -> Seq Scan on employees +(2 rows) + +-- test top plan of cursor expr is not stream +explain (costs off) select count(*) from hash_srf(cursor (select * from employees limit 10)), employees; + QUERY PLAN +-------------------------------------------------------------------- + Aggregate + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Aggregate + -> Nested Loop + -> Streaming(type: LOCAL ROUNDROBIN dop: 2/1) + -> Function Scan on hash_srf + -> Materialize + -> Streaming(type: BROADCAST dop: 2/2) + -> Seq Scan on employees +(9 rows) + +select count(*) from hash_srf(cursor (select * from employees limit 10)), employees; + count +------- + 1000 +(1 row) + +explain (costs off) select count(*) from hash_srf(cursor (select * from employees a ,employees b)), employees limit 10; + QUERY PLAN +------------------------------------------------------------------------- + Limit + -> Aggregate + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Aggregate + -> Nested Loop 
+ -> Function Scan on hash_srf + -> Materialize + -> Streaming(type: BROADCAST dop: 2/2) + -> Seq Scan on employees +(9 rows) + +select count(*) from hash_srf(cursor (select * from employees a ,employees b)), employees limit 10; + count +--------- + 1000000 +(1 row) + +-- test initplan not smp +explain (costs off) select 1, (select count(*) from hash_srf(cursor (select * from employees))) a from employees; + QUERY PLAN +----------------------------------------- + Streaming(type: LOCAL GATHER) + InitPlan 1 (returns $0) + -> Aggregate + -> Function Scan on hash_srf + -> Seq Scan on employees +(5 rows) + +-- test plan hint +set query_dop = 1; +explain (costs off) select count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); -- not smp + QUERY PLAN +--------------------------------- + Aggregate + -> Function Scan on hash_srf +(2 rows) + +select count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); + count +------- + 100 +(1 row) + +explain (costs off) select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select * from employees)); -- not smp + QUERY PLAN +--------------------------------- + Aggregate + -> Function Scan on hash_srf +(2 rows) + +select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select * from employees)); + count +------- + 100 +(1 row) + +explain (costs off) select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); -- smp + QUERY PLAN +---------------------------------------------- + Aggregate + -> Streaming(type: LOCAL GATHER dop: 1/2) + -> Aggregate + -> Function Scan on hash_srf +(4 rows) + +select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); + count +------- + 100 +(1 row) + +set query_dop = 1002; -- nested function call explain (costs off) select * from hash_srf(cursor (select * from hash_srf(cursor (select * from employees)))) limit 10; 
QUERY PLAN diff --git a/src/test/regress/expected/smp_cursor.out b/src/test/regress/expected/smp_cursor.out index 451be46f8e..9cdb422bff 100644 --- a/src/test/regress/expected/smp_cursor.out +++ b/src/test/regress/expected/smp_cursor.out @@ -545,5 +545,55 @@ select a, cursor(select * from t1) from t1 limit 10; --?.* (10 rows) +-- smp hint in cursor expr among plpgsql does not work +set enable_auto_explain = on; +set auto_explain_level = notice; +-- test plan hint in cursor expression +DECLARE CURSOR c1 IS SELECT a, CURSOR(SELECT /*+ set(query_dop 1002) */ * FROM t1) abc FROM t1; + id int; + type emp_cur_type is ref cursor; + c2 emp_cur_type; + tmp t1%rowtype; +BEGIN + OPEN c1; + fetch c1 into id,c2; + fetch c2 into tmp; + close c2; + CLOSE c1; +END; +/ +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: SELECT a, CURSOR(SELECT /*+ set(query_dop 1002) */ * FROM t1)abc FROM t1 +Name: datanode1 +--?Seq Scan on smp_cursor.t1.* + Output: a, CURSOR(SELECT /*+ set(query_dop 1002) */ * FROM t1) + + +CONTEXT: PL/pgSQL function inline_code_block line 8 at FETCH +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +CONTEXT: PL/pgSQL function inline_code_block line 8 at FETCH +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: SELECT /*+ set(query_dop 1002) */ * FROM t1 +Name: datanode1 +--?Seq Scan on smp_cursor.t1.* + Output: a, b, c, d + + +CONTEXT: PL/pgSQL function inline_code_block line 9 at FETCH +NOTICE: +----------------------------NestLevel:0---------------------------- +--?duration.* + +CONTEXT: PL/pgSQL function inline_code_block line 9 at FETCH +set enable_auto_explain = off; drop schema smp_cursor cascade; NOTICE: drop cascades to table t1 diff --git a/src/test/regress/sql/cursor_expression.sql b/src/test/regress/sql/cursor_expression.sql index 80d5b6e6d3..435c59fb3b 100644 --- 
a/src/test/regress/sql/cursor_expression.sql +++ b/src/test/regress/sql/cursor_expression.sql @@ -527,6 +527,29 @@ create table test_insert(c1 varchar, c2 varchar); insert into test_insert SELECT department_name, CURSOR(SELECT e.name FROM employees e) FROM departments d; select * from test_insert; +reset behavior_compat_options; +create index on employees(employees_id); +explain (costs off) SELECT e1.name FROM employees e1 where employees_id < 10; +set enable_auto_explain = on; +set auto_explain_level = notice; +-- test plan hint in cursor expression +DECLARE CURSOR c1 IS SELECT e.name, CURSOR(SELECT /*+ set(enable_seqscan off) */ e1.name FROM employees e1 where employees_id < 10) abc FROM employees e; + v_name VARCHAR2(10); + type emp_cur_type is ref cursor; + c2 emp_cur_type; + v_name2 VARCHAR2(10); +BEGIN + OPEN c1; + fetch c1 into v_name,c2; + raise notice 'company_name : % %',v_name, c2; + fetch c2 into v_name2; + raise notice 'employee_name : %',v_name2; + close c2; + CLOSE c1; +END; +/ +set enable_auto_explain = off; + -- clean drop table test_insert; drop procedure pro_cursor_0011_02; diff --git a/src/test/regress/sql/parallel_enable_function.sql b/src/test/regress/sql/parallel_enable_function.sql index 64967b6241..5d10f154d0 100644 --- a/src/test/regress/sql/parallel_enable_function.sql +++ b/src/test/regress/sql/parallel_enable_function.sql @@ -144,6 +144,31 @@ select * from multi_cursor_srf(cursor (select * from employees), cursor (select explain (costs off) select count(*) from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)); select count(*) from multi_cursor_srf(cursor (select * from employees), cursor (select * from employees)); +-- query dop reset after error +explain (costs off) select count(*) from multi_cursor_srf(cursor (select * from multi_cursor_srf(cursor (select * from employees))), cursor (select * from employees)); +explain (costs off) select * from employees; + +-- test top plan of cursor expr is not 
stream +explain (costs off) select count(*) from hash_srf(cursor (select * from employees limit 10)), employees; +select count(*) from hash_srf(cursor (select * from employees limit 10)), employees; + +explain (costs off) select count(*) from hash_srf(cursor (select * from employees a ,employees b)), employees limit 10; +select count(*) from hash_srf(cursor (select * from employees a ,employees b)), employees limit 10; + +-- test initplan not smp +explain (costs off) select 1, (select count(*) from hash_srf(cursor (select * from employees))) a from employees; + +-- test plan hint +set query_dop = 1; +explain (costs off) select count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); -- not smp +select count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); + +explain (costs off) select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select * from employees)); -- not smp +select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select * from employees)); + +explain (costs off) select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); -- smp +select /*+ set(query_dop 1002) */ count(*) from hash_srf(cursor (select /*+ set(query_dop 1002) */ * from employees)); +set query_dop = 1002; -- nested function call explain (costs off) select * from hash_srf(cursor (select * from hash_srf(cursor (select * from employees)))) limit 10; select * from hash_srf(cursor (select * from hash_srf(cursor (select * from employees)))) limit 10; diff --git a/src/test/regress/sql/smp_cursor.sql b/src/test/regress/sql/smp_cursor.sql index f53242f8a9..3c8f8dc79c 100644 --- a/src/test/regress/sql/smp_cursor.sql +++ b/src/test/regress/sql/smp_cursor.sql @@ -84,4 +84,23 @@ set enable_auto_explain = off; explain (costs off) select a, cursor(select * from t1) from t1 limit 10; select a, cursor(select * from t1) from t1 limit 10; +-- smp hint in cursor expr among 
plpgsql does not work +set enable_auto_explain = on; +set auto_explain_level = notice; +-- test plan hint in cursor expression +DECLARE CURSOR c1 IS SELECT a, CURSOR(SELECT /*+ set(query_dop 1002) */ * FROM t1) abc FROM t1; + id int; + type emp_cur_type is ref cursor; + c2 emp_cur_type; + tmp t1%rowtype; +BEGIN + OPEN c1; + fetch c1 into id,c2; + fetch c2 into tmp; + close c2; + CLOSE c1; +END; +/ +set enable_auto_explain = off; + drop schema smp_cursor cascade; \ No newline at end of file -- Gitee From ea1180629a55e8d1ccd8728498163d38f61d4d16 Mon Sep 17 00:00:00 2001 From: q00421813 Date: Tue, 30 Jul 2024 16:37:34 +0800 Subject: [PATCH 130/347] =?UTF-8?q?1.toast=E8=A1=A8=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E6=89=A9=E5=85=85=E4=BA=8B=E5=8A=A1=E6=A7=BD=202.get=5Fmax=5Fo?= =?UTF-8?q?ffsetnumber=E7=BB=9F=E4=B8=80=E4=BD=BF=E7=94=A8am=E6=8E=A5?= =?UTF-8?q?=E5=8F=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../backend/utils/adt/pgundostatfuncs.cpp | 6 +++++- src/gausskernel/runtime/executor/execMain.cpp | 6 ++---- .../runtime/executor/nodeModifyTable.cpp | 18 +++++++++--------- src/gausskernel/runtime/opfusion/opfusion.cpp | 6 ------ .../storage/access/common/reloptions.cpp | 4 ++-- .../storage/access/ustore/knl_uheap.cpp | 2 +- .../storage/access/ustore/knl_uredo.cpp | 14 ++++++++------ .../access/ustore/undo/knl_uundorecycle.cpp | 2 +- src/include/opfusion/opfusion.h | 2 -- 9 files changed, 28 insertions(+), 32 deletions(-) diff --git a/src/common/backend/utils/adt/pgundostatfuncs.cpp b/src/common/backend/utils/adt/pgundostatfuncs.cpp index 6fb4c68319..a451f57820 100644 --- a/src/common/backend/utils/adt/pgundostatfuncs.cpp +++ b/src/common/backend/utils/adt/pgundostatfuncs.cpp @@ -478,8 +478,12 @@ static bool ParseUndoRecord(UndoRecPtr urp, Tuplestorestate *tupstore, TupleDesc off_t seekpos; errno_t rc = EOK; uint32 ret = 0; - UndoHeader *urec = (UndoHeader *)malloc(sizeof(UndoHeader)); UndoRecPtr blkprev = 
INVALID_UNDO_REC_PTR; + UndoHeader *urec = (UndoHeader *)malloc(sizeof(UndoHeader)); + if (!urec) { + fprintf(stderr, "malloc UndoHeader failed, out of memory\n"); + return false; + } rc = memset_s(urec, sizeof(UndoHeader), (0), sizeof(UndoHeader)); securec_check(rc, "\0", "\0"); do { diff --git a/src/gausskernel/runtime/executor/execMain.cpp b/src/gausskernel/runtime/executor/execMain.cpp index a47d075111..d362d2587a 100755 --- a/src/gausskernel/runtime/executor/execMain.cpp +++ b/src/gausskernel/runtime/executor/execMain.cpp @@ -3767,10 +3767,8 @@ void EvalPlanQualFetchRowMarks(EPQState *epqstate) Page page = BufferGetPage(buffer); ItemPointer tid = &tuple.t_self; OffsetNumber offnum = ItemPointerGetOffsetNumber(tid); - if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page)) { - ereport(LOG, (errcode(ERRCODE_FETCH_DATA_FAILED), - errmsg("out of range items"))); - } else if (offnum < FirstOffsetNumber || offnum > UHeapPageGetMaxOffsetNumber(page)) { + if (offnum < FirstOffsetNumber || offnum > + tableam_tops_page_get_max_offsetnumber(fakeRelation, page)) { ereport(LOG, (errcode(ERRCODE_FETCH_DATA_FAILED), errmsg("out of range items"))); } else { diff --git a/src/gausskernel/runtime/executor/nodeModifyTable.cpp b/src/gausskernel/runtime/executor/nodeModifyTable.cpp index 43522e620c..c52624ffce 100644 --- a/src/gausskernel/runtime/executor/nodeModifyTable.cpp +++ b/src/gausskernel/runtime/executor/nodeModifyTable.cpp @@ -860,7 +860,7 @@ static void ConstraintsForExecUpsert(Relation resultRelationDesc) } } -static void update_slot_tuple_info(TupleTableSlot* slot, Tuple tuple) +static void update_slot_tuple_info(TupleTableSlot* slot, Tuple tuple) { bool tuple_is_uheap = TUPLE_IS_UHEAP_TUPLE(tuple); if (slot->tts_tupslotTableAm == TAM_USTORE && !tuple_is_uheap) { @@ -875,14 +875,14 @@ static void update_slot_tuple_info(TupleTableSlot* slot, Tuple tuple) slot_tup->xc_node_id = htup->t_xc_node_id; } else if (slot->tts_tupslotTableAm == TAM_HEAP && 
tuple_is_uheap) { HeapTuple slot_tup = (HeapTuple) slot->tts_tuple; - UHeapTuple htup = (UHeapTuple) tuple; - slot_tup->t_self = htup->ctid; - slot_tup->t_tableOid = htup->table_oid; - slot_tup->t_xid_base = htup->t_xid_base; - slot_tup->t_multi_base = htup->t_multi_base; - slot_tup->xmin = htup->xmin; - slot_tup->xmax = htup->xmax; - slot_tup->t_xc_node_id = htup->xc_node_id; + UHeapTuple utup = (UHeapTuple) tuple; + slot_tup->t_self = utup->ctid; + slot_tup->t_tableOid = utup->table_oid; + slot_tup->t_xid_base = utup->t_xid_base; + slot_tup->t_multi_base = utup->t_multi_base; + slot_tup->xmin = utup->xmin; + slot_tup->xmax = utup->xmax; + slot_tup->t_xc_node_id = utup->xc_node_id; } else { Assert(false); } diff --git a/src/gausskernel/runtime/opfusion/opfusion.cpp b/src/gausskernel/runtime/opfusion/opfusion.cpp index 680b564ef9..4cd2c4fe5c 100644 --- a/src/gausskernel/runtime/opfusion/opfusion.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion.cpp @@ -310,11 +310,6 @@ void OpFusion::setCurrentOpFusionObj(OpFusion *obj) u_sess->exec_cxt.CurrentOpFusionObj = obj; } -void OpFusion::setOpFusionReuseObj(OpFusion *obj) -{ - u_sess->opfusion_reuse_ctx.opfusionObj = obj; -} - void OpFusion::checkPermission() { bool check = false; @@ -1117,7 +1112,6 @@ void OpFusion::tearDown(OpFusion *opfusion) delete opfusion; OpFusion::setCurrentOpFusionObj(NULL); - OpFusion::setOpFusionReuseObj(NULL); } void OpFusion::clearForCplan(OpFusion *opfusion, CachedPlanSource *psrc) diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index 4f8eee2547..a246572877 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -2774,7 +2774,7 @@ void ForbidOutUsersToSetInnerOptions(List *userOptions) void ForbidUserToSetDefinedIndexOptions(Relation rel, List *options) { /* the following option must be in tab[] of default_reloptions(). 
*/ - static const char *unchangedOpt[] = {"crossbucket", "storage_type", "index_txntype"}; + static const char *unchangedOpt[] = {"crossbucket", "storage_type"}; int firstInvalidOpt = -1; if (FindInvalidOption(options, unchangedOpt, lengthof(unchangedOpt), &firstInvalidOpt)) { @@ -3255,4 +3255,4 @@ static void SetUstoreDefaultFillfactor(void *rdopts, relopt_value *options, } } } -} \ No newline at end of file +} diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 47f9d494d4..2aa13218a4 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -3879,7 +3879,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId Assert(false); } - if ((!aggressiveSearch && tdCount >= TD_THRESHOLD_FOR_PAGE_SWITCH) || RelationIsToast(relation)) { + if (!aggressiveSearch && tdCount >= TD_THRESHOLD_FOR_PAGE_SWITCH) { /* * Do not extend TD array if the TD allocation request is * for an insert statement and the page already has diff --git a/src/gausskernel/storage/access/ustore/knl_uredo.cpp b/src/gausskernel/storage/access/ustore/knl_uredo.cpp index 5a9a46c10d..0f82723e4c 100644 --- a/src/gausskernel/storage/access/ustore/knl_uredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uredo.cpp @@ -1258,8 +1258,10 @@ static Size PerformUpdateNewRedoAction(XLogReaderState *record, UpdateRedoBuffer static void UHeapXlogUpdate(XLogReaderState *record) { XlUndoHeader *xlnewundohdr = NULL; - UpdateRedoBuffers buffers = { 0 }; - RelFileNode rnode = {0}; + UpdateRedoBuffers buffers; + buffers.oldbuffer = { 0 }; + buffers.newbuffer = { 0 }; + RelFileNode rnode = { 0 }; BlockNumber oldblk = InvalidBlockNumber; BlockNumber newblk = InvalidBlockNumber; UHeapTupleData oldtup; @@ -1761,7 +1763,7 @@ static void UHeapXlogFreeze(XLogReaderState *record) XLogRecPtr lsn = record->EndRecPtr; XlUHeapFreeze *xlrec = (XlUHeapFreeze 
*)XLogRecGetData(record); TransactionId cutoffXid = xlrec->cutoff_xid; - RedoBufferInfo buffer; + RedoBufferInfo buffer = { 0 }; UHeapTupleData utuple; RelFileNode rnode; BlockNumber blkno = InvalidBlockNumber; @@ -1949,7 +1951,7 @@ static void UHeapUndoXlogPageRestore(char *curxlogptr, Buffer buffer, Page page) */ static void UHeapUndoXlogPage(XLogReaderState *record) { - RedoBufferInfo redoBuffInfo; + RedoBufferInfo redoBuffInfo = { 0 }; uint8 *flags = (uint8 *)XLogRecGetData(record); char *curxlogptr = (char *)((char *)flags + sizeof(uint8)); XLogRedoAction action = XLogReadBufferForRedo(record, 0, &redoBuffInfo); @@ -1993,7 +1995,7 @@ static void UHeapUndoXlogPage(XLogReaderState *record) static void UHeapUndoXlogResetXid(XLogReaderState *record) { - RedoBufferInfo redoBuffInfo; + RedoBufferInfo redoBuffInfo = { 0 }; XLogRecPtr lsn = record->EndRecPtr; XlUHeapUndoResetSlot *xlrec = (XlUHeapUndoResetSlot *)XLogRecGetData(record); XLogRedoAction action = XLogReadBufferForRedo(record, 0, &redoBuffInfo); @@ -2018,7 +2020,7 @@ static void UHeapUndoXlogResetXid(XLogReaderState *record) static void UHeapUndoXlogAbortSpecinsert(XLogReaderState *record) { - RedoBufferInfo redoBuffInfo; + RedoBufferInfo redoBuffInfo = { 0 }; uint8 *flags = (uint8 *)XLogRecGetData(record); XLogRecPtr lsn = record->EndRecPtr; XlUHeapUndoAbortSpecInsert *xlrec = (XlUHeapUndoAbortSpecInsert *)((char *)flags + sizeof(uint8)); diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp index 0ac42cf997..530096fcc6 100755 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp @@ -442,9 +442,9 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f XLogWaitFlush(lsn); } + zone->UnlockUndoZone(); zone->ReleaseSpace(startUndoPtr, endUndoPtr, &g_forceRecycleSize); zone->ReleaseSlotSpace(start, recycle, 
&g_forceRecycleSize); - zone->UnlockUndoZone(); } else { /* zone has nothing to recycle. */ break; diff --git a/src/include/opfusion/opfusion.h b/src/include/opfusion/opfusion.h index 0a70230f3b..cd4e8e3f3b 100644 --- a/src/include/opfusion/opfusion.h +++ b/src/include/opfusion/opfusion.h @@ -71,8 +71,6 @@ public: static void setCurrentOpFusionObj(OpFusion* obj); - static void setOpFusionReuseObj(OpFusion* obj); - static bool process(int op, StringInfo msg, char* completionTag, bool isTopLevel, bool* isQueryCompleted); static void SaveInGPC(OpFusion* obj); -- Gitee From 738240a729dbb0ad5b8c273761767fe26239e5cd Mon Sep 17 00:00:00 2001 From: laishenghao Date: Fri, 2 Aug 2024 15:29:53 +0800 Subject: [PATCH 131/347] =?UTF-8?q?=E5=A2=9E=E5=8A=A0GUC=E5=8F=82=E6=95=B0?= =?UTF-8?q?=E6=8E=A7=E5=88=B6=E9=A2=84=E5=88=9B=E5=BB=BAxlog=E7=9A=84?= =?UTF-8?q?=E5=8A=9F=E8=83=BD=E5=BC=80=E9=97=AD=E4=B8=8E=E9=98=88=E5=80=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/cluster_guc.conf | 1 + src/common/backend/utils/misc/guc/guc_storage.cpp | 15 +++++++++++++++ .../utils/misc/postgresql_single.conf.sample | 1 + src/gausskernel/storage/access/transam/xlog.cpp | 12 ++++++++---- .../knl/knl_guc/knl_session_attr_storage.h | 1 + 5 files changed, 26 insertions(+), 4 deletions(-) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 39ab287849..87ebb5d0f2 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -733,6 +733,7 @@ max_sync_workers_per_subscription|int|0,262143|NULL|Maximum number of table sync walwriter_sleep_threshold|int64|1,50000|NULL|NULL| walwriter_cpu_bind|int|-1,2147483647|NULL|NULL| wal_file_init_num|int|0,1000000|NULL|NULL| +wal_file_preinit_threshold|int|1,100|NULL|Threshold for pre-initializing xlogs, in percentages.| catchup2normal_wait_time|int|-1,10000|ms|The maximal allowed duration for waiting from catchup to normal state.| 
max_concurrent_autonomous_transactions|int|0,1024|NULL|NULL| sync_config_strategy|enum|all_node,only_sync_node,none_node|NULL|Synchronization strategy for configuration files between host and standby.| diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index 7b72bd5e47..ed30d27213 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -2562,6 +2562,21 @@ static void InitStorageConfigureNamesInt() NULL, NULL}, + {{"wal_file_preinit_threshold", + PGC_SIGHUP, + NODE_ALL, + WAL_SETTINGS, + gettext_noop("Threshold for pre-initializing xlogs, in percentages."), + NULL, + NULL}, + &u_sess->attr.attr_storage.walFilePreinitThreshold, + 100, + 1, + 100, + NULL, + NULL, + NULL}, + {{"advance_xlog_file_num", PGC_POSTMASTER, NODE_ALL, diff --git a/src/common/backend/utils/misc/postgresql_single.conf.sample b/src/common/backend/utils/misc/postgresql_single.conf.sample index b85978598a..d37d95ad37 100644 --- a/src/common/backend/utils/misc/postgresql_single.conf.sample +++ b/src/common/backend/utils/misc/postgresql_single.conf.sample @@ -899,3 +899,4 @@ dolphin.nulls_minimal_policy = on # the inverse of the default configuration val #application_name = 'dn_master' #enable_nls = off +#wal_file_preinit_threshold = 100 # Threshold for pre-initializing xlogs, in percentages. 
diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 31b2b8ab9f..73f53d72f9 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -2718,15 +2718,19 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) t_thrd.xlog_cxt.openLogOff = 0; segs_enough = true; - if (g_instance.attr.attr_storage.wal_file_init_num > 0 && g_instance.wal_cxt.globalEndPosSegNo != InvalidXLogSegPtr && + const int fullThreshold = 100; + int threshold = u_sess->attr.attr_storage.walFilePreinitThreshold; + int initNum = g_instance.attr.attr_storage.wal_file_init_num; + if (threshold < fullThreshold && initNum > 0 && + g_instance.wal_cxt.globalEndPosSegNo != InvalidXLogSegPtr && g_instance.wal_cxt.globalEndPosSegNo >= t_thrd.xlog_cxt.openLogSegNo) { - segs_enough = (g_instance.wal_cxt.globalEndPosSegNo - t_thrd.xlog_cxt.openLogSegNo) - > (g_instance.attr.attr_storage.wal_file_init_num * 0.2); + segs_enough = (g_instance.wal_cxt.globalEndPosSegNo - t_thrd.xlog_cxt.openLogSegNo) > + (1.0 * initNum * (fullThreshold - threshold) / fullThreshold); } /* * Unlock WalAuxiliary thread to init new xlog segment if we are running out - * of xlog segments, or available segments is less than wal_file_init_num * 0.2. + * of xlog segments, or used segments is more than wal_file_preinit_threshold. 
*/ if (!segs_enough) { g_instance.wal_cxt.globalEndPosSegNo = Max(g_instance.wal_cxt.globalEndPosSegNo, t_thrd.xlog_cxt.openLogSegNo); diff --git a/src/include/knl/knl_guc/knl_session_attr_storage.h b/src/include/knl/knl_guc/knl_session_attr_storage.h index ed796f3af2..7aff202998 100755 --- a/src/include/knl/knl_guc/knl_session_attr_storage.h +++ b/src/include/knl/knl_guc/knl_session_attr_storage.h @@ -306,6 +306,7 @@ typedef struct knl_session_attr_storage { char* uwal_path; bool handle_toast_in_autovac; bool enable_xlog_insert_record_group; + int walFilePreinitThreshold; /* pre-read parms */ int heap_bulk_read_size; -- Gitee From 2400949a780b3d1b3aaf636a13900e95a3495439 Mon Sep 17 00:00:00 2001 From: bowenliu Date: Wed, 31 Jul 2024 14:14:31 +0800 Subject: [PATCH 132/347] push dms commit --- src/gausskernel/ddes/ddes_commit_id | 4 ++-- src/include/ddes/dms/dms_api.h | 27 ++++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index aa61cf540b..31cce690c0 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=4e5c455f771c9be5e78260371eed2f28474cf253 +dms_commit_id=1e366d6a3ab36a7947d02d2664496d09e2a079ef dss_commit_id=083e52af8c7f965856f319554d6332b14f6b2c99 -cbb_commit_id=ac8ed05be35754e77032b4c9ec9b1eba53f1d5a6 +cbb_commit_id=49b0b0c664346a690c9fe9b537f264306dbdc46d diff --git a/src/include/ddes/dms/dms_api.h b/src/include/ddes/dms/dms_api.h index c0f29a600e..1c2421034d 100644 --- a/src/include/ddes/dms/dms_api.h +++ b/src/include/ddes/dms/dms_api.h @@ -34,7 +34,7 @@ extern "C" { #define DMS_LOCAL_MINOR_VER_WEIGHT 1000 #define DMS_LOCAL_MAJOR_VERSION 0 #define DMS_LOCAL_MINOR_VERSION 0 -#define DMS_LOCAL_VERSION 162 +#define DMS_LOCAL_VERSION 163 #define DMS_SUCCESS 0 #define DMS_ERROR (-1) @@ -60,6 +60,7 @@ extern "C" { #define DMS_VERSION_MAX_LEN 256 #define DMS_OCK_LOG_PATH_LEN 256 
#define DMS_LOG_PATH_LEN (256) +#define DMS_CMD_DESC_LEN 64 // The values of the following two macros must be same with (GS_MAX_XA_BASE16_GTRID_LEN GS_MAX_XA_BASE16_BQUAL_LEN) #define DMS_MAX_XA_BASE16_GTRID_LEN (128) @@ -1371,6 +1372,30 @@ typedef struct st_driver_ping_info { unsigned long long minor_version; } driver_ping_info_t; +typedef struct st_mes_msg_info { + unsigned int cmd; + unsigned short sid; +} mes_msg_info_t; + +typedef struct st_mes_worker_msg_stats_info { + unsigned char is_active; + unsigned int tid; + int priority; + unsigned long long get_msgitem_time; + unsigned long long msg_ruid; + unsigned int msg_src_inst; + mes_msg_info_t msg_info; + char msg_cmd_desc[DMS_CMD_DESC_LEN]; +} mes_worker_msg_stats_info_t; + +typedef struct st_mes_task_priority_stats_info { + int priority; + unsigned int worker_num; + unsigned long long inqueue_msgitem_num; + unsigned long long finished_msgitem_num; + unsigned long long msgitem_free_num; +} mes_task_priority_stats_info_t; + #ifdef __cplusplus } #endif -- Gitee From 77d964c85c5cb4abd8f8c6885d3c68795eedd63f Mon Sep 17 00:00:00 2001 From: "arcoalien@qq.com" Date: Fri, 2 Aug 2024 16:49:25 +0800 Subject: [PATCH 133/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=A4=9A=E4=B8=AAses?= =?UTF-8?q?sion=E9=97=B4=E9=9A=94=E6=89=A7=E8=A1=8Cselect=20for=20update?= =?UTF-8?q?=20wait=20X=20sec,=E7=AD=89=E5=BE=85=E7=9A=84=E9=A2=84=E6=9C=9F?= =?UTF-8?q?=E6=97=B6=E9=97=B4=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cbb/utils/gssignal/gs_signal.cpp | 15 +++++++++++++++ src/gausskernel/process/tcop/postgres.cpp | 5 +++++ .../process/threadpool/knl_thread.cpp | 1 + src/gausskernel/storage/lmgr/proc.cpp | 17 ++++++++++++++--- src/include/gssignal/gs_signal.h | 1 + src/include/knl/knl_thread.h | 1 + src/include/storage/proc.h | 2 +- 7 files changed, 38 insertions(+), 4 deletions(-) diff --git 
a/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp b/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp index 2c30f0afbf..32a62ad969 100644 --- a/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp +++ b/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp @@ -1142,3 +1142,18 @@ gs_sigfunc gspqsignal(int signo, gs_sigfunc func) { return gs_signal_register_handler(t_thrd.signal_slot->gssignal, signo, func); } + +void gs_signal_get_timer(struct timeval* timeval) +{ + struct itimerspec restime; + /* Save rest time for future resume */ + if (timer_gettime(t_thrd.utils_cxt.sigTimerId, /* the created timer */ + &restime)) { + timeval->tv_sec = 0; + timeval->tv_usec = 0; + return; + } + timeval->tv_sec = restime.it_value.tv_sec; + timeval->tv_usec = restime.it_value.tv_nsec / 1000ULL; + return; +} diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp index 1a8ea3221d..bb40c62031 100755 --- a/src/gausskernel/process/tcop/postgres.cpp +++ b/src/gausskernel/process/tcop/postgres.cpp @@ -3150,6 +3150,7 @@ static void exec_simple_query(const char* query_string, MessageType messageType, t_thrd.postgres_cxt.debug_query_string = NULL; t_thrd.postgres_cxt.cur_command_tag = T_Invalid; + t_thrd.storage_cxt.timer_continued = {0, 0}; /* * @hdfs @@ -5499,6 +5500,7 @@ static void exec_execute_message(const char* portal_name, long max_rows) t_thrd.postgres_cxt.debug_query_string = NULL; t_thrd.postgres_cxt.cur_command_tag = T_Invalid; + t_thrd.storage_cxt.timer_continued = {0, 0}; gstrace_exit(GS_TRC_ID_exec_execute_message); } @@ -8498,6 +8500,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam */ t_thrd.postgres_cxt.debug_query_string = NULL; t_thrd.postgres_cxt.cur_command_tag = T_Invalid; + t_thrd.storage_cxt.timer_continued = {0, 0}; if (u_sess->unique_sql_cxt.need_update_calls && is_unique_sql_enabled() && is_local_unique_sql()) { @@ -8655,6 +8658,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, 
const char* usernam u_sess->statement_cxt.executer_run_level = 0; initStringInfo(&input_message); + t_thrd.storage_cxt.timer_continued = {0, 0}; t_thrd.postgres_cxt.debug_query_string = NULL; t_thrd.postgres_cxt.cur_command_tag = T_Invalid; t_thrd.postgres_cxt.g_NoAnalyzeRelNameList = NIL; @@ -9679,6 +9683,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam pfree_ext(completionTag); t_thrd.postgres_cxt.debug_query_string = NULL; t_thrd.postgres_cxt.cur_command_tag = T_Invalid; + t_thrd.storage_cxt.timer_continued = {0, 0}; if (MEMORY_TRACKING_QUERY_PEAK) ereport(LOG, (errmsg("execute opfusion, peak memory %ld(kb)", (int64)(t_thrd.utils_cxt.peakedBytesInQueryLifeCycle/1024)))); diff --git a/src/gausskernel/process/threadpool/knl_thread.cpp b/src/gausskernel/process/threadpool/knl_thread.cpp index 4938f7d5f0..a20b0eb613 100755 --- a/src/gausskernel/process/threadpool/knl_thread.cpp +++ b/src/gausskernel/process/threadpool/knl_thread.cpp @@ -1411,6 +1411,7 @@ static void knl_t_storage_init(knl_t_storage_context* storage_cxt) storage_cxt->lockwait_timeout_active = false; storage_cxt->deadlock_state = DS_NOT_YET_CHECKED; storage_cxt->cancel_from_timeout = false; + storage_cxt->timer_continued = {0, 0}; storage_cxt->timeout_start_time = 0; storage_cxt->statement_fin_time = 0; storage_cxt->statement_fin_time2 = 0; diff --git a/src/gausskernel/storage/lmgr/proc.cpp b/src/gausskernel/storage/lmgr/proc.cpp index 931920101f..60aecb07bf 100755 --- a/src/gausskernel/storage/lmgr/proc.cpp +++ b/src/gausskernel/storage/lmgr/proc.cpp @@ -2154,7 +2154,13 @@ int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_u int needWaitTime = Max(1000, (allow_con_update ? 
u_sess->attr.attr_storage.LockWaitUpdateTimeout : u_sess->attr.attr_storage.LockWaitTimeout) - u_sess->attr.attr_storage.DeadlockTimeout); if (waitSec > 0) { - needWaitTime =Max(1, (waitSec * 1000) - u_sess->attr.attr_storage.DeadlockTimeout); + if (t_thrd.storage_cxt.timer_continued.tv_sec != 0 || t_thrd.storage_cxt.timer_continued.tv_usec != 0) { + int tmpWaitTime = t_thrd.storage_cxt.timer_continued.tv_sec * 1000 + + t_thrd.storage_cxt.timer_continued.tv_usec / 1000; + needWaitTime = Max(1, tmpWaitTime - u_sess->attr.attr_storage.DeadlockTimeout); + } else { + needWaitTime =Max(1, (waitSec * 1000) - u_sess->attr.attr_storage.DeadlockTimeout); + } } if (myWaitStatus == STATUS_WAITING && u_sess->attr.attr_storage.LockWaitTimeout > 0 && @@ -2168,7 +2174,7 @@ int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_u /* * Disable the timer, if it's still running */ - if (!disable_sig_alarm(false)) + if (!disable_sig_alarm(false, waitSec)) ereport(FATAL, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("could not disable timer for process wakeup"))); /* @@ -2844,7 +2850,7 @@ bool disable_idle_in_transaction_session_sig_alarm(void) * * Returns TRUE if okay, FALSE on failure. 
*/ -bool disable_sig_alarm(bool is_statement_timeout) +bool disable_sig_alarm(bool is_statement_timeout, int waitSec) { /* * Always disable the interrupt if it is active; this avoids being @@ -2856,11 +2862,16 @@ bool disable_sig_alarm(bool is_statement_timeout) if (t_thrd.storage_cxt.statement_timeout_active || t_thrd.storage_cxt.deadlock_timeout_active || t_thrd.storage_cxt.lockwait_timeout_active || t_thrd.wlm_cxt.wlmalarm_timeout_active || u_sess->statement_cxt.query_plan_threshold_active) { + if (waitSec > 0) { + gs_signal_get_timer(&(t_thrd.storage_cxt.timer_continued)); + } + if (gs_signal_canceltimer()) { t_thrd.storage_cxt.statement_timeout_active = false; t_thrd.storage_cxt.cancel_from_timeout = false; t_thrd.storage_cxt.deadlock_timeout_active = false; t_thrd.storage_cxt.lockwait_timeout_active = false; + t_thrd.storage_cxt.timer_continued = {0, 0}; t_thrd.wlm_cxt.wlmalarm_timeout_active = false; u_sess->statement_cxt.query_plan_threshold_active = false; return false; diff --git a/src/include/gssignal/gs_signal.h b/src/include/gssignal/gs_signal.h index 2e932bc1c4..ab1dfcd8e5 100644 --- a/src/include/gssignal/gs_signal.h +++ b/src/include/gssignal/gs_signal.h @@ -173,6 +173,7 @@ extern int gs_signal_canceltimer(void); extern int gs_signal_deletetimer(void); extern void gs_signal_monitor_startup(void); +extern void gs_signal_get_timer(struct timeval* timeval); #ifdef WIN32 #define pgwin32_dispatch_queued_signals gs_signal_handle diff --git a/src/include/knl/knl_thread.h b/src/include/knl/knl_thread.h index 6b214e360b..068b3e6822 100755 --- a/src/include/knl/knl_thread.h +++ b/src/include/knl/knl_thread.h @@ -2800,6 +2800,7 @@ typedef struct knl_t_storage_context { volatile bool lockwait_timeout_active; volatile int deadlock_state; volatile bool cancel_from_timeout; + struct timeval timer_continued; /* timeout_start_time is set when log_lock_waits is true */ TimestampTz timeout_start_time; /* statement_fin_time is valid only if statement_timeout_active is 
true */ diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index d8f8403804..8a212b6d7b 100755 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -558,7 +558,7 @@ extern bool enable_query_plan_sig_alarm(int delayms); extern bool disable_session_sig_alarm(void); extern bool disable_idle_in_transaction_session_sig_alarm(void); -extern bool disable_sig_alarm(bool is_statement_timeout); +extern bool disable_sig_alarm(bool is_statement_timeout, int waitSec = 0); extern bool pause_sig_alarm(bool is_statement_timeout); extern bool resume_sig_alarm(bool is_statement_timeout); extern void handle_sig_alarm(SIGNAL_ARGS); -- Gitee From 884982cf5b37bd787a98019c3f9867aa69363fc0 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Sat, 3 Aug 2024 16:03:16 +0800 Subject: [PATCH 134/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=8F=8C=E9=9B=86?= =?UTF-8?q?=E7=BE=A4=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/replication/libpqwalreceiver.cpp | 3 ++- src/gausskernel/storage/smgr/segment/segbuffer.cpp | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/storage/replication/libpqwalreceiver.cpp b/src/gausskernel/storage/replication/libpqwalreceiver.cpp index 9fa54babb9..32efa053e4 100755 --- a/src/gausskernel/storage/replication/libpqwalreceiver.cpp +++ b/src/gausskernel/storage/replication/libpqwalreceiver.cpp @@ -1392,7 +1392,8 @@ bool libpqrcv_receive(int timeout, unsigned char *type, char **buffer, int *len) retcode = MAKE_SQLSTATE(sqlstate[0], sqlstate[1], sqlstate[2], sqlstate[3], sqlstate[4]); } if (retcode == ERRCODE_UNDEFINED_FILE) { - if (t_thrd.role != APPLY_WORKER) { + /* in SS dorado standby cluster, the walreceiver not need to receive wal */ + if (t_thrd.role != APPLY_WORKER && !SS_DORADO_STANDBY_CLUSTER) { ha_set_rebuild_connerror(WALSEGMENT_REBUILD, REPL_INFO_ERROR); } SpinLockAcquire(&walrcv->mutex); 
diff --git a/src/gausskernel/storage/smgr/segment/segbuffer.cpp b/src/gausskernel/storage/smgr/segment/segbuffer.cpp index 448f6acb6e..02c61d1728 100644 --- a/src/gausskernel/storage/smgr/segment/segbuffer.cpp +++ b/src/gausskernel/storage/smgr/segment/segbuffer.cpp @@ -33,7 +33,7 @@ #include "utils/resowner.h" #include "pgstat.h" #include "ddes/dms/ss_dms_bufmgr.h" - +#include "replication/ss_disaster_cluster.h" /* * Segment buffer, used for segment meta data, e.g., segment head, space map head. We separate segment * meta data buffer and normal data buffer (in bufmgr.cpp) to avoid potential dead locks. @@ -323,7 +323,7 @@ void SegFlushCheckDiskLSN(SegSpace *spc, RelFileNode rNode, ForkNumber forknum, BufferDesc *buf_desc, char *buf) { #ifndef USE_ASSERT_CHECKING - if (!IsInitdb && !RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS) { + if (!IsInitdb && !RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && !SS_DISASTER_STANDBY_CLUSTER) { dms_buf_ctrl_t *buf_ctrl = GetDmsBufCtrl(buf_desc->buf_id); XLogRecPtr lsn_on_mem = PageGetLSN(buf); /* latest page must satisfy condition: page lsn_on_disk bigger than transfered page which is latest page */ @@ -335,7 +335,7 @@ void SegFlushCheckDiskLSN(SegSpace *spc, RelFileNode rNode, ForkNumber forknum, } } #else - if (!RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && ENABLE_VERIFY_PAGE_VERSION) { + if (!RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && ENABLE_VERIFY_PAGE_VERSION && !SS_DISASTER_STANDBY_CLUSTER) { char *origin_buf = (char *)palloc(BLCKSZ + ALIGNOF_BUFFER); char *temp_buf = (char *)BUFFERALIGN(origin_buf); seg_physical_read(spc, rNode, forknum, blocknum, temp_buf); -- Gitee From dcf2fe4f48dce5ad748567d5bb9a077ba083288e Mon Sep 17 00:00:00 2001 From: laishenghao Date: Sat, 3 Aug 2024 16:27:58 +0800 Subject: [PATCH 135/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E7=BB=A7=E6=89=BF?= =?UTF-8?q?=E8=A1=A8=E4=B8=8D=E6=94=AF=E6=8C=81select=20FOR=20UPDATE=20SKI?= 
=?UTF-8?q?P=20LOCKED=20=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/prep/prepunion.cpp | 1 + .../regress/expected/skiplocked_inherits.out | 12 +++++++ src/test/regress/expected/skiplocked_post.out | 2 ++ src/test/regress/expected/skiplocked_prep.out | 35 +++++++++++++++++++ .../regress/expected/skiplocked_test1_1.out | 6 ++++ src/test/regress/parallel_schedule0 | 2 +- src/test/regress/sql/skiplocked_inherits.sql | 2 ++ src/test/regress/sql/skiplocked_post.sql | 2 ++ src/test/regress/sql/skiplocked_prep.sql | 19 +++++++++- src/test/regress/sql/skiplocked_test1_1.sql | 2 +- 10 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 src/test/regress/expected/skiplocked_inherits.out create mode 100644 src/test/regress/sql/skiplocked_inherits.sql diff --git a/src/gausskernel/optimizer/prep/prepunion.cpp b/src/gausskernel/optimizer/prep/prepunion.cpp index 44feb4f972..3cb0d9c3b0 100644 --- a/src/gausskernel/optimizer/prep/prepunion.cpp +++ b/src/gausskernel/optimizer/prep/prepunion.cpp @@ -1602,6 +1602,7 @@ static void expand_inherited_rtentry(PlannerInfo* root, RangeTblEntry* rte, Inde newrc->waitPolicy = oldrc->waitPolicy; newrc->waitSec = oldrc->waitSec; newrc->isParent = false; + newrc->bms_nodeids = oldrc->bms_nodeids; root->rowMarks = lappend(root->rowMarks, newrc); } diff --git a/src/test/regress/expected/skiplocked_inherits.out b/src/test/regress/expected/skiplocked_inherits.out new file mode 100644 index 0000000000..3c64163769 --- /dev/null +++ b/src/test/regress/expected/skiplocked_inherits.out @@ -0,0 +1,12 @@ +select pg_sleep(1); + pg_sleep +---------- + +(1 row) + +select * from skiplocked_inherits_1 order by 1 desc limit 1 FOR UPDATE SKIP LOCKED; + id | a1 +----+------------------- + 3 | {"name": "test3"} +(1 row) + diff --git a/src/test/regress/expected/skiplocked_post.out b/src/test/regress/expected/skiplocked_post.out index 
922d3114b1..384534251a 100644 --- a/src/test/regress/expected/skiplocked_post.out +++ b/src/test/regress/expected/skiplocked_post.out @@ -4,3 +4,5 @@ drop view skiplocked_v3; drop table skiplocked_t1; drop table skiplocked_t2; drop table skiplocked_t3; +drop table skiplocked_inherits_2; +drop table skiplocked_inherits_1; diff --git a/src/test/regress/expected/skiplocked_prep.out b/src/test/regress/expected/skiplocked_prep.out index 239769052c..59e2f1cb5a 100644 --- a/src/test/regress/expected/skiplocked_prep.out +++ b/src/test/regress/expected/skiplocked_prep.out @@ -23,3 +23,38 @@ create table IF NOT EXISTS skiplocked_t3( )with (ORIENTATION=COLUMN); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "skiplocked_t3_pkey" for table "skiplocked_t3" insert into skiplocked_t3 values (1, 'one'); +-- test skiplocked with inherited table +drop table if exists skiplocked_inherits_1,skiplocked_inherits_2; +NOTICE: table "skiplocked_inherits_1" does not exist, skipping +NOTICE: table "skiplocked_inherits_2" does not exist, skipping +create table skiplocked_inherits_1( + id int unique, + a1 jsonb check(a1!='{}') +); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "skiplocked_inherits_1_id_key" for table "skiplocked_inherits_1" +CREATE TABLE skiplocked_inherits_2 ( + a2 jsonb default '{"name": "John", "age": 30}', + a3 jsonb not null +) INHERITS (skiplocked_inherits_1); +insert into skiplocked_inherits_2 values(1,'{"name":"test1"}','{"id":1001}','[null,"aaa"]'); +insert into skiplocked_inherits_2 values(2,'{"name":"test2"}',default,'["true"]'); +insert into skiplocked_inherits_2 values(3,'{"name":"test3"}','{"id":1003}','["a", {"b":1,"name": "John", "age": 30}]'); +insert into skiplocked_inherits_2 values(4,'{"name":"test"}',default,'["null","T"]'); +select * from skiplocked_inherits_1 order by id; + id | a1 +----+------------------- + 1 | {"name": "test1"} + 2 | {"name": "test2"} + 3 | {"name": "test3"} + 4 | {"name": "test"} +(4 rows) + +select * from 
skiplocked_inherits_2 order by id; + id | a1 | a2 | a3 +----+-------------------+-----------------------------+-------------------------------------------- + 1 | {"name": "test1"} | {"id": 1001} | [null, "aaa"] + 2 | {"name": "test2"} | {"age": 30, "name": "John"} | ["true"] + 3 | {"name": "test3"} | {"id": 1003} | ["a", {"b": 1, "age": 30, "name": "John"}] + 4 | {"name": "test"} | {"age": 30, "name": "John"} | ["null", "T"] +(4 rows) + diff --git a/src/test/regress/expected/skiplocked_test1_1.out b/src/test/regress/expected/skiplocked_test1_1.out index b522f995ae..dbb2260f2b 100644 --- a/src/test/regress/expected/skiplocked_test1_1.out +++ b/src/test/regress/expected/skiplocked_test1_1.out @@ -1,5 +1,11 @@ begin; update skiplocked_t1 set info = 'two2' where id = 2; +select * from skiplocked_inherits_1 order by 1 desc limit 1 FOR UPDATE SKIP LOCKED; + id | a1 +----+------------------ + 4 | {"name": "test"} +(1 row) + select pg_sleep(5); pg_sleep ---------- diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 133fa781cf..d396ebe1a1 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -1016,7 +1016,7 @@ test: ledger_table_case # select ... 
for update skip locked test: skiplocked_prep -test: skiplocked_test1_1 skiplocked_test1_2 +test: skiplocked_test1_1 skiplocked_test1_2 skiplocked_inherits test: skiplocked_test1_1 skiplocked_test1_3 test: skiplocked_post diff --git a/src/test/regress/sql/skiplocked_inherits.sql b/src/test/regress/sql/skiplocked_inherits.sql new file mode 100644 index 0000000000..f81d40848f --- /dev/null +++ b/src/test/regress/sql/skiplocked_inherits.sql @@ -0,0 +1,2 @@ +select pg_sleep(1); +select * from skiplocked_inherits_1 order by 1 desc limit 1 FOR UPDATE SKIP LOCKED; diff --git a/src/test/regress/sql/skiplocked_post.sql b/src/test/regress/sql/skiplocked_post.sql index fa6a3b124c..4bc3099837 100644 --- a/src/test/regress/sql/skiplocked_post.sql +++ b/src/test/regress/sql/skiplocked_post.sql @@ -5,3 +5,5 @@ drop view skiplocked_v3; drop table skiplocked_t1; drop table skiplocked_t2; drop table skiplocked_t3; +drop table skiplocked_inherits_2; +drop table skiplocked_inherits_1; diff --git a/src/test/regress/sql/skiplocked_prep.sql b/src/test/regress/sql/skiplocked_prep.sql index 6616ca9716..bb83fce049 100644 --- a/src/test/regress/sql/skiplocked_prep.sql +++ b/src/test/regress/sql/skiplocked_prep.sql @@ -23,4 +23,21 @@ create table IF NOT EXISTS skiplocked_t3( info text, primary key (id) )with (ORIENTATION=COLUMN); -insert into skiplocked_t3 values (1, 'one'); \ No newline at end of file +insert into skiplocked_t3 values (1, 'one'); + +-- test skiplocked with inherited table +drop table if exists skiplocked_inherits_1,skiplocked_inherits_2; +create table skiplocked_inherits_1( + id int unique, + a1 jsonb check(a1!='{}') +); +CREATE TABLE skiplocked_inherits_2 ( + a2 jsonb default '{"name": "John", "age": 30}', + a3 jsonb not null +) INHERITS (skiplocked_inherits_1); +insert into skiplocked_inherits_2 values(1,'{"name":"test1"}','{"id":1001}','[null,"aaa"]'); +insert into skiplocked_inherits_2 values(2,'{"name":"test2"}',default,'["true"]'); +insert into skiplocked_inherits_2 
values(3,'{"name":"test3"}','{"id":1003}','["a", {"b":1,"name": "John", "age": 30}]'); +insert into skiplocked_inherits_2 values(4,'{"name":"test"}',default,'["null","T"]'); +select * from skiplocked_inherits_1 order by id; +select * from skiplocked_inherits_2 order by id; diff --git a/src/test/regress/sql/skiplocked_test1_1.sql b/src/test/regress/sql/skiplocked_test1_1.sql index eb6400a9f8..6ca95f5959 100644 --- a/src/test/regress/sql/skiplocked_test1_1.sql +++ b/src/test/regress/sql/skiplocked_test1_1.sql @@ -1,7 +1,7 @@ begin; update skiplocked_t1 set info = 'two2' where id = 2; - +select * from skiplocked_inherits_1 order by 1 desc limit 1 FOR UPDATE SKIP LOCKED; select pg_sleep(5); end; -- Gitee From 34353a2bcdaf68752bc42e96f281b754f8ce82a2 Mon Sep 17 00:00:00 2001 From: lukeman Date: Sat, 3 Aug 2024 16:30:23 +0800 Subject: [PATCH 136/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9Ags=5Fpr?= =?UTF-8?q?obackup=20--help=E4=B8=AD=E6=97=A0s3=5Foptions=E8=AF=B4?= =?UTF-8?q?=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_probackup/help.cpp | 139 ++++++++++++++++++++++++++++++++-- 1 file changed, 131 insertions(+), 8 deletions(-) diff --git a/src/bin/pg_probackup/help.cpp b/src/bin/pg_probackup/help.cpp index a16180ae63..f22612359c 100644 --- a/src/bin/pg_probackup/help.cpp +++ b/src/bin/pg_probackup/help.cpp @@ -86,6 +86,8 @@ void help_pg_probackup(void) printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); printf(_(" [--remote-libpath=libpath]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n")); @@ -93,6 +95,8 @@ void help_pg_probackup(void) printf(_(" 
[--help]\n")); printf(_("\n %s del-instance -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); printf(_(" [--help]\n")); printf(_("\n %s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); @@ -115,6 +119,8 @@ void help_pg_probackup(void) printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); printf(_(" [--remote-libpath=libpath]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n")); @@ -123,15 +129,22 @@ void help_pg_probackup(void) printf(_("\n %s set-backup -B backup-path --instance=instance_name -i backup-id\n"), PROGRAM_NAME); printf(_(" [--note=text] [--ttl=interval] [--expire-time=time]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); + printf(_(" [--s3-status=s3|local]\n")); printf(_(" [--help]\n")); printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [--format=plain|json]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); printf(_(" [--help]\n")); printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME); printf(_(" [--instance=instance_name [-i backup-id]]\n")); printf(_(" [--archive] [--format=plain|json]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); 
printf(_(" [--help]\n")); printf(_("\n %s backup -B backup-path --instance=instance_name -b backup-mode\n"), PROGRAM_NAME); @@ -161,6 +174,8 @@ void help_pg_probackup(void) printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); printf(_(" [--remote-libpath=libpath]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n")); @@ -181,6 +196,8 @@ void help_pg_probackup(void) printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); printf(_(" [--remote-libpath=libpath]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n")); @@ -218,6 +235,8 @@ void help_pg_probackup(void) printf(_(" [--log-directory=log-directory]\n")); printf(_(" [--log-rotation-size=log-rotation-size]\n")); printf(_(" [--log-rotation-age=log-rotation-age]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); printf(_(" [--help]\n")); printf(_("\n %s validate -B backup-path\n"), PROGRAM_NAME); @@ -233,6 +252,8 @@ void help_pg_probackup(void) printf(_(" [--log-directory=log-directory]\n")); printf(_(" [--log-rotation-size=log-rotation-size]\n")); printf(_(" [--log-rotation-age=log-rotation-age]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] 
[--region=region]\n")); printf(_(" [--help]\n")); exit(0); @@ -251,9 +272,11 @@ static void help_add_instance(void) printf(_(" [-E external-directories-paths]\n")); printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); - printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n\n")); + printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n\n")); @@ -278,6 +301,14 @@ static void help_add_instance(void) printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); + #ifndef ENABLE_LITE_MODE printf(_("\n DSS options:\n")); printf(_(" --enable-dss enable shared storage mode\n")); @@ -289,10 +320,20 @@ static void help_add_instance(void) static void help_del_instance(void) { - printf(_("\n%s del-instance -B backup-path --instance=instance_name\n\n"), PROGRAM_NAME); + printf(_("\n%s del-instance -B backup-path 
--instance=instance_name\n"), PROGRAM_NAME); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance to delete\n\n")); + + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); } static void help_set_config(void) @@ -318,6 +359,8 @@ static void help_set_config(void) printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n\n")); printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n\n")); @@ -384,6 +427,14 @@ static void help_set_config(void) printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's 
identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); + #ifndef ENABLE_LITE_MODE printf(_("\n DSS options:\n")); printf(_(" --enable-dss enable shared storage mode\n")); @@ -396,7 +447,10 @@ static void help_set_config(void) static void help_set_backup(void) { printf(_("\n%s set-backup -B backup-path --instance=instance_name -i backup-id\n"), PROGRAM_NAME); - printf(_(" [--note=text] [--ttl=interval] [--expire-time=time]\n\n")); + printf(_(" [--note=text] [--ttl=interval] [--expire-time=time]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); + printf(_(" [--s3-status=s3|local]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); @@ -408,16 +462,35 @@ static void help_set_backup(void) printf(_(" (example: --ttl=20d)\n")); printf(_(" --expire-time=time pin backup until specified time stamp\n")); printf(_(" (example: --expire-time='2024-01-01 00:00:00+03')\n\n")); + + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); + printf(_(" --s3-status current backup set status\n")); } static void help_show_config(void) { printf(_("\n%s show-config -B 
backup-path --instance=instance_name\n"), PROGRAM_NAME); - printf(_(" [--format=format]\n\n")); + printf(_(" [--format=format]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); printf(_(" --format=format show format=PLAIN|JSON\n\n")); + + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); } static void help_show(void) @@ -425,12 +498,22 @@ static void help_show(void) printf(_("\n%s show -B backup-path\n"), PROGRAM_NAME); printf(_(" [--instance=instance_name [-i backup-id]]\n")); printf(_(" [--archive] [--format=format]\n\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name show info about specific instance\n")); printf(_(" -i, --backup-id=backup-id show info about specific backups\n")); printf(_(" --archive show WAL archive information\n")); printf(_(" --format=format show format=PLAIN|JSON\n\n")); + + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the 
user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); } static void help_backup(void) @@ -462,12 +545,14 @@ static void help_backup(void) printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); printf(_(" [--remote-libpath=libpath]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n")); #endif - printf(_(" [--ttl=interval] [--expire-time=time]\n\n")); - printf(_(" [--backup-pg-replslot]\n")); + printf(_(" [--ttl=interval] [--expire-time=time]\n")); + printf(_(" [--backup-pg-replslot]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); @@ -553,6 +638,14 @@ static void help_backup(void) printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" 
--region=region geographical region, optional parameters\n")); + #ifndef ENABLE_LITE_MODE printf(_("\n DSS options:\n")); printf(_(" --enable-dss enable shared storage mode\n")); @@ -583,6 +676,8 @@ static void help_restore(void) printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); printf(_(" [--remote-libpath=libpath]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n")); #ifndef ENABLE_LITE_MODE printf(_(" [--enable-dss] [--instance-id=instance_id]\n")); printf(_(" [--vgname=\"vgdata,vglog\"] [--socketpath=socketpath]\n")); @@ -635,6 +730,14 @@ static void help_restore(void) printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); + #ifndef ENABLE_LITE_MODE printf(_("\n DSS options:\n")); printf(_(" --enable-dss enable shared storage mode\n")); @@ -719,7 +822,9 @@ static void help_delete(void) printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); printf(_(" [--log-rotation-size=log-rotation-size]\n")); - printf(_(" [--log-rotation-age=log-rotation-age]\n\n")); + printf(_(" [--log-rotation-age=log-rotation-age]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); 
+ printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); @@ -762,6 +867,14 @@ static void help_delete(void) printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n\n")); + + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); } static void help_validate(void) @@ -778,7 +891,9 @@ static void help_validate(void) printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); printf(_(" [--log-rotation-size=log-rotation-size]\n")); - printf(_(" [--log-rotation-age=log-rotation-age]\n\n")); + printf(_(" [--log-rotation-age=log-rotation-age]\n")); + printf(_(" [--media-type=type] [--access-id=ak] [--access-key=sk]\n")); + printf(_(" [--access-bucket=bucket] [--endpoint=endpoint] [--region=region]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); @@ -816,4 +931,12 @@ static void help_validate(void) printf(_(" --log-rotation-age=log-rotation-age\n")); printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: 
min)\n\n")); + + printf(_("\n S3 options:\n")); + printf(_(" --media-type=type media type for storing backups, including: s3 and disk\n")); + printf(_(" --access-id=ak access key id, used to identify the user\n")); + printf(_(" --access-key=sk secret access key, used to verify the user's identity\n")); + printf(_(" --access-bucket=bucket name of the bucket created on the object storage\n")); + printf(_(" --endpoint=endpoint accessing domain name which can be in the form of IP: port\n")); + printf(_(" --region=region geographical region, optional parameters\n")); } -- Gitee From a4db38c9b7138456191cf7f6d6bcda6683e92f07 Mon Sep 17 00:00:00 2001 From: jemappellehc <386956049@qq.com> Date: Sat, 3 Aug 2024 17:16:11 +0800 Subject: [PATCH 137/347] fix debugger function core --- src/common/pl/plpgsql/src/pl_debugger.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/common/pl/plpgsql/src/pl_debugger.cpp b/src/common/pl/plpgsql/src/pl_debugger.cpp index d62315b41d..ca2b90385f 100644 --- a/src/common/pl/plpgsql/src/pl_debugger.cpp +++ b/src/common/pl/plpgsql/src/pl_debugger.cpp @@ -139,6 +139,10 @@ static void init_debug_server(PLpgSQL_function* func, int socketId, int debugSta void check_debug(PLpgSQL_function* func, PLpgSQL_execstate* estate) { + if (func->fn_oid == InvalidOid) { + return; + } + bool found = false; bool need_continue_into = u_sess->plsql_cxt.cur_debug_server != NULL && ActiveBPInFunction(u_sess->plsql_cxt.cur_debug_server, func->fn_oid); @@ -221,8 +225,12 @@ void server_debug_main(PLpgSQL_function* func, PLpgSQL_execstate* estate) { DebugInfo* debug_ptr = func->debug; debug_ptr->cur_stmt = estate->err_stmt; + if (unlikely(debug_ptr == NULL || debug_ptr->comm == NULL)) { + ereport(ERROR, (errmodule(MOD_PLDEBUGGER), + errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("Invalid debug info from func %s", func->fn_signature))); + } PlDebuggerComm* debug_comm = &g_instance.pldebug_cxt.debug_comm[debug_ptr->comm->comm_idx]; - 
Assert(debug_ptr != NULL); /* stop to wait client conn if need */ debug_ptr->stop_next_stmt = debug_ptr->stop_next_stmt || IsBreakPointExisted(debug_ptr, func->fn_oid, estate->err_stmt->lineno, true); -- Gitee From 23ebfe050a25fa01bdb705f2c0b701cb807e5db8 Mon Sep 17 00:00:00 2001 From: q00421813 Date: Sat, 3 Aug 2024 10:30:49 +0800 Subject: [PATCH 138/347] =?UTF-8?q?1.=E4=BF=AE=E5=A4=8Dundofile=E6=89=BE?= =?UTF-8?q?=E4=B8=8D=E5=88=B0=E7=9A=84=E9=97=AE=E9=A2=98=202.UndoScanDescD?= =?UTF-8?q?ata=203.=E4=BF=AE=E5=A4=8Dmemcheck=E6=A8=A1=E5=BC=8Fmemory=20le?= =?UTF-8?q?ak?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/process/tcop/postgres.cpp | 7 +++++++ .../opfusion/opfusion_indexonlyscan.cpp | 10 ++++++---- .../storage/access/index/indexam.cpp | 4 ++-- .../storage/access/ubtree/ubtree.cpp | 7 ++++--- .../storage/access/ustore/knl_uscan.cpp | 4 ++-- .../storage/access/ustore/knl_uundorecord.cpp | 19 +++++++++---------- .../storage/access/ustore/knl_uundovec.cpp | 1 - 7 files changed, 30 insertions(+), 22 deletions(-) diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp index 1a8ea3221d..61927296bc 100755 --- a/src/gausskernel/process/tcop/postgres.cpp +++ b/src/gausskernel/process/tcop/postgres.cpp @@ -9301,6 +9301,13 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("wrong role name."))); } + if (unlikely(IsAbortedTransactionBlockState())) { + if (InReceivingLocalUserIdChange()) { + SetUserIdAndSecContext(GetOldUserId(true), + u_sess->misc_cxt.SecurityRestrictionContext & (~RECEIVER_LOCAL_USERID_CHANGE)); + } + break; + } Oid role_oid = GetRoleOid(role_name); Oid save_userid = InvalidOid; int save_sec_context = 0; diff --git a/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp b/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp index 
e3f3e19f26..ad35d42037 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp @@ -290,7 +290,6 @@ TupleTableSlot *IndexOnlyScanFusion::getTupleSlotInternal() /* * Fill the scan tuple slot with data from the index. */ - IndexTuple tmptup = NULL; index_deform_tuple(indexdesc->xs_itup, RelationGetDescr(rel), m_values, m_isnull); if (indexdesc->xs_recheck && EpqCheck(m_values, m_isnull)) { continue; @@ -303,9 +302,12 @@ TupleTableSlot *IndexOnlyScanFusion::getTupleSlotInternal() m_tmpisnull[i] = m_isnull[m_attrno[i] - 1]; } - tmptup = index_form_tuple(m_tupDesc, m_tmpvals, m_tmpisnull); - Assert(tmptup != NULL); - StoreIndexTuple(m_reslot, tmptup, m_tupDesc); + (void)ExecClearTuple(m_reslot); + for (int i = 0; i < m_tupDesc->natts; i++) { + m_reslot->tts_values[i] = m_tmpvals[i]; + m_reslot->tts_isnull[i] = m_tmpisnull[i]; + } + (void)ExecStoreVirtualTuple(m_reslot); tableam_tslot_getsomeattrs(m_reslot, m_tupDesc->natts); ExecDropSingleTupleTableSlot(tmpreslot); diff --git a/src/gausskernel/storage/access/index/indexam.cpp b/src/gausskernel/storage/access/index/indexam.cpp index 999398d2b2..f450adb941 100644 --- a/src/gausskernel/storage/access/index/indexam.cpp +++ b/src/gausskernel/storage/access/index/indexam.cpp @@ -621,7 +621,7 @@ UHeapTuple UHeapamIndexFetchTuple(IndexScanDesc scan, bool *all_dead, bool* has_ uheapTuple = UHeapSearchBuffer(tid, rel, scan->xs_cbuf, scan->xs_snapshot, all_dead); } else { if (!scan->xs_continue_hot) { - scan->xc_undo_scan = (UstoreUndoScanDesc)palloc0(sizeof(UstoreUndoScanDesc)); + scan->xc_undo_scan = (UstoreUndoScanDesc)palloc0(sizeof(UstoreUndoScanDescData)); undoChainEnd = UHeapSearchBufferShowAnyTuplesFirstCall(tid, rel, scan->xs_cbuf, scan->xc_undo_scan); } else { undoChainEnd = UHeapSearchBufferShowAnyTuplesFromUndo(tid, rel, scan->xs_cbuf, scan->xc_undo_scan); @@ -701,7 +701,7 @@ TupleTableSlot *slot, bool *callAgain, bool *allDead, bool* 
has_cur_xact_write) has_cur_xact_write); } else { if (!scan->xs_continue_hot) { - scan->xc_undo_scan = (UstoreUndoScanDesc)palloc0(sizeof(UstoreUndoScanDesc)); + scan->xc_undo_scan = (UstoreUndoScanDesc)palloc0(sizeof(UstoreUndoScanDescData)); undoChainEnd = UHeapSearchBufferShowAnyTuplesFirstCall(tid, rel, scan->xs_cbuf, scan->xc_undo_scan); } else { undoChainEnd = UHeapSearchBufferShowAnyTuplesFromUndo(tid, rel, scan->xs_cbuf, scan->xc_undo_scan); diff --git a/src/gausskernel/storage/access/ubtree/ubtree.cpp b/src/gausskernel/storage/access/ubtree/ubtree.cpp index c8716db49c..daae414351 100644 --- a/src/gausskernel/storage/access/ubtree/ubtree.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtree.cpp @@ -905,9 +905,10 @@ restart: if (minoff <= maxoff) { stats->num_index_tuples += maxoff - minoff + 1; } - - if(vstate->cycleid !=0 && opaque->btpo_cycleid == vstate->cycleid - && !(opaque->btpo_flags & BTP_SPLIT_END) && !P_RIGHTMOST(opaque) && opaque->btpo_next < origBlkno){ + + if (vstate->cycleid != 0 && opaque->btpo_cycleid == vstate->cycleid + && !(opaque->btpo_flags & BTP_SPLIT_END) + && !P_RIGHTMOST(opaque) && opaque->btpo_next < origBlkno) { recurseTo = opaque->btpo_next; } } diff --git a/src/gausskernel/storage/access/ustore/knl_uscan.cpp b/src/gausskernel/storage/access/ustore/knl_uscan.cpp index b2c8af3dfb..8acf69dc0c 100644 --- a/src/gausskernel/storage/access/ustore/knl_uscan.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uscan.cpp @@ -429,8 +429,8 @@ get_next_tuple: } else { if (!scan->xs_continue_undo) { ItemPointerSet(&scan->curTid, BufferGetBlockNumber(scan->rs_base.rs_cbuf), lineoff); - errno_t rc = memset_s(scan->xc_undo_scan, sizeof(UstoreUndoScanDesc), - 0, sizeof(UstoreUndoScanDesc)); + errno_t rc = memset_s(scan->xc_undo_scan, sizeof(UstoreUndoScanDescData), + 0, sizeof(UstoreUndoScanDescData)); securec_check(rc, "\0", "\0"); undoChainEnd = UHeapSearchBufferShowAnyTuplesFirstCall(&scan->curTid, scan->rs_base.rs_rd, scan->rs_base.rs_cbuf, 
scan->xc_undo_scan); diff --git a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp index c41bd3efc3..7e18b284b4 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp @@ -492,18 +492,17 @@ static UndoRecordState LoadUndoRecord(UndoRecord *urec, TransactionId *lastXid) MemoryContext oldContext = MemoryContextSwitchTo(currentContext); t_thrd.int_cxt.CritSectionCount = saveCritSectionCount; state = undo::CheckUndoRecordValid(urec->Urp(), true, lastXid); - if (BufferIsValid(urec->Buff())) { - if (LWLockHeldByMeInMode(BufferDescriptorGetContentLock( - GetBufferDescriptor(urec->Buff() - 1)), LW_SHARED)) { - LockBuffer(urec->Buff(), BUFFER_LOCK_UNLOCK); - } - ReleaseBuffer(urec->Buff()); - urec->SetBuff(InvalidBuffer); - } if (state == UNDO_RECORD_DISCARD || state == UNDO_RECORD_FORCE_DISCARD) { t_thrd.undo_cxt.fetchRecord = false; t_thrd.int_cxt.InterruptHoldoffCount = saveInterruptHoldoffCount; - EmitErrorReport(); + if (BufferIsValid(urec->Buff())) { + if (LWLockHeldByMeInMode(BufferDescriptorGetContentLock( + GetBufferDescriptor(urec->Buff() - 1)), LW_SHARED)) { + LockBuffer(urec->Buff(), BUFFER_LOCK_UNLOCK); + } + ReleaseBuffer(urec->Buff()); + urec->SetBuff(InvalidBuffer); + } FlushErrorState(); return state; } else { @@ -701,4 +700,4 @@ void UndoRecordVerify(_in_ UndoRecord *urec) errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
utype %d , payLoadLen %hu, uinfo %d"), urec->Utype(), urec->PayLoadLen(), (int)urec->Uinfo()))); } -} \ No newline at end of file +} diff --git a/src/gausskernel/storage/access/ustore/knl_uundovec.cpp b/src/gausskernel/storage/access/ustore/knl_uundovec.cpp index a378c2c491..1a9cd6d85b 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundovec.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundovec.cpp @@ -420,7 +420,6 @@ static bool LoadUndoRecordRange(UndoRecord *urec, Buffer *buffer) if (state == UNDO_RECORD_DISCARD || state == UNDO_RECORD_FORCE_DISCARD) { t_thrd.undo_cxt.fetchRecord = false; t_thrd.int_cxt.InterruptHoldoffCount = saveInterruptHoldoffCount; - EmitErrorReport(); FlushErrorState(); return false; } else { -- Gitee From c137df4a1bc2c276d2b8aa985a6a63f00ba9930d Mon Sep 17 00:00:00 2001 From: jemappellehc <386956049@qq.com> Date: Mon, 5 Aug 2024 09:28:25 +0800 Subject: [PATCH 139/347] fix debugger function core --- src/common/pl/plpgsql/src/pl_debugger.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/pl/plpgsql/src/pl_debugger.cpp b/src/common/pl/plpgsql/src/pl_debugger.cpp index ca2b90385f..666634c202 100644 --- a/src/common/pl/plpgsql/src/pl_debugger.cpp +++ b/src/common/pl/plpgsql/src/pl_debugger.cpp @@ -224,12 +224,12 @@ static void set_debugger_procedure_state(int commIdx, bool state) void server_debug_main(PLpgSQL_function* func, PLpgSQL_execstate* estate) { DebugInfo* debug_ptr = func->debug; - debug_ptr->cur_stmt = estate->err_stmt; if (unlikely(debug_ptr == NULL || debug_ptr->comm == NULL)) { ereport(ERROR, (errmodule(MOD_PLDEBUGGER), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("Invalid debug info from func %s", func->fn_signature))); } + debug_ptr->cur_stmt = estate->err_stmt; PlDebuggerComm* debug_comm = &g_instance.pldebug_cxt.debug_comm[debug_ptr->comm->comm_idx]; /* stop to wait client conn if need */ debug_ptr->stop_next_stmt = debug_ptr->stop_next_stmt || -- Gitee From 
8431361f524b85278b2c936154e842dcd69a9d97 Mon Sep 17 00:00:00 2001 From: dongning12 Date: Fri, 2 Aug 2024 15:40:25 +0800 Subject: [PATCH 140/347] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=E3=80=90=E5=90=8C=E6=AD=A5DMS=E7=82=B9?= =?UTF-8?q?=E3=80=918.2=E5=90=8C=E6=AD=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 2 +- src/include/ddes/dms/dms_api.h | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 31cce690c0..61d908823f 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=1e366d6a3ab36a7947d02d2664496d09e2a079ef +dms_commit_id=8b64ce46c8cfa9a978604b346b0d32b264c8ee6c dss_commit_id=083e52af8c7f965856f319554d6332b14f6b2c99 cbb_commit_id=49b0b0c664346a690c9fe9b537f264306dbdc46d diff --git a/src/include/ddes/dms/dms_api.h b/src/include/ddes/dms/dms_api.h index 1c2421034d..c5b98f264f 100644 --- a/src/include/ddes/dms/dms_api.h +++ b/src/include/ddes/dms/dms_api.h @@ -34,7 +34,7 @@ extern "C" { #define DMS_LOCAL_MINOR_VER_WEIGHT 1000 #define DMS_LOCAL_MAJOR_VERSION 0 #define DMS_LOCAL_MINOR_VERSION 0 -#define DMS_LOCAL_VERSION 163 +#define DMS_LOCAL_VERSION 164 #define DMS_SUCCESS 0 #define DMS_ERROR (-1) @@ -841,14 +841,16 @@ typedef void(*dms_stats_buf)(void *db_handle, dms_buf_ctrl_t *buf_ctrl, dms_buf_ typedef int(*dms_remove_buf_load_status)(dms_buf_ctrl_t *buf_ctrl, dms_buf_load_status_t dms_buf_load_status); typedef void(*dms_update_global_lsn)(void *db_handle, unsigned long long lamport_lsn); typedef void(*dms_update_global_scn)(void *db_handle, unsigned long long lamport_scn); -typedef void(*dms_update_node_lfn)(void *db_handle, unsigned long long lfn, char node_id); +typedef void(*dms_update_node_lfn)(void *db_handle, unsigned char node_id, unsigned long long 
node_lfn, + unsigned long long *node_data, unsigned int len); typedef void(*dms_update_page_lfn)(dms_buf_ctrl_t *buf_ctrl, unsigned long long lastest_lfn); typedef unsigned long long (*dms_get_page_lfn)(dms_buf_ctrl_t *buf_ctrl); typedef unsigned long long (*dms_get_page_scn)(dms_buf_ctrl_t *buf_ctrl); typedef unsigned long long(*dms_get_global_lfn)(void *db_handle); typedef unsigned long long(*dms_get_global_scn)(void *db_handle); typedef unsigned long long(*dms_get_global_lsn)(void *db_handle); -typedef unsigned long long(*dms_get_global_flushed_lfn)(void *db_handle); +typedef void(*dms_get_global_flushed_lfn)(void *db_handle, unsigned char *node_id, unsigned long long *node_lfn, + unsigned long long *node_data, unsigned int len); typedef int(*dms_read_local_page4transfer)(void *db_handle, char pageid[DMS_PAGEID_SIZE], dms_lock_mode_t mode, dms_buf_ctrl_t **buf_ctrl); typedef int(*dms_try_read_local_page)(void *db_handle, char pageid[DMS_PAGEID_SIZE], @@ -980,6 +982,8 @@ typedef void (*dms_set_online_list)(void *db_handle, unsigned long long online_l typedef int (*dms_standby_update_remove_node_ctrl)(void *db_handle, unsigned long long online_list); typedef int (*dms_standby_stop_thread)(void *db_handle, unsigned long long online_list, unsigned int reformer_id); typedef int (*dms_standby_reload_node_ctrl)(void *db_handle); +typedef int (*dms_standby_stop_server)(void *db_handle); +typedef int (*dms_standby_resume_server)(void *db_handle); typedef int (*dms_start_lrpl)(void *db_handle, int is_reformer); typedef int (*dms_stop_lrpl)(void *db_handle, int is_reformer); typedef int (*dms_az_switchover_demote_phase1)(void *db_handle); @@ -1169,6 +1173,8 @@ typedef struct st_dms_callback { dms_standby_update_remove_node_ctrl standby_update_remove_node_ctrl; dms_standby_stop_thread standby_stop_thread; dms_standby_reload_node_ctrl standby_reload_node_ctrl; + dms_standby_stop_server standby_stop_server; + dms_standby_resume_server standby_resume_server; dms_start_lrpl 
start_lrpl; dms_stop_lrpl stop_lrpl; -- Gitee From bfc6192f318f73aed91be0823eabe248f92d2267 Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Fri, 2 Aug 2024 10:16:35 +0800 Subject: [PATCH 141/347] =?UTF-8?q?M=E5=85=BC=E5=AE=B9=E6=80=A7=20gsql=20?= =?UTF-8?q?=E8=BE=93=E5=87=BA=E8=BD=AC=E4=B9=89=E5=AD=97=E7=AC=A6=E6=A0=BC?= =?UTF-8?q?=E5=BC=8F=E5=8C=96=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/psql/command.cpp | 2 + src/bin/psql/common.cpp | 27 +++ src/bin/psql/common.h | 1 + src/bin/psql/mainloop.cpp | 8 +- src/bin/psql/mbprint.cpp | 9 + src/bin/psql/settings.h | 3 + src/bin/psql/startup.cpp | 2 + src/include/postgres_fe.h | 9 + src/test/regress/expected/encrypt_decrypt.out | 186 +++++++++--------- 9 files changed, 147 insertions(+), 100 deletions(-) diff --git a/src/bin/psql/command.cpp b/src/bin/psql/command.cpp index 62ca2b60b7..c3bfd0e293 100644 --- a/src/bin/psql/command.cpp +++ b/src/bin/psql/command.cpp @@ -1628,6 +1628,8 @@ static bool do_connect(char* dbname, char* user, char* host, char* port) SyncVariables(); connection_warnings(false); /* Must be after SyncVariables */ + pset.dbType = GetDatabaseType(); + /* Tell the user about the new connection */ if (!pset.quiet) { if (param_is_newly_set(PQhost(o_conn), PQhost(pset.db)) || diff --git a/src/bin/psql/common.cpp b/src/bin/psql/common.cpp index 5d87b31fae..a9c98edb58 100644 --- a/src/bin/psql/common.cpp +++ b/src/bin/psql/common.cpp @@ -2732,4 +2732,31 @@ bool CheckSpecificExtension(PGconn *connection, char *extension) PQclear(res); return isHasExtension; +} + +/* Get Database Type */ +DBFormatType GetDatabaseType() +{ + char* compatibilityStr; + /* Default set A_FORMAT */ + DBFormatType dbType = A_FORMAT; + + PGresult* res = PQexec(pset.db, "show sql_compatibility"); + if (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK) { + compatibilityStr = PQgetvalue(res, 0, 0); + if (strcmp(compatibilityStr, "A") 
== 0) { + dbType = A_FORMAT; + } else if (strcmp(compatibilityStr, "B") == 0) { + dbType = B_FORMAT; + } else if (strcmp(compatibilityStr, "C") == 0) { + dbType = C_FORMAT; + } else if (strcmp(compatibilityStr, "PG") == 0) { + dbType = PG_FORMAT; + } + } + + PQclear(res); + res = NULL; + + return dbType; } \ No newline at end of file diff --git a/src/bin/psql/common.h b/src/bin/psql/common.h index 3d6d1efb60..013d07b049 100644 --- a/src/bin/psql/common.h +++ b/src/bin/psql/common.h @@ -99,6 +99,7 @@ extern void ResetQueryRetryController(); extern bool QueryRetryController(const char* query); extern bool SendQuery(const char* query, bool is_print = true, bool print_error = true); extern bool MakeCopyWorker(const char* query, int nclients); +extern DBFormatType GetDatabaseType(); extern bool is_superuser(void); extern bool standard_strings(void); diff --git a/src/bin/psql/mainloop.cpp b/src/bin/psql/mainloop.cpp index 96007e82f2..207d9e9a5a 100644 --- a/src/bin/psql/mainloop.cpp +++ b/src/bin/psql/mainloop.cpp @@ -368,13 +368,7 @@ int MainLoop(FILE* source, char* querystring) exit(EXIT_FAILURE); } - /* Initialize current database compatibility */ - PGresult* res = PQexec(pset.db, "show sql_compatibility"); - if (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK) { - is_b_format = strcmp (PQgetvalue(res, 0, 0), "B") == 0; - } - PQclear(res); - res = NULL; + is_b_format = IS_CMPT(pset.dbType, B_FORMAT); /* main loop to get queries and execute them */ while (successResult == EXIT_SUCCESS) { diff --git a/src/bin/psql/mbprint.cpp b/src/bin/psql/mbprint.cpp index d361ee4337..82707585a2 100644 --- a/src/bin/psql/mbprint.cpp +++ b/src/bin/psql/mbprint.cpp @@ -221,6 +221,10 @@ void pg_wcssize( linewidth++; format_size++; } while (linewidth % 8 != 0); + } else if (IS_CMPT(pset.dbType, B_FORMAT) && *pwcs == '\b') { /* backspace character */ + linewidth += w; + } else if (IS_CMPT(pset.dbType, B_FORMAT) && *pwcs == '\032') { /* ascii 26(control + Z)*/ + /* Do nothing for this 
*/ } else if (w < 0) { /* Other control char */ linewidth += 4; format_size += 4; @@ -298,6 +302,11 @@ void pg_wcsformat(const unsigned char* pwcs, size_t len, int encoding, struct li *ptr++ = ' '; linewidth++; } while (linewidth % 8 != 0); + } else if (IS_CMPT(pset.dbType, B_FORMAT) && *pwcs == '\b') { /* backspace character */ + *(--ptr) = *pwcs; + linewidth += w; + } else if (IS_CMPT(pset.dbType, B_FORMAT) && *pwcs == '\032') { /* ascii 26(control + Z)*/ + /* Do nothing for this */ } else if (w < 0) { /* Other control char */ rc = sprintf_s((char*)ptr, max_bytes - linewidth + 1, "\\x%02X", *pwcs); securec_check_ss_c(rc, "\0", "\0"); diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h index fffd3d12ff..e4a34e665a 100644 --- a/src/bin/psql/settings.h +++ b/src/bin/psql/settings.h @@ -12,6 +12,7 @@ #include "variables.h" #include "print.h" +#include "postgres_fe.h" /* Database Security: Data importing/dumping support AES128. */ #include "utils/aes.h" @@ -86,6 +87,8 @@ typedef struct _psqlSettings { VariableSpace vars; /* "shell variable" repository */ + DBFormatType dbType; + /* * The remaining fields are set by assign hooks associated with entries in * "vars". 
They should not be set directly except by those hook diff --git a/src/bin/psql/startup.cpp b/src/bin/psql/startup.cpp index 03c5f5f144..64fb84c58d 100644 --- a/src/bin/psql/startup.cpp +++ b/src/bin/psql/startup.cpp @@ -701,6 +701,8 @@ int main(int argc, char* argv[]) SyncVariables(); + pset.dbType = GetDatabaseType(); + if (options.action == ACT_LIST_DB && !isparseonly) { int success; diff --git a/src/include/postgres_fe.h b/src/include/postgres_fe.h index fed05e7a5d..024074631f 100644 --- a/src/include/postgres_fe.h +++ b/src/include/postgres_fe.h @@ -56,6 +56,15 @@ typedef enum { TD_FORMAT, M_FORMAT } DatabaseType; + +typedef enum { + A_FORMAT = 0x0001, + B_FORMAT = 0x0002, + C_FORMAT = 0x0004, + PG_FORMAT = 0x0008 +} DBFormatType; +#define IS_CMPT(cmpt, flag) (((uint32)(cmpt) & (uint32)(flag)) != 0) + #endif // HAVE_DATABASE_TYPE #endif /* POSTGRES_FE_H */ diff --git a/src/test/regress/expected/encrypt_decrypt.out b/src/test/regress/expected/encrypt_decrypt.out index 8038ffbdc6..275ee61d68 100644 --- a/src/test/regress/expected/encrypt_decrypt.out +++ b/src/test/regress/expected/encrypt_decrypt.out @@ -137,10 +137,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -161,15 +161,15 @@ select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', 'abcd'); ERROR: the size of init_vector must be greater than or equal to 16. 
CONTEXT: referenced column: aes_encrypt select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef'); - aes_encrypt -------------------------- - 8\x01MP\x1Fe3\x0F \x08` + aes_encrypt +-------------------- + 8\x01MP\x1Fe3\x0F` (1 row) select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456'); - aes_encrypt -------------------------- - 8\x01MP\x1Fe3\x0F \x08` + aes_encrypt +-------------------- + 8\x01MP\x1Fe3\x0F` (1 row) select lengthb(aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456')), lengthb('HuaweiGauss_234'); @@ -277,10 +277,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -417,10 +417,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -557,10 +557,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa 
dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -611,9 +611,9 @@ select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef') = (1 row) select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890dgjahdjk'); - aes_encrypt ---------------------------- - $+&+\x074uC\x08\x1CK\x1E + aes_encrypt +---------------------- + $+&+\x074u\x1CK\x1E (1 row) select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef') = aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890dgjahdjk'); @@ -697,10 +697,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -837,10 +837,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -977,10 +977,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select 
aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1117,10 +1117,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1257,10 +1257,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1281,15 +1281,15 @@ select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', 'abcd'); ERROR: the size of init_vector must be greater than or equal to 16. 
CONTEXT: referenced column: aes_encrypt select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef'); - aes_encrypt ------------------------ - hJ#\x0E\x1A\x01跟uGfG + aes_encrypt +------------------- + hJ#\x0E\x01跟uGfG (1 row) select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456'); - aes_encrypt ------------------------ - hJ#\x0E\x1A\x01跟uGfG + aes_encrypt +------------------- + hJ#\x0E\x01跟uGfG (1 row) select lengthb(aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456')), lengthb('HuaweiGauss_234'); @@ -1397,10 +1397,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1537,10 +1537,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1561,15 +1561,15 @@ select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', 'abcd'); ERROR: the size of init_vector must be greater than or equal to 16. 
CONTEXT: referenced column: aes_encrypt select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef'); - aes_encrypt -------------------- - p\x1A\x05pg\x17R® + aes_encrypt +--------------- + p\x05pg\x17R® (1 row) select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456'); - aes_encrypt -------------------- - p\x1A\x05pg\x17R® + aes_encrypt +--------------- + p\x05pg\x17R® (1 row) select lengthb(aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456')), lengthb('HuaweiGauss_234'); @@ -1677,10 +1677,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1701,15 +1701,15 @@ select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', 'abcd'); ERROR: the size of init_vector must be greater than or equal to 16. 
CONTEXT: referenced column: aes_encrypt select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef'); - aes_encrypt ------------------------ - hJ#\x0E\x1A\x01跟uGfG + aes_encrypt +------------------- + hJ#\x0E\x01跟uGfG (1 row) select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456'); - aes_encrypt ------------------------ - hJ#\x0E\x1A\x01跟uGfG + aes_encrypt +------------------- + hJ#\x0E\x01跟uGfG (1 row) select lengthb(aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456')), lengthb('HuaweiGauss_234'); @@ -1817,10 +1817,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1957,10 +1957,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); @@ -1981,15 +1981,15 @@ select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', 'abcd'); ERROR: the size of init_vector must be greater than or equal to 16. 
CONTEXT: referenced column: aes_encrypt select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef'); - aes_encrypt -------------------- - p\x1A\x05pg\x17R® + aes_encrypt +--------------- + p\x05pg\x17R® (1 row) select aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456'); - aes_encrypt -------------------- - p\x1A\x05pg\x17R® + aes_encrypt +--------------- + p\x05pg\x17R® (1 row) select lengthb(aes_encrypt('HuaweiGauss_234', 'cdjsfj3713vdVHV1', '1234567890abcdef123456')), lengthb('HuaweiGauss_234'); @@ -2097,10 +2097,10 @@ select aes_decrypt(aes_encrypt('大家好','cdjsfj3713vdVHV1', '1234567890abcdef (1 row) select aes_decrypt(aes_encrypt(E'd\nsa\tdj\bsaf\Z\\dssa\ca\rs','dajd123FGBJG', '1234567890abcdef'),'dajd123FGBJG', '1234567890abcdef'); - aes_decrypt ------------------------------- - d + - sa dj\x08safZ\dssaca\rs + aes_decrypt +------------------------- + d + + sa dsafZ\dssaca\rs (1 row) select aes_decrypt(aes_encrypt('~·!@#¥%……&*()-=——+{}【】、|:‘’,。《》、?','12345dvghadCVBUJNF', '1234567890abcdef'),'12345dvghadCVBUJNF', '1234567890abcdef'); -- Gitee From 081ef521bcbb8ae4f7ae2ee6e27363c161636e5a Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 5 Aug 2024 11:32:22 +0800 Subject: [PATCH 142/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddolphin=E5=9B=A0DBE?= =?UTF-8?q?=5FPERF.statement=E9=87=8D=E5=BB=BA=E8=80=8C=E5=8D=87=E7=BA=A7?= =?UTF-8?q?=E5=A4=B1=E8=B4=A5=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CMakeLists.txt | 6 ++++ build/script/aarch64_finance_list | 2 ++ build/script/aarch64_lite_list | 2 ++ build/script/aarch64_opengauss_list | 2 ++ .../opengauss_release_list_ubuntu_single | 2 ++ build/script/x86_64_finance_list | 2 ++ build/script/x86_64_lite_list | 2 ++ build/script/x86_64_opengauss_list | 2 ++ .../rollback-post_catalog_maindb_92_945.sql | 28 ++++++++++++++++++- .../rollback-post_catalog_otherdb_92_945.sql | 28 
++++++++++++++++++- .../upgrade-post_catalog_maindb_92_945.sql | 28 ++++++++++++++++++- .../upgrade-post_catalog_otherdb_92_945.sql | 28 ++++++++++++++++++- 12 files changed, 128 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 67bb3d20ae..5aab533a3f 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -144,6 +144,12 @@ if(EXISTS ${CMAKE_SOURCE_DIR}/contrib/dolphin) install(FILES ${CMAKE_SOURCE_DIR}/contrib/dolphin/dolphin--2.0.1--2.0.sql DESTINATION share/postgresql/extension/ ) + install(FILES ${CMAKE_SOURCE_DIR}/contrib/dolphin/dolphin--4.0--4.0.1.sql + DESTINATION share/postgresql/extension/ + ) + install(FILES ${CMAKE_SOURCE_DIR}/contrib/dolphin/dolphin--4.0.1--4.0.sql + DESTINATION share/postgresql/extension/ + ) install(FILES ${CMAKE_SOURCE_DIR}/contrib/dolphin/openGauss_expr_dolphin.ir DESTINATION share/postgresql/extension/ ) diff --git a/build/script/aarch64_finance_list b/build/script/aarch64_finance_list index f8353c9a28..608f59ec6a 100644 --- a/build/script/aarch64_finance_list +++ b/build/script/aarch64_finance_list @@ -86,6 +86,8 @@ ./share/postgresql/extension/dolphin--4.0--3.0.sql ./share/postgresql/extension/dolphin--2.0--2.0.1.sql ./share/postgresql/extension/dolphin--2.0.1--2.0.sql +./share/postgresql/extension/dolphin--4.0--4.0.1.sql +./share/postgresql/extension/dolphin--4.0.1--4.0.sql ./share/postgresql/extension/whale.control ./share/postgresql/extension/whale--1.0.sql ./share/postgresql/extension/openGauss_expr_dolphin.ir diff --git a/build/script/aarch64_lite_list b/build/script/aarch64_lite_list index fdbd3de107..c3545e89b9 100644 --- a/build/script/aarch64_lite_list +++ b/build/script/aarch64_lite_list @@ -45,6 +45,8 @@ ./share/postgresql/extension/dolphin--4.0--3.0.sql ./share/postgresql/extension/dolphin--2.0--2.0.1.sql ./share/postgresql/extension/dolphin--2.0.1--2.0.sql +./share/postgresql/extension/dolphin--4.0--4.0.1.sql +./share/postgresql/extension/dolphin--4.0.1--4.0.sql 
./share/postgresql/extension/openGauss_expr_dolphin.ir ./share/postgresql/extension/file_fdw--1.0.sql ./share/postgresql/extension/plpgsql.control diff --git a/build/script/aarch64_opengauss_list b/build/script/aarch64_opengauss_list index 102ad439e3..c21a558966 100644 --- a/build/script/aarch64_opengauss_list +++ b/build/script/aarch64_opengauss_list @@ -89,6 +89,8 @@ ./share/postgresql/extension/dolphin--4.0--3.0.sql ./share/postgresql/extension/dolphin--2.0--2.0.1.sql ./share/postgresql/extension/dolphin--2.0.1--2.0.sql +./share/postgresql/extension/dolphin--4.0--4.0.1.sql +./share/postgresql/extension/dolphin--4.0.1--4.0.sql ./share/postgresql/extension/openGauss_expr_dolphin.ir ./share/postgresql/extension/age--1.0.0.sql ./share/postgresql/extension/age.control diff --git a/build/script/opengauss_release_list_ubuntu_single b/build/script/opengauss_release_list_ubuntu_single index 0632e8e03c..0f00e69f63 100644 --- a/build/script/opengauss_release_list_ubuntu_single +++ b/build/script/opengauss_release_list_ubuntu_single @@ -75,6 +75,8 @@ ./share/postgresql/extension/dolphin--4.0--3.0.sql ./share/postgresql/extension/dolphin--2.0--2.0.1.sql ./share/postgresql/extension/dolphin--2.0.1--2.0.sql +./share/postgresql/extension/dolphin--4.0--4.0.1.sql +./share/postgresql/extension/dolphin--4.0.1--4.0.sql ./share/postgresql/extension/openGauss_expr_dolphin.ir ./share/postgresql/extension/age--1.0.0.sql ./share/postgresql/extension/age.control diff --git a/build/script/x86_64_finance_list b/build/script/x86_64_finance_list index b12b4ff588..899b439da2 100644 --- a/build/script/x86_64_finance_list +++ b/build/script/x86_64_finance_list @@ -86,6 +86,8 @@ ./share/postgresql/extension/dolphin--4.0--3.0.sql ./share/postgresql/extension/dolphin--2.0--2.0.1.sql ./share/postgresql/extension/dolphin--2.0.1--2.0.sql +./share/postgresql/extension/dolphin--4.0--4.0.1.sql +./share/postgresql/extension/dolphin--4.0.1--4.0.sql ./share/postgresql/extension/whale.control 
./share/postgresql/extension/whale--1.0.sql ./share/postgresql/extension/openGauss_expr_dolphin.ir diff --git a/build/script/x86_64_lite_list b/build/script/x86_64_lite_list index 5a1647c2d2..58cedc6375 100644 --- a/build/script/x86_64_lite_list +++ b/build/script/x86_64_lite_list @@ -45,6 +45,8 @@ ./share/postgresql/extension/dolphin--4.0--3.0.sql ./share/postgresql/extension/dolphin--2.0--2.0.1.sql ./share/postgresql/extension/dolphin--2.0.1--2.0.sql +./share/postgresql/extension/dolphin--4.0--4.0.1.sql +./share/postgresql/extension/dolphin--4.0.1--4.0.sql ./share/postgresql/extension/openGauss_expr_dolphin.ir ./share/postgresql/extension/file_fdw--1.0.sql ./share/postgresql/extension/plpgsql.control diff --git a/build/script/x86_64_opengauss_list b/build/script/x86_64_opengauss_list index 98b009cc70..1273cd99c6 100644 --- a/build/script/x86_64_opengauss_list +++ b/build/script/x86_64_opengauss_list @@ -87,6 +87,8 @@ ./share/postgresql/extension/dolphin--4.0--3.0.sql ./share/postgresql/extension/dolphin--2.0--2.0.1.sql ./share/postgresql/extension/dolphin--2.0.1--2.0.sql +./share/postgresql/extension/dolphin--4.0--4.0.1.sql +./share/postgresql/extension/dolphin--4.0.1--4.0.sql ./share/postgresql/extension/openGauss_expr_dolphin.ir ./share/postgresql/extension/age--1.0.0.sql ./share/postgresql/extension/age.control diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql index 3aa80da32d..1c5c292d3e 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql @@ -1,3 +1,16 @@ +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true 
then + ALTER EXTENSION dolphin UPDATE TO '4.0.1'; + end if; + exit; + END LOOP; +END$$; + DO $DO$ DECLARE ans boolean; @@ -692,4 +705,17 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; END IF; END IF; -END $DO$; \ No newline at end of file +END $DO$; + +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true then + ALTER EXTENSION dolphin UPDATE TO '4.0'; + end if; + exit; + END LOOP; +END$$; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql index 3aa80da32d..1c5c292d3e 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql @@ -1,3 +1,16 @@ +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true then + ALTER EXTENSION dolphin UPDATE TO '4.0.1'; + end if; + exit; + END LOOP; +END$$; + DO $DO$ DECLARE ans boolean; @@ -692,4 +705,17 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; END IF; END IF; -END $DO$; \ No newline at end of file +END $DO$; + +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true then + ALTER EXTENSION dolphin UPDATE TO '4.0'; + end if; + exit; + END LOOP; +END$$; \ No newline at end of file diff --git 
a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql index 1f669cc58b..7f7aa55807 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql @@ -1,3 +1,16 @@ +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true then + ALTER EXTENSION dolphin UPDATE TO '4.0.1'; + end if; + exit; + END LOOP; +END$$; + DO $DO$ DECLARE ans boolean; @@ -700,4 +713,17 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; END IF; END IF; -END $DO$; \ No newline at end of file +END $DO$; + +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true then + ALTER EXTENSION dolphin UPDATE TO '4.0'; + end if; + exit; + END LOOP; +END$$; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql index 1f669cc58b..7f7aa55807 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql @@ -1,3 +1,16 @@ +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true then + ALTER EXTENSION dolphin UPDATE TO 
'4.0.1'; + end if; + exit; + END LOOP; +END$$; + DO $DO$ DECLARE ans boolean; @@ -700,4 +713,17 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; END IF; END IF; -END $DO$; \ No newline at end of file +END $DO$; + +do $$ +DECLARE +ans boolean; +BEGIN + for ans in select case when count(*)=1 then true else false end as ans from (select extname from pg_extension where extname='dolphin') + LOOP + if ans = true then + ALTER EXTENSION dolphin UPDATE TO '4.0'; + end if; + exit; + END LOOP; +END$$; \ No newline at end of file -- Gitee From a498e1a80b23768308941d1a88e2a2eb3ba7870a Mon Sep 17 00:00:00 2001 From: lukeman Date: Sat, 3 Aug 2024 14:50:48 +0800 Subject: [PATCH 143/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9A?= =?UTF-8?q?=E6=89=A7=E8=A1=8C=E5=B1=82=E6=AC=A1=E6=9F=A5=E8=AF=A2=EF=BC=8C?= =?UTF-8?q?=E5=91=8A=E8=AD=A6=E5=90=8E=E6=8C=82=E5=BA=93=EF=BC=8C=E4=BA=A7?= =?UTF-8?q?=E7=94=9Fcore?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/plan/planstartwith.cpp | 17 ++++++++++++++++- src/test/regress/expected/sw_bugfix-2.out | 17 +++++++++++++++++ src/test/regress/sql/sw_bugfix-2.sql | 18 +++++++++++++++++- 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/optimizer/plan/planstartwith.cpp b/src/gausskernel/optimizer/plan/planstartwith.cpp index 274e999ada..0788d3f14b 100644 --- a/src/gausskernel/optimizer/plan/planstartwith.cpp +++ b/src/gausskernel/optimizer/plan/planstartwith.cpp @@ -555,6 +555,16 @@ static char *CheckAndFixSiblingsColName(PlannerInfo *root, Plan *basePlan, errmsg("expression with none-var in order siblings is not supported"))); } + foreach (lc, vars) { + Var* var = (Var *)lfirst(lc); + RangeTblEntry *rte = root->simple_rte_array[var->varno]; + char *raw_cte_alias = (char *)strVal(list_nth(rte->eref->colnames, var->varattno - 1)); + if (raw_cte_alias != NULL && 
IsPseudoReturnColumn(raw_cte_alias)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Not support refer startwith Pseudo column in order siblings by."))); + } + } + /* do not support multi-column refs specified as order sibling's sort entry */ if (list_length(vars) > 1) { ereport(WARNING, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -565,7 +575,12 @@ static char *CheckAndFixSiblingsColName(PlannerInfo *root, Plan *basePlan, RangeTblEntry *rte = root->simple_rte_array[var->varno]; char *raw_cte_alias = (char *)strVal(list_nth(rte->eref->colnames, var->varattno - 1)); resultColName = strrchr(raw_cte_alias, '@'); - resultColName += 1; /* fix '@' offset */ + if (resultColName != NULL) { + resultColName += 1; /* fix '@' offset */ + } else { + ereport(ERROR, (errmodule(MOD_OPT_PLANNER), + errmsg("Invalid column name %s in order siblings is found.", raw_cte_alias))); + } return resultColName; } diff --git a/src/test/regress/expected/sw_bugfix-2.out b/src/test/regress/expected/sw_bugfix-2.out index 99cd9f242b..76c218f8bc 100755 --- a/src/test/regress/expected/sw_bugfix-2.out +++ b/src/test/regress/expected/sw_bugfix-2.out @@ -2342,3 +2342,20 @@ explain select * from sw_tb_3 where exists (select * from sw_tb_1, sw_tb_2 where drop table sw_tb_1; drop table sw_tb_2; drop table sw_tb_3; +-- test ORDER SIBLINGS BY clause which contains an expression or alias with a pseudocolumn +drop table if exists hier; +create table hier(parent varchar(30),child varchar(30)); +insert into hier values(null,'Asia'); +insert into hier values('Asia','China'); +insert into hier values('Asia','Japan'); +insert into hier values('Asia','ENGLAND'); +insert into hier values('Asia','HONGKONG'); +insert into hier values('China','BEIJING'); +insert into hier values('China','SHANGHAI'); +insert into hier values('China','AK47'); +insert into hier values('China','天津'); +select child, level, lpad(' ', level*3, ' ')||child c1 from hier start with parent is null connect by prior child = 
parent ORDER SIBLINGS BY c1; +ERROR: Not support refer startwith Pseudo column in order siblings by. +select child, level, lpad(' ', level*3, ' ')||child c1, level c2 from hier start with parent is null connect by prior child = parent ORDER SIBLINGS BY c2; +ERROR: Not support refer startwith Pseudo column in order siblings by. +drop table hier; \ No newline at end of file diff --git a/src/test/regress/sql/sw_bugfix-2.sql b/src/test/regress/sql/sw_bugfix-2.sql index d98ff79005..ec21278013 100644 --- a/src/test/regress/sql/sw_bugfix-2.sql +++ b/src/test/regress/sql/sw_bugfix-2.sql @@ -865,4 +865,20 @@ explain select * from sw_tb_1,sw_tb_2 where (sw_tb_1.a+sw_tb_1.b=sw_tb_2.b or sw explain select * from sw_tb_3 where exists (select * from sw_tb_1, sw_tb_2 where sw_tb_1.a + sw_tb_2.a = sw_tb_3.a connect by level < 2); drop table sw_tb_1; drop table sw_tb_2; -drop table sw_tb_3; \ No newline at end of file +drop table sw_tb_3; + +-- test ORDER SIBLINGS BY clause which contains an expression or alias with a pseudocolumn +drop table if exists hier; +create table hier(parent varchar(30),child varchar(30)); +insert into hier values(null,'Asia'); +insert into hier values('Asia','China'); +insert into hier values('Asia','Japan'); +insert into hier values('Asia','ENGLAND'); +insert into hier values('Asia','HONGKONG'); +insert into hier values('China','BEIJING'); +insert into hier values('China','SHANGHAI'); +insert into hier values('China','AK47'); +insert into hier values('China','天津'); +select child, level, lpad(' ', level*3, ' ')||child c1 from hier start with parent is null connect by prior child = parent ORDER SIBLINGS BY c1; +select child, level, lpad(' ', level*3, ' ')||child c1, level c2 from hier start with parent is null connect by prior child = parent ORDER SIBLINGS BY c2; +drop table hier; \ No newline at end of file -- Gitee From d16b0aab691493599a6af21b259455eb6f03379a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Mon, 5 
Aug 2024 16:06:58 +0800 Subject: [PATCH 144/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dview=E5=88=A0?= =?UTF-8?q?=E9=99=A4=E4=BE=9D=E8=B5=96=E5=88=86=E5=8C=BA=E8=A1=A8=E3=80=81?= =?UTF-8?q?enum=E3=80=81set=20issue?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/dependency.cpp | 8 +++++-- src/common/backend/nodes/outfuncs.cpp | 26 +++++++++++++++++++++++ src/common/backend/nodes/readfuncs.cpp | 8 +++++++ src/common/backend/utils/init/globals.cpp | 3 ++- src/include/miscadmin.h | 1 + src/include/nodes/parsenodes.h | 8 +++++++ 6 files changed, 51 insertions(+), 3 deletions(-) diff --git a/src/common/backend/catalog/dependency.cpp b/src/common/backend/catalog/dependency.cpp index 89cf82de1e..acd436246c 100644 --- a/src/common/backend/catalog/dependency.cpp +++ b/src/common/backend/catalog/dependency.cpp @@ -951,7 +951,8 @@ void reportDependentObjects( * In restrict mode, we check targetObjects, remove object entries related to views from targetObjects, * and ensure that no errors are reported due to deleting table fields that have view references. 
*/ - if (behavior == DROP_RESTRICT && origObject != NULL && (origObject->objectSubId != 0 || u_sess->attr.attr_sql.dolphin)) { + if (behavior == DROP_RESTRICT && + ((origObject != NULL && origObject->objectSubId != 0) || u_sess->attr.attr_sql.dolphin)) { ObjectAddresses* newTargetObjects = new_object_addresses(); const ObjectAddress* originalObj = NULL; const int typeOidOffset = 2; @@ -974,7 +975,10 @@ void reportDependentObjects( (originalObj->objectId + typeOidOffset) == obj->objectId)) { // delete pg_type entry add_exact_object_address_extra(obj, extra, newTargetObjects); - } else if (objClass != OCLASS_REWRITE || (u_sess->attr.attr_sql.dolphin && extra->dependee.objectId == origObject->objectId)) { // delete constraint and so on + } else if (objClass != OCLASS_REWRITE || + (u_sess->attr.attr_sql.dolphin && + originalObj != NULL && extra->dependee.objectId == originalObj->objectId)) { + // delete constraint and so on add_exact_object_address_extra(obj, extra, newTargetObjects); } } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index d59fb43fd6..4cb25fe0af 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -5236,6 +5236,23 @@ static void _outIndexHintDefinition(StringInfo str, IndexHintDefinition* node) WRITE_ENUM_FIELD(index_type, IndexHintType); } +static void _outPartitionNameList(StringInfo str, List *list, const char *key) +{ + ListCell *lc = NULL; + List *names = NIL; + foreach(lc, list) { + Oid id = lfirst_oid(lc); + char *name = ""; + if (OidIsValid(id)) { + name = getPartitionName(id, false); + } + Value *val = makeString(name); + names = lappend(names, val); + } + appendStringInfo(str, key); + _outList(str, names); +} + static void _outRangeTblEntry(StringInfo str, RangeTblEntry* node) { WRITE_NODE_TYPE("RTE"); @@ -5355,6 +5372,15 @@ static void _outRangeTblEntry(StringInfo str, RangeTblEntry* node) WRITE_NODE_FIELD(partitionOidList); 
WRITE_NODE_FIELD(subpartitionOidList); } + + if (t_thrd.proc->workingVersionNum >= PARTITION_NAME_VERSION_NUM) { + if (node->partitionOidList != NIL) { + _outPartitionNameList(str, node->partitionOidList, " :partitionNameList "); + } + if (node->subpartitionOidList != NIL) { + _outPartitionNameList(str, node->subpartitionOidList, " :subpartitionNameList "); + } + } } /* diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 5b818c43b0..1389c9e7a6 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -3469,6 +3469,14 @@ static RangeTblEntry* _readRangeTblEntry(void) local_node->subpartitionOidList = lappend_oid(local_node->subpartitionOidList, local_node->subpartitionOid); } + IF_EXIST(partitionNameList) { + READ_NODE_FIELD(partitionNameList); + } + + IF_EXIST(subpartitionNameList) { + READ_NODE_FIELD(subpartitionNameList); + } + READ_DONE(); } diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index dda1df83ac..00258ea4d2 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,12 +76,13 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92946; +const uint32 GRAND_VERSION_NUM = 92947; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. 
********************************************/ +const uint32 PARTITION_NAME_VERSION_NUM = 92947; const uint32 AUDIT_SHA_VERSION_NUM = 92946; const uint32 NETTIME_TRACE_VERSION_NUM = 92945; const uint32 HBA_CONF_VERSION_NUM = 92944; diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 205fe670bb..13d0c030a5 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -152,6 +152,7 @@ extern const uint32 FLOAT_VERSION_NUMBER; extern const uint32 STRAIGHT_JOIN_VERSION_NUMBER; extern const uint32 PARALLEL_ENABLE_VERSION_NUM; extern const uint32 AUDIT_SHA_VERSION_NUM; +extern const uint32 PARTITION_NAME_VERSION_NUM; extern void register_backend_version(uint32 backend_version); extern bool contain_backend_version(uint32 version_number); diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index daea0a285f..be0c438402 100755 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -377,11 +377,19 @@ typedef struct RangeTblEntry { * or select * from table_name partition for (partition_key_value_list) * or delete from table_name partition (partition_name, ...) */ + List *partitionNameList; /* + * Names of a partition if relation is partitioned table. + * Get names when partition tables deleted. + */ List *subpartitionOidList; /* * OIDs of a subpartition if relation is partitioned table. * Select * from table_name subpartition (subpartition_name); * or delete from table_name partition (partition_name, ...) */ + List *subpartitionNameList; /* + * Names of a subpartition if relation is partitioned table. + * Get names when partition tables deleted. 
+ */ int cursorDop; /* for functionscan with cursor param */ } RangeTblEntry; -- Gitee From 5a1d3d85103a510a01ebb36b086bce835d3ae1b2 Mon Sep 17 00:00:00 2001 From: zhang_xubo <2578876417@qq.com> Date: Mon, 5 Aug 2024 16:35:05 +0800 Subject: [PATCH 145/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=AE=B9=E5=99=A8?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=A4=B1=E8=B4=A5=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docker/upgrade/upgrade_common.sh | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/docker/upgrade/upgrade_common.sh b/docker/upgrade/upgrade_common.sh index fb63888add..87dff17d60 100644 --- a/docker/upgrade/upgrade_common.sh +++ b/docker/upgrade/upgrade_common.sh @@ -988,6 +988,27 @@ function upgrade_pre() { log "no need do upgrade_pre step" fi } + +function cp_dolphin_upgrade_script_step1() { + if ls "$GAUSSHOME"/share/postgresql/extension/ | grep -qE "dolphin--(.*)--(.*)sql" ; then + if cp -f "$GAUSSHOME"/share/postgresql/extension/dolphin--*--*sql "$GAUSS_TMP_PATH"/ ; then + log "cp dolphin upgrade script step1[upgrade_pre] successfully" + else + die "cp dolphin upgrade script step1[upgrade_pre] failed" ${err_upgrade_pre} + fi + fi +} + +function cp_dolphin_upgrade_script_step2() { + if ls "$GAUSS_TMP_PATH/" | grep -qE "dolphin--(.*)--(.*)sql" ; then + if cp -f "$GAUSS_TMP_PATH"/dolphin--*--*sql "$GAUSSHOME"/share/postgresql/extension/ ; then + log "cp dolphin upgrade script step2[upgrade_bin] successfully" + else + die "cp dolphin upgrade script step2[upgrade_bin] failed" ${err_upgrade_pre} + fi + fi +} + function upgrade_pre_step1() { check_disk check_version @@ -996,6 +1017,7 @@ function upgrade_pre_step1() { prepare_sql_all fi bak_gauss + cp_dolphin_upgrade_script_step1 record_step 1 } @@ -1024,6 +1046,7 @@ function upgrade_bin() { parses_step + cp_dolphin_upgrade_script_step2 if [[ "$current_step" -lt 0 ]]; then die "Step file may be
changed invalid" ${err_upgrade_bin} elif [[ "$current_step" -lt 2 ]]; then @@ -1146,6 +1169,13 @@ function upgrade_post_step56() { die "Guassdb is not running" ${err_upgrade_post} fi record_step 5 + + if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback-post_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback-post_otherdb.sql otherdb; then + debug "upgrade-rollback post sql successfully" + else + die "upgrade-rollback post sql failed" ${err_rollback_post} + fi + if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade-post_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade-post_otherdb.sql otherdb; then debug "upgrade post sql successfully" else -- Gitee From 22ec6001916651f73c1f97ecf00b5d7b8f141095 Mon Sep 17 00:00:00 2001 From: lukeman Date: Mon, 5 Aug 2024 17:43:31 +0800 Subject: [PATCH 146/347] =?UTF-8?q?=E3=80=90=E5=A4=84=E7=90=868.0=20core?= =?UTF-8?q?=E9=97=AE=E9=A2=98=E3=80=91blob=E5=AD=97=E7=AC=A6=E5=BA=8F?= =?UTF-8?q?=E6=96=AD=E8=A8=80=E5=AF=BC=E8=87=B4core,=20=E6=B7=BB=E5=8A=A0I?= =?UTF-8?q?sBinaryType=E7=9A=84hook=E5=87=BD=E6=95=B0=EF=BC=8C=E5=A4=84?= =?UTF-8?q?=E7=90=86tinyblob=E7=B1=BB=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_type.cpp | 5 ++++- src/include/knl/knl_session.h | 1 + src/include/parser/parse_type.h | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/common/backend/parser/parse_type.cpp b/src/common/backend/parser/parse_type.cpp index 34f329f182..f4c1f7e281 100644 --- a/src/common/backend/parser/parse_type.cpp +++ b/src/common/backend/parser/parse_type.cpp @@ -1773,6 +1773,9 @@ Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid, Oid n bool IsBinaryType(Oid typid) { + if (u_sess->hook_cxt.isBinaryType != NULL) { + return ((isBinaryType)(u_sess->hook_cxt.isBinaryType))(typid); + } return ((typid) == BLOBOID || (typid) == BYTEAOID); } @@ -1807,4 +1810,4 @@ TypeTupStatus 
GetTypeTupStatus(Type typ) return (UNDEFINEDOID == HeapTupleGetOid(typ) ? UndefineTypeTup : NormalTypeTup); } return InvalidTypeTup; -} \ No newline at end of file +} diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 607d2004cb..e89df225e6 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -2947,6 +2947,7 @@ typedef struct knl_u_hook_context { void *deparseQueryHook; void *checkSqlFnRetvalHook; void *typeTransfer; + void *isBinaryType; void *forTsdbHook; void *pluginPlannerHook; void *groupingplannerHook; diff --git a/src/include/parser/parse_type.h b/src/include/parser/parse_type.h index 83a21cea6d..faabc0dfac 100644 --- a/src/include/parser/parse_type.h +++ b/src/include/parser/parse_type.h @@ -65,6 +65,7 @@ extern HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname TypeDependExtend* depend_extend = NULL); extern char* CastPackageTypeName(const char* typName, Oid pkgOid, bool isPackage, bool isPublic = true); extern bool IsBinaryType(Oid typid); +typedef bool (*isBinaryType)(Oid typid); #define ISCOMPLEX(typeid) (typeidTypeRelid(typeid) != InvalidOid) extern void check_type_supports_multi_charset(Oid typid, bool allow_array); extern char* ParseTypeName(const char* typName, Oid pkgOid); -- Gitee From 20770e00a236b28875447a5724bd0188ccebedb2 Mon Sep 17 00:00:00 2001 From: zzh Date: Mon, 5 Aug 2024 20:36:50 +0800 Subject: [PATCH 147/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=BE=9D=E8=B5=96?= =?UTF-8?q?=E7=9A=84=E4=B8=89=E6=96=B9=E5=BA=93=E7=89=88=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Third_Party_Open_Source_Software_Notice | 1650 +++++++++-------------- 1 file changed, 632 insertions(+), 1018 deletions(-) diff --git a/Third_Party_Open_Source_Software_Notice b/Third_Party_Open_Source_Software_Notice index 5c10445ad4..cc8517160b 100644 --- a/Third_Party_Open_Source_Software_Notice +++ b/Third_Party_Open_Source_Software_Notice 
@@ -28,7 +28,7 @@ Redistributions of source code must retain the above copyright notice, this list Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Software: Editline Library - libedit 3.1-20190324 +Software: Editline Library - libedit 3.1-20210910 Copyright notice:Copyright (c) 1992, 1993 Copyright (C) 1996-2015 Free Software Foundation, Inc. Copyright (c) 1997 The NetBSD Foundation, Inc. @@ -68,7 +68,7 @@ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Software: OpenSSL 1.1.1d +Software: OpenSSL 1.1.1m Copyright notice:Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved. Copyright 2011-2018 The OpenSSL Project Authors. All Rights Reserved. @@ -294,7 +294,7 @@ License: OpenSSL Combined License * [including the GNU Public Licence.] 
*/ -Software: lz4 v1.9.2 +Software: lz4 v1.9.3 Copyright notice: Copyright (c) 2016-present, Przemyslaw Skibinski copyright and related and neighboring rights to this software to Copyright (c) 2018-present lzutao @@ -344,79 +344,7 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Software: Postgresql JDBC Driver 42.2.5 -Copyright notice:Copyright (c) 1997, PostgreSQL Global Development Group -All rights reserved. -License:BSD 2-Clause License -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
- -Software: Postgres-XC 1.1 -Copyright notice:Portions Copyright (c) 2010-2013, Postgres-XC Development Group -Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group -Portions Copyright (c) 1994, The Regents of the University of California -License:PostgreSQL License - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose, without fee, and without a written agreement -is hereby granted, provided that the above copyright notice and this -paragraph and the following two paragraphs appear in all copies. - -IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR -DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING -LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS -DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS -ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO -PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - -IN NO EVENT SHALL POSTGRESQL GLOBAL DEVELOPMENT GROUP BE LIABLE TO ANY -PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS -SOFTWARE AND ITS DOCUMENTATION, EVEN IF POSTGRESQL GLOBAL DEVELOPMENT -GROUP HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -POSTGRESQL GLOBAL DEVELOPMENT GROUP SPECIFICALLY DISCLAIMS ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS -ON AN "AS IS" BASIS, AND THE POSTGRESQL GLOBAL DEVELOPMENT GROUP HAS NO OBLIGATIONS TO -PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
- -IN NO EVENT SHALL POSTGRES-XC DEVELOPMENT GROUP BE LIABLE TO ANY -PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS -SOFTWARE AND ITS DOCUMENTATION, EVEN IF POSTGRES-XC DEVELOPMENT -GROUP HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -POSTGRES-XC DEVELOPMENT GROUP SPECIFICALLY DISCLAIMS ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS -ON AN "AS IS" BASIS, AND THE POSTGRES-XC DEVELOPMENT GROUP HAS NO OBLIGATIONS TO -PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - -Software: PSQL ODBC 10.03.0000 +Software: PSQL ODBC 2.3.9 Copyright notice:Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. Copyright (C) 1991 Free Software Foundation, Inc. @@ -1388,112 +1316,7 @@ necessary. Here is a sample; alter the names: That's all there is to it! -Software: snappy 1.1.7 -Copyright notice:Copyright 2011, Google Inc. -Copyright 2011 Martin Gieseking . -Copyright 2008 Google Inc. All Rights Reserved. -Copyright 2011 Google Inc. All Rights Reserved. -Copyright 2005 Google Inc. All Rights Reserved. -Copyright 2005 and onwards Google Inc. -License:MIT License -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -=== - -Some of the benchmark data in testdata/ is licensed differently: - - - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and - is licensed under the Creative Commons Attribution 3.0 license - (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ - for more information. - - - kppkn.gtb is taken from the Gaviota chess tablebase set, and - is licensed under the MIT License. See - https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 - for more information. - - - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper - “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA - Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, - which is licensed under the CC-BY license. See - http://www.ploscompbiol.org/static/license for more ifnormation. - - - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project - Gutenberg. 
The first three have expired copyrights and are in the public - domain; the latter does not have expired copyright, but is still in the - public domain according to the license information - (http://www.gutenberg.org/ebooks/53). - - -Software: protobuf 3.7.1 -Copyright notice:Copyright 2008 Google Inc. All rights reserved. -Copyright 2015 Google Inc. All rights reserved. -Copyright 2017 Google Inc. All rights reserved. -Copyright 2016 Google Inc. All rights reserved. -Copyright 2014 Google Inc. All rights reserved. -Copyright 2007 Google Inc. All Rights Reserved. -Copyright 2008 Google Inc. -Copyright (c) 2007-2010 Baptiste Lepilleur. -License:BSD LICENSE - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. - - - -Software: masstree v0.9.0 +Software: masstree v1.0.1 Copyright notice:Copyright (c) 2012-2014 President and Fellows of Harvard College Copyright (c) 2012-2014 Massachusetts Institute of Technology Copyright (c) 2012-2013 President and Fellows of Harvard College @@ -1532,7 +1355,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -Software: mysql_fdw v2.5.3 +Software: mysql_fdw v2.5.5 Copyright notice:Copyright (c) 2011-2020, EnterpriseDB Corporation. License: @@ -1576,7 +1399,7 @@ AND THE MAGISTRAT DER STADT WIEN HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. -Software: llvm 7.0.0 +Software: llvm 12.0.1 Copyright notice:Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign. License:MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of @@ -1606,219 +1429,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - -Software: ncurses 6.2 -Copyright notice:Copyright (c) 2000,2003 Free Software Foundation, Inc. 
-Copyright (c) 1998-2017,2018 Free Software Foundation, Inc. -Copyright (c) 2003-2014,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2016,2017 Free Software Foundation, Inc. -Copyright (c) 1999-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2006-2012,2017 Free Software Foundation, Inc. -Copyright (c) 2008-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2011-2012,2016 Free Software Foundation, Inc. -Copyright (c) 2003-2012,2014 Free Software Foundation, Inc. -Copyright (c) 2005-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2002-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2003-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2002-2006,2017 Free Software Foundation, Inc. -Copyright (c) 2017 Free Software Foundation, Inc. -Copyright (c) 2007-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2009-2016,2017 Free Software Foundation, Inc. -Copyright (c) 1999-2013,2017 Free Software Foundation, Inc. -Copyright (c) 2014,2017 Free Software Foundation, Inc. -Copyright (c) 2007-2013,2017 Free Software Foundation, Inc. -Copyright (c) 2013-2014,2017 Free Software Foundation, Inc. -Copyright (c) 2006-2014,2017 Free Software Foundation, Inc. -Copyright (c) 2003-2013,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2014,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2010,2017 Free Software Foundation, Inc. -Copyright (c) 2006-2016,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2013,2017 Free Software Foundation, Inc. -Copyright (c) 2007-2012,2017 Free Software Foundation, Inc. -Copyright (c) 2007-2010,2017 Free Software Foundation, Inc. -Copyright (c) 2008-2014,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2006,2008 Free Software Foundation, Inc. -Copyright (c) 2007-2014,2017 Free Software Foundation, Inc. -Copyright (c) 2009-2010,2012 Free Software Foundation, Inc. -Copyright (c) 2003-2006,2010 Free Software Foundation, Inc. -Copyright (c) 2016,2017 Free Software Foundation, Inc. 
-Copyright (c) 2010-2015,2017 Free Software Foundation, Inc. -Copyright (c) 2006-2013,2017 Free Software Foundation, Inc. -Copyright (c) 1998 Free Software Foundation, Inc. -Copyright (c) 2017,2018 Free Software Foundation, Inc. -Copyright (c) 2000-2013,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2017,2018 Free Software Foundation, Inc. -Copyright (c) 2007-2017,2018 Free Software Foundation, Inc. -Copyright (c) 2007-2009,2018 Free Software Foundation, Inc. -Copyright (c) 1998-2013,2014 Free Software Foundation, Inc. -Copyright (c) 1998-2002,2006 Free Software Foundation, Inc. -Copyright (c) 2007-2011,2017 Free Software Foundation, Inc. -Copyright (c) 2007-2008,2009 Free Software Foundation, Inc. -Copyright (c) 2015,2016 Free Software Foundation, Inc. -Copyright (c) 2015-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2012 Free Software Foundation, Inc. -Copyright (c) 2003-2005,2008 Free Software Foundation, Inc. -Copyright (c) 2004-2011,2016 Free Software Foundation, Inc. -Copyright (c) 2002-2009,2011 Free Software Foundation, Inc. -Copyright (c) 2001-2016,2017 Free Software Foundation, Inc. -Copyright (c) 2002-2010,2014 Free Software Foundation, Inc. -Copyright (c) 2002-2011,2016 Free Software Foundation, Inc. -Copyright (c) 2002-2010,2016 Free Software Foundation, Inc. -Copyright (c) 2002-2009,2016 Free Software Foundation, Inc. -Copyright (c) 2002-2007,2009 Free Software Foundation, Inc. -Copyright (c) 2007,2017 Free Software Foundation, Inc. -Copyright (c) 2004,2009 Free Software Foundation, Inc. -Copyright (c) 2003-2011,2016 Free Software Foundation, Inc. -Copyright (c) 2002-2014,2017 Free Software Foundation, Inc. -Copyright (c) 2002-2015,2016 Free Software Foundation, Inc. -Copyright (c) 2001-2011,2012 Free Software Foundation, Inc. -Copyright (c) 2012,2013 Free Software Foundation, Inc. -Copyright (c) 1998-2007,2008 Free Software Foundation, Inc. -Copyright (c) 1998-2005,2011 Free Software Foundation, Inc. 
-Copyright (c) 1998-2012,2014 Free Software Foundation, Inc. -Copyright (c) 1998-2011,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2007,2013 Free Software Foundation, Inc. -Copyright (c) 1998-2003,2005 Free Software Foundation, Inc. -Copyright (c) 1998-2012,2013 Free Software Foundation, Inc. -Copyright (c) 2007-2012,2014 Free Software Foundation, Inc. -Copyright (c) 1998-2005,2012 Free Software Foundation, Inc. -Copyright (c) 1998-2012,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2008,2012 Free Software Foundation, Inc. -Copyright (c) 1998-2006,2007 Free Software Foundation, Inc. -Copyright (c) 1998-2000,2008 Free Software Foundation, Inc. -Copyright (c) 2000,2001,2017 Free Software Foundation, Inc. -Copyright (c) 1998,2006 Free Software Foundation, Inc. -Copyright (c) 2001-2013,2017 Free Software Foundation, Inc. -Copyright (c) 2008-2010,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2009,2017 Free Software Foundation, Inc. -Copyright (c) 2012-2013,2016 Free Software Foundation, Inc. -Copyright (c) 2011-2014,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2015,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2012,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2010,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2010,2012 Free Software Foundation, Inc. -Copyright (c) 1998-2004,2010 Free Software Foundation, Inc. -Copyright (c) 1998-2010,2013 Free Software Foundation, Inc. -Copyright (c) 1998-2012,2015 Free Software Foundation, Inc. -Copyright (c) 1998-2009,2010 Free Software Foundation, Inc. -Copyright (c) 2008-2012,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2006,2009 Free Software Foundation, Inc. -Copyright (c) 2004-2010,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2009,2012 Free Software Foundation, Inc. -Copyright (c) 1998,2006 Free Software Foundation, Inc. -Copyright (c) 1998-2011,2012 Free Software Foundation, Inc. 
-Copyright (c) 1999-2011,2012 Free Software Foundation, Inc. -Copyright (c) 1999-2010,2016 Free Software Foundation, Inc. -Copyright (c) 2001-2008,2012 Free Software Foundation, Inc. -Copyright (c) 2006,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2012,2018 Free Software Foundation, Inc. -Copyright (c) 1998-2006,2018 Free Software Foundation, Inc. -Copyright (c) 2008-2011,2012 Free Software Foundation, Inc. -Copyright (c) 1998-2015,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2014,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2005,2010 Free Software Foundation, Inc. -Copyright (c) 1998-2008,2010 Free Software Foundation, Inc. -Copyright (c) 2010,2012 Free Software Foundation, Inc. -Copyright (c) 1998-2006,2009 Free Software Foundation, Inc. -Copyright (c) 2002,2006 Free Software Foundation, Inc. -Copyright (c) 1998-2000,2005 Free Software Foundation, Inc. -Copyright (c) 2004-2011,2012 Free Software Foundation, Inc. -Copyright (c) 2008-2010,2014 Free Software Foundation, Inc. -Copyright (c) 2010 Free Software Foundation, Inc. -Copyright (c) 1998-2015,2018 Free Software Foundation, Inc. -Copyright (c) 1998-2005,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2003,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2008,2009 Free Software Foundation, Inc. -Copyright (c) 1998-2000,2012 Free Software Foundation, Inc. -Copyright (c) 1999-2011,2017 Free Software Foundation, Inc. -Copyright (c) 2006-2011,2013 Free Software Foundation, Inc. -Copyright (c) 1998-2009,2013 Free Software Foundation, Inc. -Copyright (c) 1998-2010,2015 Free Software Foundation, Inc. -Copyright (c) 1998-2003,2009 Free Software Foundation, Inc. -Copyright (c) 2000-2008,2012 Free Software Foundation, Inc. -Copyright (c) 1998-2011,2015 Free Software Foundation, Inc. -Copyright (c) 2007-2010,2011 Free Software Foundation, Inc. -Copyright (c) 2001-2003,2017 Free Software Foundation, Inc. -Copyright (c) 1999-2013,2016 Free Software Foundation, Inc. 
-Copyright (c) 2013-2014,2016 Free Software Foundation, Inc. -Copyright (c) 2000-2012,2017 Free Software Foundation, Inc. -Copyright (c) 2005-2012,2017 Free Software Foundation, Inc. -Copyright (c) 2007-2009,2016 Free Software Foundation, Inc. -Copyright (c) 2010-2014,2016 Free Software Foundation, Inc. -Copyright (c) 2002-2012,2017 Free Software Foundation, Inc. -Copyright (c) 2001-2015,2017 Free Software Foundation, Inc. -Copyright (c) 2002-2015,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2007,2010 Free Software Foundation, Inc. -Copyright (c) 2002-2011,2012 Free Software Foundation, Inc. -Copyright (c) 1998-2006,2010 Free Software Foundation, Inc. -Copyright (c) 2002-2010,2017 Free Software Foundation, Inc. -Copyright (c) 2007-2015,2017 Free Software Foundation, Inc. -Copyright (c) 2007-2014,2015 Free Software Foundation, Inc. -Copyright (c) 1998-2013,2015 Free Software Foundation, Inc. -Copyright (c) 2008-2015,2017 Free Software Foundation, Inc. -Copyright (c) 2000-2016,2017 Free Software Foundation, Inc. -Copyright (c) 1998-2011,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2014,2015 Free Software Foundation, Inc. -Copyright (c) 2010-2013,2017 Free Software Foundation, Inc. -Copyright (c) 1999-2008,2010 Free Software Foundation, Inc. -Copyright (c) 1998,2010 Free Software Foundation, Inc. -Copyright (c) 2011-2015,2017 Free Software Foundation, Inc. -Copyright (c) 2016 Free Software Foundation, Inc. -Copyright (c) 2014 Free Software Foundation, Inc. -Copyright (c) 2009-2010,2011 Free Software Foundation, Inc. -Copyright (c) 1980, 1991, 1993 -Copyright (c) 1998-2002,2003 Free Software Foundation, Inc. -Copyright (c) 1998-2004,2012 Free Software Foundation, Inc. -Copyright (c) 2011,2014 Free Software Foundation, Inc. -Copyright (c) 2011-2014,2015 Free Software Foundation, Inc. -Copyright (c) 2011,2015 Free Software Foundation, Inc. -Copyright (c) 2016,2018 Free Software Foundation, Inc. -Copyright (c) 1998,2003 Free Software Foundation, Inc. 
-Copyright (c) 1998-2009,2014 Free Software Foundation, Inc. -Copyright (c) 1999-2009,2011 Free Software Foundation, Inc. -Copyright (c) 2003-2006,2009 Free Software Foundation, Inc. -Copyright (c) 2005,2009 Free Software Foundation, Inc. -Copyright (c) 1998-2000,2009 Free Software Foundation, Inc. -Copyright (c) 1998-2001,2009 Free Software Foundation, Inc. -Copyright (c) 1998-2004,2017 Free Software Foundation, Inc. -Copyright (c) 2008-2012,2014 Free Software Foundation, Inc. -Copyright (c) 1998-2009,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2013,2016 Free Software Foundation, Inc. -Copyright (c) 2004-2009,2016 Free Software Foundation, Inc. -Copyright (c) 1998-2004,2009 Free Software Foundation, Inc. -Copyright (c) 1998-2010,2011 Free Software Foundation, Inc. -Copyright (c) 1998-2009,2011 Free Software Foundation, Inc. -Copyright (c) 1998-2005,2009 Free Software Foundation, Inc. -Copyright (c) 1998-2007,2009 Free Software Foundation, Inc. -Copyright (c) 1998-2017,2017 Free Software Foundation, Inc. -Copyright (c) 1999-2004,2005 Free Software Foundation, Inc.. -License:MIT License -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, distribute with modifications, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR -THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -Except as contained in this notice, the name(s) of the above copyright -holders shall not be used in advertising or otherwise to promote the -sale, use or other dealings in this Software without prior written -authorization. - - -Software: paramiko 2.6.0 +Software: paramiko 2.7.2 Copyright notice:Copyright (C) 2003-2007 Robey Pointer Copyright (C) 2010 Sofian Brabez Copyright (C) 2008 Robey Pointer @@ -2340,7 +1951,7 @@ necessary. Here is a sample; alter the names: That's all there is to it! -Software: huaweicloud-sdk-c-obs 3.1.3 +Software: huaweicloud-sdk-c-obs 3.19.9.3 Copyright notice:Copyright (C) 1999-2003, 2005-2006, 2008-2011 Free Software Foundation, Inc. Copyright 1996-1999 by David Turner, Robert Wilhelm, and Werner Lemberg. @@ -4622,7 +4233,7 @@ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION limitations under the License. -------------------------------------------------------------------- -Software: Kerberos 5 (KRB5) 1.17.1 +Software: Kerberos 5 (KRB5) 1.18.3 Copyright notice: Copyright 1997, 1998, 1999 Computing Research Labs, Copyright 1998-2008 The OpenLDAP Foundation. Copyright 1999 Computing Research Labs, New Mexico State University @@ -5047,7 +4658,7 @@ Copyright © 1985-2002 by the Massachusetts Institute of Technology. Export of software employing encryption from the United States of America may require a specific license from the United States Government. It is the responsibility of any person or organization contemplating export to obtain such a license before exporting. 
WITHIN THAT CONSTRAINT, permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of M.I.T. not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. Furthermore if you modify this software you must label your software as modified software and not distribute it in such a fashion that it might be confused with the original MIT software. M.I.T. makes no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. -Software: Curl and Libcurl 7.68.0 +Software: Curl and Libcurl 7.78.0 Copyright notice:Copyright (C) 2006-2015 wolfSSL Inc. Copyright (C) 2017 - 2019 Red Hat, Inc. Copyright (C) 1998 - 2019, Daniel Stenberg, , et al. @@ -5156,480 +4767,240 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization of the copyright holder. -Software: orc 1.5.7 -Copyright notice:Copyright 2015 The Apache Software Foundation. -Copyright 2016 The Apache Software Foundation. -(c) Copyright [2008-2015] Tom Preston-Werner -Copyright 2013-2019 The Apache Software Foundation -(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P -Copyright 2013 Scott Jehl -License:Apache License Version 2.0 +Software: libiconv 1.16 +Copyright notice:Copyright (C) 1999-2001, 2003, 2011 Bruno Haible. +Copyright (C) 1999-2001 Bruno Haible. +Copyright (C) 1996-2015 Free Software Foundation, Inc. +Copyright (C) 1999-2003, 2008 Free Software Foundation, Inc. 
+Copyright (C) 1999-2002, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2005, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2012, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2002, 2006, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2002, 2006, 2010 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2005, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001 Free Software Foundation, Inc. +Copyright (C) 1999-2002 Free Software Foundation, Inc. +Copyright (C) 1999-2005, 2012, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2002, 2004-2011, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2004, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2012, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2011, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2002, 2005, 2016 Free Software Foundation, Inc. +Copyright (C) 2005, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2005, 2007, 2016 Free Software Foundation, Inc. +Copyright (C) 2001, 2005, 2016 Free Software Foundation, Inc. +Copyright (C) 2001, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2007, 2016 Free Software Foundation, Inc. +Copyright (C) 2001 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2005, 2012, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2005, 2011, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2005, 2008, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2000, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2003, 2005, 2008, 2012, 2017 Free Software Foundation, Inc. +Copyright (C) 1999-2003, 2005, 2008, 2012 Free Software Foundation, Inc. +Copyright (C) 2000-2002, 2005-2006, 2008-2009, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2003, 2005, 2011-2012, 2016, 2018 Free Software Foundation, Inc. 
+Copyright (C) 1999-2006, 2012, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2010, 2012, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2008, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2008, 2011, 2016, 2018 Free Software Foundation, Inc. +Copyright (C) 1999-2008, 2011, 2018 Free Software Foundation, Inc. +Copyright (C) 1999-2009 Free Software Foundation, Inc. +Copyright (C) 1999-2004, 2008, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2008, 2011-2012, 2016, 2018 Free Software Foundation, Inc. +Copyright (C) 1999-2004, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2004, 2012 Free Software Foundation, Inc. +Copyright (C) 1999-2007, 2012, 2016 Free Software Foundation, Inc. +Copyright (C) 2000 Free Software Foundation, Inc. +Copyright (C) 1999-2003, 2005-2006, 2008 Free Software Foundation, Inc. +Copyright (C) 2000-2002, 2005-2006, 2008-2009, 2011 Free Software Foundation, Inc. +Copyright (C) 2003-2006, 2008-2018 Free Software Foundation, Inc. +ISSLASH(C) tests whether C is a directory separator character. +Copyright (C) 2003, 2005, 2008-2017 Free Software Foundation, Inc. +Copyright (C) 1999-2007, 2016 Free Software Foundation, Inc. +Copyright (C) 1999-2002, 2004, 2016, 2019 Free Software Foundation, Inc. +Copyright (C) 1999-2003 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2008, 2011, 2016 Free Software Foundation, Inc. +Copyright (C) 2001, 2004, 2011 Free Software Foundation, Inc. +Copyright (C) 1999-2001, 2016, 2019 Free Software Foundation, Inc. +Copyright (C) 1991 Free Software Foundation, Inc. +Our method of protecting your rights has two steps: (1) copyright +"copyright" line and a pointer to where the full notice is found. +Copyright (C) 1999-2002, 2011-2012, 2016, 2018 Free Software Foundation, Inc. +Copyright (C) 1999-2004, 2006-2007, 2010, 2012, 2016, 2018 Free Software Foundation, Inc. +Copyright (C) 1999-2002, 2012, 2018 Free Software Foundation, Inc. 
+Copyright (C) 2001-2002, 2005-2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2001-2002, 2006-2019 Free Software Foundation, Inc. +Copyright (C) 2005, 2012 Free Software Foundation, Inc. +Copyright (C) 2000, 2004-2005, 2012, 2016 Free Software Foundation, Inc. +Copyright (C) 2000-2002, 2004-2005 Free Software Foundation, Inc. +Copyright (C) 2008, 2018 Free Software Foundation, Inc. +Copyright (C) 2009, 2011 Free Software Foundation, Inc. +Copyright (C) 86, 91, 1995-1998, 1999, 2012 Free Software Foundation, Inc. +Copyright (C) 2000-2006, 2008-2018 Free Software Foundation, Inc. +Copyright (C) 2018 Free Software Foundation, Inc. +Copyright (C) 2000-2009, 2011-2012, 2016-2019 Free Software Foundation, Inc. +Copyright (C) 2006, 2011-2018 Free Software Foundation, Inc. +Copyright (C) 2006 Free Software Foundation, Inc. +Copyright (C) 2007 Free Software Foundation, Inc. +(1) assert copyright on the software, and (2) offer you this License +"Copyright" also means copyright-like laws that apply to other kinds of +in a fashion requiring copyright permission, other than the making of an +copyright on the Program, and are irrevocable provided the stated +not control copyright. Those thus making or running the covered works +for which you have or can give appropriate copyright permission. +Copyright (C) 1995, 1999, 2001-2007 Free Software Foundation, Inc. +Copyright (C) 2001-2004, 2006 Free Software Foundation, Inc. +Copyright (C) 1990-1996, 2000-2003, 2005-2007 Free Software Foundation, Inc. +Copyright (C) 1990, 1996, 2000-2003, 2005-2006 Free Software Foundation, Inc. +Copyright +Copyright (C) 2011-2019 Free Software Foundation, Inc. +Copyright (C) 2001, 2003-2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2001, 2003-2004, 2007, 2009-2019 Free Software Foundation, +Inc. +Copyright (C) 2009-2019 Free Software Foundation, Inc. +Copyright (C) 1990, 1998-2001, 2003-2006, 2009-2019 Free Software +Copyright 2017-2019 Free Software Foundation, Inc. 
+Copyright (C) 2001, 2003, 2005, 2008-2019 Free Software Foundation, Inc. +Copyright (C) 2010-2019 Free Software Foundation, Inc. +Copyright (C) 2000-2003, 2006, 2008-2019 Free Software Foundation, Inc. +Copyright (C) 1996-2019 Free Software Foundation, Inc. +Copyright (C) 1990, 1998, 2000-2001, 2003-2006, 2009-2019 Free Software +Copyright (C) 1998, 2001, 2003-2006, 2009-2019 Free Software Foundation, +Copyright (C) 2000-2001, 2004-2006, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2008-2019 Free Software Foundation, Inc. +Copyright (C) 1990-1998, 2000-2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 1995-1997, 2003, 2006, 2008-2019 Free Software Foundation, +Copyright (C) 2006-2019 Free Software Foundation, Inc. +Copyright (C) 2001-2004, 2007-2019 Free Software Foundation, Inc. +Copyright (C) 2016-2019 Free Software Foundation, Inc. +Copyright (C) 1995-1998, 2000-2002, 2004-2006, 2009-2019 Free Software +Copyright (C) 2001-2019 Free Software Foundation, Inc. +Copyright 2016-2019 Free Software Foundation, Inc. +Copyright (C) 2000-2003, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 1997-2006, 2008-2019 Free Software Foundation, Inc. +Copyright (C) 1997-1998, 2006-2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2003, 2006-2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2003-2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 1992, 1999, 2001, 2003, 2005, 2009-2019 Free Software +Copyright (C) 2001-2003, 2005-2019 Free Software Foundation, Inc. +Copyright (C) 2001-2004, 2006, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2003-2019 Free Software Foundation, Inc. +Copyright (C) 2002-2003, 2005-2006, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2003-2006, 2008-2019 Free Software Foundation, Inc. +Copyright (C) 2003, 2005, 2008-2019 Free Software Foundation, Inc. +Copyright (C) 2003, 2005-2007, 2009-2019 Free Software Foundation, Inc. 
+Copyright (C) 1993-1994, 1998, 2002-2006, 2009-2019 Free Software +Copyright (C) 2002, 2006, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 1992, 1995-2003, 2005-2019 Free Software Foundation, Inc. +Copyright (C) 2005, 2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2017-2019 Free Software Foundation, Inc. +Copyright (C) 2001-2003, 2006-2019 Free Software Foundation, Inc. +Copyright (C) 2001-2002, 2004-2019 Free Software Foundation, Inc. +Copyright (C) 2004, 2007-2019 Free Software Foundation, Inc. +Copyright (C) 1995, 2001-2004, 2006-2019 Free Software Foundation, Inc. +Copyright (C) 2001-2002, 2007, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2007-2019 Free Software Foundation, Inc. +Copyright (C) 1995-1996, 2001-2019 Free Software Foundation, Inc. +Copyright (C) 1990, 2001, 2003-2006, 2009-2019 Free Software Foundation, +Copyright 2018-2019 Free Software Foundation, Inc. +Copyright (C) 2005-2019 Free Software Foundation, Inc. +Copyright (C) 2002, 2005-2006, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2001-2002, 2005, 2007, 2009-2019 Free Software Foundation, +Copyright (C) 2001-2004, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 2005-2006, 2009-2019 Free Software Foundation, Inc. +Copyright (C) 1990-2000, 2003-2004, 2006-2019 Free Software Foundation, Inc. +License: LGPL V2.0 +GNU Library General Public License +Version 2, June 1991 -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +Copyright (C) 1991 Free Software Foundation, Inc. +59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. - 1. Definitions. +[This is the first released version of the Library GPL. It is numbered 2 because it goes with version 2 of the ordinary GPL.] - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
+Preamble +The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +This license, the Library General Public License, applies to some specially designated Free Software Foundation software, and to other libraries whose authors who decide to use it. You can use it for your libraries too. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library, or if you modify it. 
- "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link a program with the library, you must provide complete object files to the recipients so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +Our method of protecting your rights has two steps: (1) copyright the library, and (2) offer you this license, which gives you legal permission to copy, distribute and/or modify the library. - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +Also, for each distributor's protection, we want to make certain that everyone understands that there is no warranty for this free library. If the library is modified by someone else and passed on, we want its recipients to know that what they have is not the original version, so that any problems introduced by others will not reflect on the original authors' reputations. - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that companies distributing free software will individually obtain patent licenses, thus in effect transforming the program into proprietary software. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +Most GNU software, including some libraries, is covered by the ordinary GNU General Public License, which was designed for utility programs. This license, the GNU Library General Public License, applies to certain designated libraries. This license is quite different from the ordinary one; be sure to read it in full, and don't assume that anything in it is the same as in the ordinary license. 
- "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +The reason we have a separate public license for some libraries is that they blur the distinction we usually make between modifying or adding to a program and simply using it. Linking a program with a library, without changing the library, is in some sense simply using the library, and is analogous to running a utility program or application program. However, in a textual and legal sense, the linked executable is a combined work, a derivative of the original library, and the ordinary General Public License treats it as such. - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +Because of this blurred distinction, using the ordinary General Public License for libraries did not effectively promote software sharing, because most developers did not use the libraries. We concluded that weaker conditions might promote sharing better. - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +However, unrestricted linking of non-free programs would deprive the users of those programs of all benefit from the free status of the libraries themselves. This Library General Public License is intended to permit developers of non-free programs to use free libraries, while preserving your freedom as a user of such programs to change the free libraries that are incorporated in them. (We have not seen how to achieve this as regards changes in header files, but we have achieved it as regards changes in the actual functions of the Library.) The hope is that this will lead to faster development of free libraries. - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, while the latter only works together with the library. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +Note that it is possible for a library to be covered by the ordinary General Public License rather than by this special one. - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION +0. 
This License Agreement applies to any software library which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Library General Public License (also called "this License"). Each licensee is addressed as "you". - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +The "Library", below, refers to any such software library or work which has been distributed under these terms. 
A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +"Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability contains - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +a) The modified work must itself be a software library. - END OF TERMS AND CONDITIONS +b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. - APPENDIX: How to apply the Apache License to your work. 
+c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. - Copyright [yyyy] [name of copyright owner] +(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - http://www.apache.org/licenses/LICENSE-2.0 +Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. +3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. -APACHE ORC SUBCOMPONENTS: +Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. -The Apache ORC project contains subcomponents with separate copyright -notices and license terms. 
Your use of the source code for the these -subcomponents is subject to the terms and conditions of the following -licenses. +This option is useful when you wish to copy part of the code of the Library into a program that is not a library. ----- -Parts of the site formatting includes software developed by Tom Preston-Werner -that are licensed under the MIT License (MIT): +4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. -(c) Copyright [2008-2015] Tom Preston-Werner +If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. 
-The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. - -Software: libiconv 1.16 -Copyright notice:Copyright (C) 1999-2001, 2003, 2011 Bruno Haible. -Copyright (C) 1999-2001 Bruno Haible. -Copyright (C) 1996-2015 Free Software Foundation, Inc. -Copyright (C) 1999-2003, 2008 Free Software Foundation, Inc. -Copyright (C) 1999-2002, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2005, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2012, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2002, 2006, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2002, 2006, 2010 Free Software Foundation, Inc. 
-Copyright (C) 1999-2001, 2005, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001 Free Software Foundation, Inc. -Copyright (C) 1999-2002 Free Software Foundation, Inc. -Copyright (C) 1999-2005, 2012, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2002, 2004-2011, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2004, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2012, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2011, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2002, 2005, 2016 Free Software Foundation, Inc. -Copyright (C) 2005, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2005, 2007, 2016 Free Software Foundation, Inc. -Copyright (C) 2001, 2005, 2016 Free Software Foundation, Inc. -Copyright (C) 2001, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2007, 2016 Free Software Foundation, Inc. -Copyright (C) 2001 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2005, 2012, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2005, 2011, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2005, 2008, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2000, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2003, 2005, 2008, 2012, 2017 Free Software Foundation, Inc. -Copyright (C) 1999-2003, 2005, 2008, 2012 Free Software Foundation, Inc. -Copyright (C) 2000-2002, 2005-2006, 2008-2009, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2003, 2005, 2011-2012, 2016, 2018 Free Software Foundation, Inc. -Copyright (C) 1999-2006, 2012, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2010, 2012, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2008, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2008, 2011, 2016, 2018 Free Software Foundation, Inc. -Copyright (C) 1999-2008, 2011, 2018 Free Software Foundation, Inc. -Copyright (C) 1999-2009 Free Software Foundation, Inc. 
-Copyright (C) 1999-2004, 2008, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2008, 2011-2012, 2016, 2018 Free Software Foundation, Inc. -Copyright (C) 1999-2004, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2004, 2012 Free Software Foundation, Inc. -Copyright (C) 1999-2007, 2012, 2016 Free Software Foundation, Inc. -Copyright (C) 2000 Free Software Foundation, Inc. -Copyright (C) 1999-2003, 2005-2006, 2008 Free Software Foundation, Inc. -Copyright (C) 2000-2002, 2005-2006, 2008-2009, 2011 Free Software Foundation, Inc. -Copyright (C) 2003-2006, 2008-2018 Free Software Foundation, Inc. -ISSLASH(C) tests whether C is a directory separator character. -Copyright (C) 2003, 2005, 2008-2017 Free Software Foundation, Inc. -Copyright (C) 1999-2007, 2016 Free Software Foundation, Inc. -Copyright (C) 1999-2002, 2004, 2016, 2019 Free Software Foundation, Inc. -Copyright (C) 1999-2003 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2008, 2011, 2016 Free Software Foundation, Inc. -Copyright (C) 2001, 2004, 2011 Free Software Foundation, Inc. -Copyright (C) 1999-2001, 2016, 2019 Free Software Foundation, Inc. -Copyright (C) 1991 Free Software Foundation, Inc. -Our method of protecting your rights has two steps: (1) copyright -"copyright" line and a pointer to where the full notice is found. -Copyright (C) 1999-2002, 2011-2012, 2016, 2018 Free Software Foundation, Inc. -Copyright (C) 1999-2004, 2006-2007, 2010, 2012, 2016, 2018 Free Software Foundation, Inc. -Copyright (C) 1999-2002, 2012, 2018 Free Software Foundation, Inc. -Copyright (C) 2001-2002, 2005-2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2001-2002, 2006-2019 Free Software Foundation, Inc. -Copyright (C) 2005, 2012 Free Software Foundation, Inc. -Copyright (C) 2000, 2004-2005, 2012, 2016 Free Software Foundation, Inc. -Copyright (C) 2000-2002, 2004-2005 Free Software Foundation, Inc. -Copyright (C) 2008, 2018 Free Software Foundation, Inc. 
-Copyright (C) 2009, 2011 Free Software Foundation, Inc. -Copyright (C) 86, 91, 1995-1998, 1999, 2012 Free Software Foundation, Inc. -Copyright (C) 2000-2006, 2008-2018 Free Software Foundation, Inc. -Copyright (C) 2018 Free Software Foundation, Inc. -Copyright (C) 2000-2009, 2011-2012, 2016-2019 Free Software Foundation, Inc. -Copyright (C) 2006, 2011-2018 Free Software Foundation, Inc. -Copyright (C) 2006 Free Software Foundation, Inc. -Copyright (C) 2007 Free Software Foundation, Inc. -(1) assert copyright on the software, and (2) offer you this License -"Copyright" also means copyright-like laws that apply to other kinds of -in a fashion requiring copyright permission, other than the making of an -copyright on the Program, and are irrevocable provided the stated -not control copyright. Those thus making or running the covered works -for which you have or can give appropriate copyright permission. -Copyright (C) 1995, 1999, 2001-2007 Free Software Foundation, Inc. -Copyright (C) 2001-2004, 2006 Free Software Foundation, Inc. -Copyright (C) 1990-1996, 2000-2003, 2005-2007 Free Software Foundation, Inc. -Copyright (C) 1990, 1996, 2000-2003, 2005-2006 Free Software Foundation, Inc. -Copyright -Copyright (C) 2011-2019 Free Software Foundation, Inc. -Copyright (C) 2001, 2003-2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2001, 2003-2004, 2007, 2009-2019 Free Software Foundation, -Inc. -Copyright (C) 2009-2019 Free Software Foundation, Inc. -Copyright (C) 1990, 1998-2001, 2003-2006, 2009-2019 Free Software -Copyright 2017-2019 Free Software Foundation, Inc. -Copyright (C) 2001, 2003, 2005, 2008-2019 Free Software Foundation, Inc. -Copyright (C) 2010-2019 Free Software Foundation, Inc. -Copyright (C) 2000-2003, 2006, 2008-2019 Free Software Foundation, Inc. -Copyright (C) 1996-2019 Free Software Foundation, Inc. 
-Copyright (C) 1990, 1998, 2000-2001, 2003-2006, 2009-2019 Free Software -Copyright (C) 1998, 2001, 2003-2006, 2009-2019 Free Software Foundation, -Copyright (C) 2000-2001, 2004-2006, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2008-2019 Free Software Foundation, Inc. -Copyright (C) 1990-1998, 2000-2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 1995-1997, 2003, 2006, 2008-2019 Free Software Foundation, -Copyright (C) 2006-2019 Free Software Foundation, Inc. -Copyright (C) 2001-2004, 2007-2019 Free Software Foundation, Inc. -Copyright (C) 2016-2019 Free Software Foundation, Inc. -Copyright (C) 1995-1998, 2000-2002, 2004-2006, 2009-2019 Free Software -Copyright (C) 2001-2019 Free Software Foundation, Inc. -Copyright 2016-2019 Free Software Foundation, Inc. -Copyright (C) 2000-2003, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 1997-2006, 2008-2019 Free Software Foundation, Inc. -Copyright (C) 1997-1998, 2006-2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2003, 2006-2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2003-2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 1992, 1999, 2001, 2003, 2005, 2009-2019 Free Software -Copyright (C) 2001-2003, 2005-2019 Free Software Foundation, Inc. -Copyright (C) 2001-2004, 2006, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2003-2019 Free Software Foundation, Inc. -Copyright (C) 2002-2003, 2005-2006, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2003-2006, 2008-2019 Free Software Foundation, Inc. -Copyright (C) 2003, 2005, 2008-2019 Free Software Foundation, Inc. -Copyright (C) 2003, 2005-2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 1993-1994, 1998, 2002-2006, 2009-2019 Free Software -Copyright (C) 2002, 2006, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 1992, 1995-2003, 2005-2019 Free Software Foundation, Inc. -Copyright (C) 2005, 2007, 2009-2019 Free Software Foundation, Inc. 
-Copyright (C) 2017-2019 Free Software Foundation, Inc. -Copyright (C) 2001-2003, 2006-2019 Free Software Foundation, Inc. -Copyright (C) 2001-2002, 2004-2019 Free Software Foundation, Inc. -Copyright (C) 2004, 2007-2019 Free Software Foundation, Inc. -Copyright (C) 1995, 2001-2004, 2006-2019 Free Software Foundation, Inc. -Copyright (C) 2001-2002, 2007, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2007-2019 Free Software Foundation, Inc. -Copyright (C) 1995-1996, 2001-2019 Free Software Foundation, Inc. -Copyright (C) 1990, 2001, 2003-2006, 2009-2019 Free Software Foundation, -Copyright 2018-2019 Free Software Foundation, Inc. -Copyright (C) 2005-2019 Free Software Foundation, Inc. -Copyright (C) 2002, 2005-2006, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2001-2002, 2005, 2007, 2009-2019 Free Software Foundation, -Copyright (C) 2001-2004, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 2005-2006, 2009-2019 Free Software Foundation, Inc. -Copyright (C) 1990-2000, 2003-2004, 2006-2019 Free Software Foundation, Inc. -License: LGPL V2.0 -GNU Library General Public License -Version 2, June 1991 - -Copyright (C) 1991 Free Software Foundation, Inc. -59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -[This is the first released version of the Library GPL. It is numbered 2 because it goes with version 2 of the ordinary GPL.] - -Preamble -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. - -This license, the Library General Public License, applies to some specially designated Free Software Foundation software, and to other libraries whose authors who decide to use it. You can use it for your libraries too. 
- -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library, or if you modify it. - -For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link a program with the library, you must provide complete object files to the recipients so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. - -Our method of protecting your rights has two steps: (1) copyright the library, and (2) offer you this license, which gives you legal permission to copy, distribute and/or modify the library. - -Also, for each distributor's protection, we want to make certain that everyone understands that there is no warranty for this free library. If the library is modified by someone else and passed on, we want its recipients to know that what they have is not the original version, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that companies distributing free software will individually obtain patent licenses, thus in effect transforming the program into proprietary software. 
To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -Most GNU software, including some libraries, is covered by the ordinary GNU General Public License, which was designed for utility programs. This license, the GNU Library General Public License, applies to certain designated libraries. This license is quite different from the ordinary one; be sure to read it in full, and don't assume that anything in it is the same as in the ordinary license. - -The reason we have a separate public license for some libraries is that they blur the distinction we usually make between modifying or adding to a program and simply using it. Linking a program with a library, without changing the library, is in some sense simply using the library, and is analogous to running a utility program or application program. However, in a textual and legal sense, the linked executable is a combined work, a derivative of the original library, and the ordinary General Public License treats it as such. - -Because of this blurred distinction, using the ordinary General Public License for libraries did not effectively promote software sharing, because most developers did not use the libraries. We concluded that weaker conditions might promote sharing better. - -However, unrestricted linking of non-free programs would deprive the users of those programs of all benefit from the free status of the libraries themselves. This Library General Public License is intended to permit developers of non-free programs to use free libraries, while preserving your freedom as a user of such programs to change the free libraries that are incorporated in them. (We have not seen how to achieve this as regards changes in header files, but we have achieved it as regards changes in the actual functions of the Library.) The hope is that this will lead to faster development of free libraries. 
- -The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, while the latter only works together with the library. - -Note that it is possible for a library to be covered by the ordinary General Public License rather than by this special one. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION -0. This License Agreement applies to any software library which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Library General Public License (also called "this License"). Each licensee is addressed as "you". - -A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. - -The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) - -"Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. - -1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - -a) The modified work must itself be a software library. - -b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. - -c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. - -d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. 
- -(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. 
- -Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. - -This option is useful when you wish to copy part of the code of the Library into a program that is not a library. - -4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. - -If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. - -5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. - -However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. - -When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. 
Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. - -If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) +If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. @@ -5721,7 +5092,7 @@ Ty Coon, President of Vice That's all there is to it! -Software: pcre 8.42 +Software: pcre 8.45 Copyright notice:Copyright (c) 1997-2018 University of Cambridge Copyright(c) 2010-2018 Zoltan Herczeg Copyright(c) 2009-2018 Zoltan Herczeg @@ -5753,7 +5124,7 @@ Redistribution and use in source and binary forms, with or without modification, THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Software: psutil 5.6.7 +Software: psutil 5.9.0 Copyright notice:Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved. Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. Copyright (c) 2009, Giampaolo Rodola', Himanshu Shekhar. @@ -5792,15 +5163,15 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Software: Netifaces 0.10.9 -Copyright notice:Version: 0.10.7 +Software: Netifaces 0.11.0 +Copyright notice:Version: 0.11.0 Summary: Portable network interface information. Home-page: https://github.com/al45tair/netifaces Author: Alastair Houghton Author-email: alastair@alastairs-place.net Copyright (c) 2007-2018 Alastair Houghton License: MIT License -Description: netifaces 0.10.7 +Description: netifaces 0.11.0 ================ +-------------+------------------+ @@ -5836,8 +5207,8 @@ Description: netifaces 0.10.7 First you need to install it, which you can do by typing:: - tar xvzf netifaces-0.10.7.tar.gz - cd netifaces-0.10.7 + tar xvzf netifaces-0.11.0.tar.gz + cd netifaces-0.11.0 python setup.py install **Note that you will need the relevant developer tools for your platform**, @@ -6008,7 +5379,7 @@ Description: netifaces 0.10.7 features/bugfixes have been included as well. See the CHANGELOG for a more complete list of changes. 
-Software: nghttp2 1.39.2 +Software: nghttp2 1.43.0 Copyright notice:Copyright 2011 Kitware, Inc. Copyright (c) 2014 Tatsuhiro Tsujikawa Copyright (c) 2016 Tatsuhiro Tsujikawa @@ -6051,7 +5422,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -Software: python::cryptography 2.7 +Software: python::cryptography 3.3.2 Copyright notice:2. Grant of Copyright License. Subject to the terms and conditions of copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the @@ -6064,7 +5435,7 @@ License: Apache License 2.0 Please see above -Software: python::cffi 1.12.3 +Software: python::cffi 1.15.0 Copyright notice:ffi.c - Copyright (c) 1996, 1998, 1999, 2001 Red Hat, Inc. Copyright (c) 2002 Ranjit Mathew Copyright (c) 2002 Bo Thorsen @@ -6103,7 +5474,7 @@ documentation is licensed as follows: License: MIT License Please see above -Software: pycparser 2.19 +Software: pycparser 2.21 Copyright notice:All rights reserved. Copyright (C) 2017 Copyright (C) 2001-2017 Copyright (c) 2008-2017, Eli Bendersky @@ -6133,7 +5504,7 @@ OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License: BSD 3-Clause License Please see above -Software: python::asn1crypto 1.2.0 +Software: python::asn1crypto 1.3.0 Copyright notice:Copyright (c) 2015-2019 Will Bond Copyright (c) 2009 Raymond Hettinger License: MIT License @@ -6159,7 +5530,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -Software: idna 2.8 +Software: idna 3.7 Copyright notice:Copyright (c) 2013-2018, Kim Davies. All rights reserved. Copyright (c) 2001-2014 Python Software Foundation; All Rights Reserved Copyright (c) 1991-2014 Unicode, Inc. All rights reserved. 
@@ -6243,7 +5614,7 @@ License: 2-clause BSD-like license Please see above -Software: six 1.12.0 +Software: six 1.15.0 Copyright notice:Copyright (c) 2010-2018 Benjamin Peterson Permission is hereby granted, free of charge, to any person obtaining a copy of @@ -6315,7 +5686,7 @@ Agreement. License: Python Software Foundation License V2 Please see above -Software: pyasn1 0.4.7 +Software: pyasn1 0.4.8 Copyright notice: Copyright (c) 2005-2018, Ilya Etingof All rights reserved. @@ -6343,7 +5714,7 @@ POSSIBILITY OF SUCH DAMAGE. License: BSD 2-Clause License Please see above -Software: PyNaCl 1.3.0 +Software: PyNaCl 1.4.0 Copyright notice:author (Thomas Pornin) has waived all copyright and related or Copyright (c) 2015 Thomas Pornin Copyright 2005,2007,2009 Colin Percival @@ -6544,7 +5915,7 @@ License: Apache License V2.0 Please see above -Software: bcrypt 3.1.7 +Software: bcrypt 3.2.0 Copyright notice: Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -6796,7 +6167,7 @@ limitations under the License. License: Apache License V2.0 Please see above -Software: zlib 1.2.11 +Software: zlib 1.2.12 Copyright notice: Copyright (C) 1995-2006, 2011, 2016 Jean-loup Gailly Copyright (C) 2011, 2016 Mark Adler Copyright (C) 1995-2006, 2010, 2011, 2016 Jean-loup Gailly @@ -6865,7 +6236,7 @@ misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. -Software: numactl 2.0.13 +Software: numactl 2.0.14 Copyright notice:Copyright (C) 2010 Intel Corporation Copyright (C) 2005 Andi Kleen, SuSE Labs. Copyright (C) 2003,2004,2005,2008 Andi Kleen,SuSE Labs andCliff Wickman,SGI. @@ -7069,7 +6440,7 @@ Ty Coon, President of Vice That's all there is to it! 
-Software: pyOpenSSL 19.0.0 +Software: pyOpenSSL 20.0.1 Copyright notice:Copyright (C) Jean-Paul Calderone Copyright (C) AB Strakt Copyright (C) Jean-Paul Calderone 2008-2015, All rights reserved @@ -7280,86 +6651,7 @@ License:Apache License V2.0 See the License for the specific language governing permissions and limitations under the License. -Software: Apache Arrow v0.11.1 -Copyright notice:Copyright (c) 2011 The LevelDB Authors. All rights reserved. -Copyright (c) 2003-2013 University of Illinois at Urbana-Champaign. -Copyright (C) 2012-2016, Yann Collet -Copyright (C) 2012-2016, Yann Collet. -Copyright (C) 2017-2018 Dremio Corporation Licensed under the Apache -Copyright (c) 2013-2016, Matt Terry and Matthew Brett (BSD 2-clause) -Copyright (c) 2006-2010, Salvatore Sanfilippo -Copyright (c) 2006-2012, Salvatore Sanfilippo -Copyright (c) 2009-2012, Salvatore Sanfilippo -Copyright (c) 2012, Joyent, Inc. All rights reserved. -Copyright (C) 2009 Harish Mallipeddi - harish.mallipeddi@gmail.com -Copyright 2013 Sharvil Nanavati -Copyright (c) 2009 Google Inc. All rights reserved. -Copyright 2014 Cloudera, Inc. -Copyright 2016 The Apache Software Foundation -Copyright (c) MapBox -Copyright (c) 2002-2003 -Copyright (C) 2017-2018 Dremio Corporation -Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team -Copyright (c) 2008-2011 AQR Capital Management, LLC -Copyright (C) 2011-12, Dynamic NDArray Developers -Copyright (C) 2007 The Guava Authors -Copyright (c) 2016 Giles Hall -Copyright (C) 2015 Dato, Inc. -Copyright https:code.google.compmman-win32 -Copyright 2012 Continuum Analytics, Inc. -Copyright 2001-2009 Kitware, Inc. -Copyright 2012-2014 Continuum Analytics, Inc. -Copyright 2012 Cloudera Inc. -Copyright 2008 Google Inc. All Rights Reserved. -Copyright 2011 Kitware, Inc. 
-Copyright (c) 2015, 2016, 2017 Howard Hinnant -Copyright (c) 2016 Adrian Colomitchi -Copyright (c) 2017 Florian Dang -Copyright (c) 2017 Paul Thompson -Copyright (c) 2018 Tomasz Kami��ski -Copyright 2015 Cloudera Inc. -Copyright (c) Austin Appleby (aappleby (AT) gmail) -copyright (c) Google inc and (c) The Chromium Authors and licensed under the -Apache 2.0 License or the under the 3-clause BSD license: -Copyright (c) 2013 The Chromium Authors. All rights reserved. -Copyright 2015 The TensorFlow Authors. All Rights Reserved. -Copyright (c) 2005-2017, NumPy Developers. -Copyright 2014 Google Inc. -Copyright 2015 Microsoft Corporation. All rights reserved. -Copyright (C) 2002-2017 Jason Evans . -Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. -Copyright (c) 2009 The Go Authors. All rights reserved. -Copyright 2016 Cloudera Inc. -Copyright (c) 2017, Jeroen Ooms and Jim Hester -Copyright (c) 2016 Ray Project (https:github.com/ray-project/ray) -Copyright (c) 2009 Carnegie Mellon University. -Copyright https://code.google.com/p/mman-win32/ -Copyright (c) 2013-2016, Matt Terry and Matthew Brett; all rights reserved. -Copyright (c) 2015 Cloudera, Inc. -Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. -License:Apache License V2.0 -Please see above - - -Software: Apache Thrift 0.13.0 -Copyright notice:Copyright (c) <2006> -Copyright (c) 2007 Thomas Porschberg -Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. -Copyright (C) 1999 - 2007 Markus Mottl -Copyright 2007 by Nathan C. Myers ; some rights reserved. -Copyright (c) 2006-2008 Alexander Chemeris -Copyright (c) 2008- Patrick Collison -Copyright (c) 2006- Facebook -Copyright 2012 Twitter, Inc -Copyright (c) 2012 Two Blue Cubes Ltd. All rights reserved. 
-Copyright (C) 2006 - 2019, The Apache Software Foundation -Copyright (C) 2012 Raphael Kubo da Costa -License:Apache License V2.0 -Please see above - - -Software: boost 1.71.0 +Software: boost 1.76.0 Copyright notice:Copyright (C) 2005, 2007 Douglas Gregor Copyright Rene Rivera 2016 Copyright 2002 William E. Kempf @@ -7404,22 +6696,6 @@ FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -Software: brotli v0.6.0 -Copyright notice:Copyright 2016 Google Inc. All Rights Reserved. -Copyright 2015 Google Inc. All Rights Reserved. -Copyright 2017 Google Inc. All Rights Reserved. -Copyright 2013 Google Inc. All Rights Reserved. -Copyright 2016 The Brotli Authors. All rights reserved. -Copyright 2015 The Chromium Authors. All rights reserved. -Copyright 2014 Google Inc. All Rights Reserved. -Copyright 2010 Google Inc. All Rights Reserved. -Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. -Copyright 2015 The Brotli Authors. All rights reserved. -Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. -License:MIT License -Please see above - - Software: c-ares 1.15.0 Copyright notice:Copyright 2008, Google Inc. Copyright 2005, Google Inc. @@ -7466,64 +6742,7 @@ License:MIT License Please see above -Software: double-conversion 3.1.1 -Copyright notice:Copyright 2006-2011, the V8 project authors. All rights reserved. -Copyright 2008 the V8 project authors. All rights reserved. -Copyright 2006-2008 the V8 project authors. All rights reserved. -Copyright 2010 the V8 project authors. All rights reserved. -Copyright 2011 the V8 project authors. All rights reserved. -Copyright 2011, the V8 project authors. All rights reserved. -Copyright 2012 the V8 project authors. All rights reserved. -License:BSD 3-Clause License -Please see above - - -Software: flatbuffers 1.11.0 -Copyright notice:Copyright (c) 2015 Google, Inc. -Copyright 2014 Google Inc. 
All rights reserved. -Copyright 2015 Google Inc. All rights reserved. -Copyright 2017 Google Inc. All rights reserved. -Copyright 2008 Google Inc. All rights reserved. -2. Grant of Copyright License. Subject to the terms and conditions of -copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the -(c) You must retain, in the Source form of any Derivative Works -You may add Your own copyright statement to Your modifications and -may provide additional or different license terms and conditions -Copyright 2014 Google Inc. -Copyright (c) 2013 Google, Inc. -Copyright (c) 2014 Google, Inc. -Copyright 2012, the Dart project authors. All rights reserved. -Copyright 2018 Google Inc. All rights reserved. -Copyright 2014 Stefan.Eilemann@epfl.ch -Copyright 2016 Google Inc. All rights reserved. -Copyright 2015 The Chromium Authors. All rights reserved. -Copyright 2015, Google Inc. -Copyright 2015 Google Inc. -Copyright 2018 Dan Field. All rights reserved. -Copyright 2018 Dan Field -License:Apache License V2.0 -Please see above - - -Software: google/glog 0.4.0 -Copyright notice:Copyright (c) 2007, Google Inc. -Copyright (c) 2008, Google Inc. -Copyright (c) 2006, Google Inc. -Copyright (c) 2009, Google Inc. -Copyright (c) 1999, Google Inc. -Copyright (c) 2002, Google Inc. -Copyright (c) 2000 - 2007, Google Inc. -Copyright (c) 2005 - 2007, Google Inc. -Copyright (c) 2004, Google Inc. -Copyright (c) 2003, Google Inc. -Copyright (c) 1999, 2007, Google Inc. -Copyright (c) 2003-2008, Jouni Malinen and contributors -License:BSD 3-Clause License -Please see above - - -Software: grpc 1.22.0 +Software: grpc 1.28.1 Copyright notice:Copyright 2018 The gRPC Authors Copyright 2015 gRPC authors. Copyright 2017 gRPC authors. @@ -7556,7 +6775,7 @@ License:Apache License V2.0 Please see above -Software: libcgroup 0.41-21.el7 +Software: libcgroup 2.0 Copyright notice:Copyright IBM Corporation. 2007 Copyright IBM Corporation. 
2008 Copyright Red Hat, Inc. 2012 @@ -7581,7 +6800,7 @@ License:LGPL V2.1 Please see above -Software: libxml2 2.9.9 +Software: libxml2 2.9.13 Copyright notice:Copyright (C) 1998-2012 Daniel Veillard. All Rights Reserved. Copyright (C) 2003-2012 Daniel Veillard. Copyright (C) 2000,2012 Bjorn Reese and Daniel Veillard. @@ -7598,13 +6817,7 @@ License:MIT License Please see above -Software: rapidjson 4b3d7c2f42142f10b888e580c515f60ca98e2ee9 -Copyright notice: -License:MIT License -Please see above - - -Software: Zstandard 1.4.4 +Software: Zstandard 1.5.2 Copyright notice:Copyright (c) 2016-present, Facebook, Inc. Copyright (C) 2013-present, Yann Collet. Copyright (C) 2013-2016, Yann Collet. @@ -7645,7 +6858,7 @@ Copyright (C) 2004-2017 Mark Adler License:BSD 3-Clause License Please see above -Software: cJSON 1.7.11 +Software: cJSON 1.7.15 Copyright notice:Copyright (c) 2009-2017 Dave Gamble and cJSON contributors Copyright (c) 2009-2019 Dave Gamble and cJSON contributors Copyright (c) 2010 James Grenning and Contributed to Unity Project @@ -7658,7 +6871,7 @@ License:MIT License Please see above -Software: libevent 2.1.11 +Software: libevent 2.1.12 Copyright notice:Copyright (c) 2010 Chris Davis, Niels Provos, and Nick Mathewson Copyright (c) 2010-2012 Niels Provos and Nick Mathewson Copyright (c) 1996, David Mazieres @@ -7726,3 +6939,404 @@ Copyright 2010 Kitware, Inc. License:BSD 3-Clause License Please see above + +Software: xgboost 1.4.1 +Copyright notice:Copyright (c) 2016-2023 Tianqi Chen, Microsoft Corporation, Haichen Lin, UChicago. All rights reserved. +License:Apache License V2.0 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2019 by Contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Software: tornado 6.3.2 +Copyright notice:Copyright (c) 2011, Facebook, Inc. and its affiliates. All Rights Reserved. +License:Apache License V2.0 +Please see above + + +Software: aws-sdk-cpp 1.11.327 +Copyright notice:Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +License:Apache License V2.0 +Please see above + + +Software: tassl 1.1.1 +Copyright notice:Copyright (c) 2015, Cisco Systems, Inc. All Rights Reserved. +License:TaSSL OpenSSL Original SSLeay + LICENSE ISSUES + ============== + + The TaSSL toolkit stays under a triple license, i.e. both the conditions of + the TaSSL License, the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. 
+ + TaSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 2016 - 2021 Beijing JN TASS Technology Co., Ltd. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by 北京江南天安科技有限公司 + * TaSSL Project.(http://www.tass.com.cn/)" + * + * 4. The name "TaSSL Project" must not be used to endorse or promote + * products derived from this software without prior written + * permission. For written permission, please contact + * TaSSL@tass.com.cn. + * + * 5. Products derived from this software may not be called "TaSSL" + * nor may "TaSSL" appear in their names without prior written + * permission of the TaSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Beijing JN TASS Technology Co., Ltd. + * TaSSL Project.(http://www.tass.com.cn/)" + * + * THIS SOFTWARE IS PROVIDED BY THE TASSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE TASSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/). + * + */ + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. 
Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. 
The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. 
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ -- Gitee From 1883f5f9291266864b9d55f90aea7941e5a0e565 Mon Sep 17 00:00:00 2001 From: yuchao Date: Fri, 2 Aug 2024 14:35:17 +0800 Subject: [PATCH 148/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8DA=E5=85=BC=E5=AE=B9?= =?UTF-8?q?=E6=A8=A1=E5=BC=8F=E4=B8=8B=EF=BC=8C=E9=80=9A=E8=BF=87libpq?= =?UTF-8?q?=E7=BB=91=E5=AE=9A=E7=A9=BA=E5=AD=97=E7=AC=A6=E4=B8=B2=E5=8F=98?= =?UTF-8?q?=E9=87=8F=EF=BC=8C=E4=BC=A0=E5=85=A5=E6=95=B0=E6=8D=AE=E5=BA=93?= =?UTF-8?q?=E4=B8=BA=E7=A9=BA=E5=AD=97=E7=AC=A6=E4=B8=B2=EF=BC=8C=E8=80=8C?= =?UTF-8?q?=E4=B8=8D=E6=98=AFnull=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/interfaces/libpq/fe-connect.cpp | 23 +++++++++++++++++++++- src/common/interfaces/libpq/fe-exec.cpp | 3 ++- src/include/libpq/libpq-int.h | 7 +++++++ 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/src/common/interfaces/libpq/fe-connect.cpp b/src/common/interfaces/libpq/fe-connect.cpp index da1236c9d2..bf9793955a 100644 --- a/src/common/interfaces/libpq/fe-connect.cpp +++ b/src/common/interfaces/libpq/fe-connect.cpp @@ -1699,6 +1699,22 @@ connect_errReturn: return 0; } +static void PQgetDBCompatibility(PGconn* conn) +{ + PGresult* res = PQexec(conn, "show sql_compatibility"); + if (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK) { + if (strcmp(PQgetvalue(res, 0, 0), "A") == 0) { + conn->cmpt = COMPATIBILITY_A; + } else if (strcmp(PQgetvalue(res, 0, 0), "B") == 0) { + conn->cmpt = COMPATIBILITY_B; + } else if (strcmp(PQgetvalue(res, 0, 0), "PG") == 0) { + conn->cmpt = COMPATIBILITY_PG; + } + } + PQclear(res); + res = NULL; +} + /* * connectDBComplete * @@ -1768,7 +1784,7 @@ static int connectDBComplete(PGconn* conn) * PQconnectStart) is to wait for the socket to select for writing. 
*/ switch (flag) { - case PGRES_POLLING_OK: + case PGRES_POLLING_OK: { /* * Reset stored error messages since we now have a working @@ -1778,7 +1794,12 @@ static int connectDBComplete(PGconn* conn) #ifdef ENABLE_LITE_MODE destroyPQExpBuffer(errMsgBuf); #endif + char* dbName = conn->dbName; + if (conn->status == CONNECTION_OK && dbName != NULL && strcmp(dbName, "replication") != 0) { + PQgetDBCompatibility(conn); + } return 1; /* success! */ + } case PGRES_POLLING_READING: if (pqWaitTimed(1, 0, conn, finish_time)) { diff --git a/src/common/interfaces/libpq/fe-exec.cpp b/src/common/interfaces/libpq/fe-exec.cpp index 6e75ff1f88..77d2158c8f 100644 --- a/src/common/interfaces/libpq/fe-exec.cpp +++ b/src/common/interfaces/libpq/fe-exec.cpp @@ -1678,7 +1678,8 @@ static int PQsendQueryGuts(PGconn* conn, const char* command, const char* stmtNa /* Send parameters */ for (i = 0; i < nParams; i++) { - if ((paramValues != NULL) && (paramValues[i] != NULL)) { + if ((paramValues != NULL) && (paramValues[i] != NULL) && + (strlen(paramValues[i]) > 0 || conn->cmpt != COMPATIBILITY_A)) { int nbytes; if ((paramFormats != NULL) && paramFormats[i] != 0) { diff --git a/src/include/libpq/libpq-int.h b/src/include/libpq/libpq-int.h index c92365f5dc..63e5edb3c1 100644 --- a/src/include/libpq/libpq-int.h +++ b/src/include/libpq/libpq-int.h @@ -249,6 +249,12 @@ typedef enum { SETENV_STATE_IDLE } PGSetenvStatusType; +typedef enum { + COMPATIBILITY_A, + COMPATIBILITY_B, + COMPATIBILITY_PG +} DBCompatibility; + /* Typedef for the EnvironmentOptions[] array */ typedef struct PQEnvironmentOption { const char *envName, /* name of an environment variable */ @@ -551,6 +557,7 @@ struct pg_conn { */ char* connection_info; bool connection_extra_info; + DBCompatibility cmpt; #ifdef HAVE_CE PGClientLogic* client_logic; -- Gitee From b77b6b371f10d3e6d0729c5e137df71259c5c909 Mon Sep 17 00:00:00 2001 From: openGaussDev Date: Fri, 2 Aug 2024 18:33:42 +0800 Subject: [PATCH 149/347] optimize GSC memory state 
Offering: openGaussDev More detail: optimize GSC memory state --- .../utils/cache/knl_globalbasedefcache.cpp | 75 +++++++++++-------- .../utils/cache/knl_globaldbstatmanager.cpp | 5 ++ .../utils/cache/knl_globalpartdefcache.cpp | 2 +- .../utils/cache/knl_globalsysdbcache.cpp | 30 ++++---- .../utils/cache/knl_globalsystupcache.cpp | 45 +++++------ .../utils/cache/knl_globaltabdefcache.cpp | 2 +- .../backend/utils/resowner/resowner.cpp | 2 +- src/include/utils/knl_globalbasedefcache.h | 4 +- src/include/utils/knl_globalpartdefcache.h | 4 +- src/include/utils/knl_globalsyscache_common.h | 11 --- src/include/utils/knl_globalsysdbcache.h | 2 - src/include/utils/knl_globalsystabcache.h | 11 +++ src/include/utils/knl_globalsystupcache.h | 19 +++-- src/include/utils/knl_globaltabdefcache.h | 4 +- 14 files changed, 119 insertions(+), 97 deletions(-) diff --git a/src/common/backend/utils/cache/knl_globalbasedefcache.cpp b/src/common/backend/utils/cache/knl_globalbasedefcache.cpp index 673e329499..e635a659db 100644 --- a/src/common/backend/utils/cache/knl_globalbasedefcache.cpp +++ b/src/common/backend/utils/cache/knl_globalbasedefcache.cpp @@ -51,22 +51,6 @@ static uint64 GetPartEstimateSize(GlobalPartitionEntry *entry) return part_size; } -template -void GlobalBaseDefCache::RemoveElemFromBucket(GlobalBaseEntry *base) -{ - if (is_relation) { - GlobalRelationEntry *entry = (GlobalRelationEntry *)base; - uint64 rel_size = GetRelEstimateSize(entry); - pg_atomic_fetch_sub_u64(&m_base_space, AllocSetContextUsedSpace(((AllocSet)entry->rel_mem_manager))); - m_db_entry->MemoryEstimateSub(rel_size); - } else { - GlobalPartitionEntry *entry = (GlobalPartitionEntry *)base; - uint64 part_size = GetPartEstimateSize(entry); - pg_atomic_fetch_sub_u64(&m_base_space, part_size); - m_db_entry->MemoryEstimateSub(part_size); - } - m_bucket_list.RemoveElemFromBucket(&base->cache_elem); -} template void GlobalBaseDefCache::AddHeadToBucket(Index hash_index, GlobalBaseEntry *base) { @@ -86,16 
+70,11 @@ void GlobalBaseDefCache::AddHeadToBucket(Index hash_index, GlobalBaseEntry *base template void GlobalBaseDefCache::AddHeadToBucket(Index hash_index, GlobalBaseEntry *base); template void GlobalBaseDefCache::AddHeadToBucket(Index hash_index, GlobalBaseEntry *base); -template void GlobalBaseDefCache::RemoveElemFromBucket(GlobalBaseEntry *base); -template void GlobalBaseDefCache::RemoveElemFromBucket(GlobalBaseEntry *base); -template void GlobalBaseEntry::Free(GlobalBaseEntry *entry) { - Assert(entry->refcount == 0); - if (is_relation) { - Assert(entry->type == GLOBAL_RELATION_ENTRY); + if (entry->type == GLOBAL_RELATION_ENTRY) { if (((GlobalRelationEntry *)entry)->rel_mem_manager != NULL) { MemoryContextDelete(((GlobalRelationEntry *)entry)->rel_mem_manager); } @@ -107,8 +86,30 @@ void GlobalBaseEntry::Free(GlobalBaseEntry *entry) } pfree(entry); } -template void GlobalBaseEntry::Free(GlobalBaseEntry *entry); -template void GlobalBaseEntry::Free(GlobalBaseEntry *entry); + +static void free_dead_defentry_internal(GlobalBaseEntry *entry, volatile uint64 *base_space, + GlobalSysDBCacheEntry *db_entry) +{ + Assert(entry->refcount == 0); + if (entry->type == GLOBAL_RELATION_ENTRY) { + if (((GlobalRelationEntry *)entry)->rel_mem_manager != NULL) { + GlobalRelationEntry *rel_entry = (GlobalRelationEntry *)entry; + uint64 rel_size = GetRelEstimateSize(rel_entry); + pg_atomic_fetch_sub_u64( + base_space, AllocSetContextUsedSpace(((AllocSet)rel_entry->rel_mem_manager))); + db_entry->MemoryEstimateSub(rel_size); + } + } else { + Assert(entry->type == GLOBAL_PARTITION_ENTRY); + if (((GlobalPartitionEntry *)entry)->part != NULL) { + GlobalPartitionEntry *part_entry = (GlobalPartitionEntry *)entry; + uint64 part_size = GetPartEstimateSize(part_entry); + pg_atomic_fetch_sub_u64(base_space, part_size); + db_entry->MemoryEstimateSub(part_size); + } + } + GlobalBaseEntry::Free(entry); +} void GlobalBaseEntry::Release() { @@ -166,8 +167,18 @@ GlobalBaseEntry 
*GlobalBaseDefCache::SearchReadOnly(Oid obj_oid, uint32 hash_val } template -void GlobalBaseDefCache::FreeDeadEntrys() +void GlobalBaseDefCache::FreeDeadElements() { + if (m_dead_entries.GetLength() == 0) { + return; + } + + /* only one clean is enough */ + ResourceOwnerEnlargeGlobalIsExclusive(t_thrd.utils_cxt.CurrentResourceOwner); + if (!atomic_compare_exchange_u32(&m_recovery_basedef_flag, 0, 1)) { + return; + } + ResourceOwnerRememberGlobalIsExclusive(t_thrd.utils_cxt.CurrentResourceOwner, &m_recovery_basedef_flag); while (m_dead_entries.GetLength() > 0) { Dlelem *elt = m_dead_entries.RemoveHead(); if (elt == NULL) { @@ -179,12 +190,15 @@ void GlobalBaseDefCache::FreeDeadEntrys() m_dead_entries.AddTail(&entry->cache_elem); break; } else { - entry->Free(entry); + free_dead_defentry_internal(entry, &m_base_space, m_db_entry); } } + Assert(m_recovery_basedef_flag == 1); + ResourceOwnerForgetGlobalIsExclusive(t_thrd.utils_cxt.CurrentResourceOwner, &m_recovery_basedef_flag); + atomic_compare_exchange_u32(&m_recovery_basedef_flag, 1, 0); } -template void GlobalBaseDefCache::FreeDeadEntrys(); -template void GlobalBaseDefCache::FreeDeadEntrys(); +template void GlobalBaseDefCache::FreeDeadElements(); +template void GlobalBaseDefCache::FreeDeadElements(); template void GlobalBaseDefCache::Invalidate(Oid dbid, Oid obj_oid) @@ -268,9 +282,9 @@ template void GlobalBaseDefCache::ResetCaches(); template void GlobalBaseDefCache::HandleDeadEntry(GlobalBaseEntry *entry) { - RemoveElemFromBucket(entry); + m_bucket_list.RemoveElemFromBucket(&entry->cache_elem); if (entry->refcount == 0) { - m_dead_entries.AddHead(&entry->cache_elem); + free_dead_defentry_internal(entry, &m_base_space, m_db_entry); } else { m_dead_entries.AddTail(&entry->cache_elem); } @@ -396,4 +410,5 @@ GlobalBaseDefCache::GlobalBaseDefCache(Oid db_oid, bool is_shared, GlobalSysDBCa m_base_space = 0; m_obj_locks = NULL; m_db_entry = entry; + m_recovery_basedef_flag = 0; } diff --git 
a/src/common/backend/utils/cache/knl_globaldbstatmanager.cpp b/src/common/backend/utils/cache/knl_globaldbstatmanager.cpp index cf9f8d8310..135b526030 100644 --- a/src/common/backend/utils/cache/knl_globaldbstatmanager.cpp +++ b/src/common/backend/utils/cache/knl_globaldbstatmanager.cpp @@ -39,7 +39,9 @@ template void GlobalSysDBCacheEntry::ResetDBCache() { m_systabCache->ResetCatCaches(); + m_systabCache->FreeDeadElements(); m_tabdefCache->ResetRelCaches(); + m_tabdefCache->FreeDeadElements(); if (m_dbOid != InvalidOid) { m_partdefCache->ResetPartCaches(); } @@ -48,9 +50,12 @@ void GlobalSysDBCacheEntry::ResetDBCache() void GlobalSysDBCacheEntry::RemoveTailElements() { m_systabCache->RemoveAllTailElements(); + m_systabCache->FreeDeadElements(); m_tabdefCache->RemoveAllTailElements(); + m_tabdefCache->FreeDeadElements(); if (m_dbOid != InvalidOid) { m_partdefCache->RemoveAllTailElements(); + m_partdefCache->FreeDeadElements(); } } diff --git a/src/common/backend/utils/cache/knl_globalpartdefcache.cpp b/src/common/backend/utils/cache/knl_globalpartdefcache.cpp index 00412ad712..7851046481 100644 --- a/src/common/backend/utils/cache/knl_globalpartdefcache.cpp +++ b/src/common/backend/utils/cache/knl_globalpartdefcache.cpp @@ -77,7 +77,7 @@ void GlobalPartDefCache::Insert(Partition part, uint32 hash_value) bool found = GlobalBaseDefCache::EntryExist(part->pd_id, hash_index); if (found) { PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock); - entry->Free(entry); + GlobalBaseEntry::Free(entry); return; } GlobalBaseDefCache::AddHeadToBucket(hash_index, entry); diff --git a/src/common/backend/utils/cache/knl_globalsysdbcache.cpp b/src/common/backend/utils/cache/knl_globalsysdbcache.cpp index 8bd77754c3..e39cf3e78d 100644 --- a/src/common/backend/utils/cache/knl_globalsysdbcache.cpp +++ b/src/common/backend/utils/cache/knl_globalsysdbcache.cpp @@ -86,14 +86,6 @@ void GlobalSysDBCache::ReleaseGSCEntry(GlobalSysDBCacheEntry *entry) pg_atomic_fetch_sub_u64(&entry->m_refcount, 
1); } -void GlobalSysDBCache::RemoveElemFromBucket(GlobalSysDBCacheEntry *entry) -{ - /* shared db never remove */ - Assert(entry->m_dbOid != InvalidOid); - m_bucket_list.RemoveElemFromBucket(&entry->m_cache_elem); - m_dbstat_manager.RecordSwapOutDBEntry(entry); -} - void GlobalSysDBCache::AddHeadToBucket(Index hash_index, GlobalSysDBCacheEntry *entry) { m_bucket_list.AddHeadToBucket(hash_index, &entry->m_cache_elem); @@ -103,11 +95,22 @@ void GlobalSysDBCache::AddHeadToBucket(Index hash_index, GlobalSysDBCacheEntry * m_dbstat_manager.ThreadHoldDB(entry); } +static void free_dead_dbs_internal(GlobalSysDBCacheEntry *entry) +{ + /* sub all to delete, make sure no one use the entry */ + entry->MemoryEstimateSub(entry->m_rough_used_space); + Assert(entry->m_rough_used_space == 0); + GlobalSysDBCacheEntry::Free(entry); +} + void GlobalSysDBCache::HandleDeadDB(GlobalSysDBCacheEntry *entry) { - RemoveElemFromBucket(entry); + /* shared db never remove */ + Assert(entry->m_dbOid != InvalidOid); + m_bucket_list.RemoveElemFromBucket(&entry->m_cache_elem); + m_dbstat_manager.RecordSwapOutDBEntry(entry); if (entry->m_refcount == 0) { - m_dead_dbs.AddHead(&entry->m_cache_elem); + free_dead_dbs_internal(entry); } else { m_dead_dbs.AddTail(&entry->m_cache_elem); } @@ -131,10 +134,7 @@ void GlobalSysDBCache::FreeDeadDBs() m_dead_dbs.AddTail(&dbEntry->m_cache_elem); break; } else { - /* sub all to delete, make sure no one use the entry */ - dbEntry->MemoryEstimateSub(dbEntry->m_rough_used_space); - Assert(dbEntry->m_rough_used_space == 0); - dbEntry->Free(dbEntry); + free_dead_dbs_internal(dbEntry); } } } @@ -179,7 +179,7 @@ GlobalSysDBCacheEntry *GlobalSysDBCache::SearchGSCEntry(Oid db_id, Index hash_in if (existDbEntry != NULL) { m_dbstat_manager.ThreadHoldDB(existDbEntry); PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); - newDbEntry->Free(newDbEntry); + GlobalSysDBCacheEntry::Free(newDbEntry); return existDbEntry; } diff --git 
a/src/common/backend/utils/cache/knl_globalsystupcache.cpp b/src/common/backend/utils/cache/knl_globalsystupcache.cpp index 7bec4deef4..a19104cb60 100644 --- a/src/common/backend/utils/cache/knl_globalsystupcache.cpp +++ b/src/common/backend/utils/cache/knl_globalsystupcache.cpp @@ -149,7 +149,7 @@ void GlobalCatCList::Release() void GlobalSysTupCache::ReleaseGlobalCatCTup(GlobalCatCTup *ct) { if (unlikely(!ct->canInsertGSC)) { - pfree(ct); + GlobalCatCTup::Free(ct); return; } (void)pg_atomic_fetch_sub_u64(&ct->refcount, 1); @@ -158,7 +158,7 @@ void GlobalSysTupCache::ReleaseGlobalCatCTup(GlobalCatCTup *ct) void GlobalSysTupCache::ReleaseGlobalCatCList(GlobalCatCList *cl) { if (unlikely(!cl->canInsertGSC)) { - FreeGlobalCatCList(cl); + GlobalCatCList::Free(cl); return; } (void)pg_atomic_fetch_sub_u64(&cl->refcount, 1); @@ -185,12 +185,13 @@ void GlobalSysTupCache::AddHeadToCCList(GlobalCatCList *cl) pg_atomic_fetch_add_u64(m_tup_space, cl_size); DLAddHead(&cc_lists, &cl->cache_elem); } -void GlobalSysTupCache::RemoveElemFromCCList(GlobalCatCList *cl) + +static void free_dead_cl_internal(GlobalCatCList *cl, volatile uint64 *tup_space, GlobalSysDBCacheEntry *entry) { uint64 cl_size = GetClEstimateSize(cl); - m_dbEntry->MemoryEstimateSub(cl_size); - pg_atomic_fetch_sub_u64(m_tup_space, cl_size); - DLRemove(&cl->cache_elem); + entry->MemoryEstimateSub(cl_size); + pg_atomic_fetch_sub_u64(tup_space, cl_size); + GlobalCatCList::Free(cl); } void GlobalSysTupCache::AddHeadToBucket(Index hash_index, GlobalCatCTup *ct) @@ -203,23 +204,23 @@ void GlobalSysTupCache::AddHeadToBucket(Index hash_index, GlobalCatCTup *ct) DLAddHead(&cc_buckets[hash_index], &ct->cache_elem); } -void GlobalSysTupCache::RemoveElemFromBucket(GlobalCatCTup *ct) +static void free_dead_ct_internal(GlobalCatCTup *ct, volatile uint64 *tup_space, GlobalSysDBCacheEntry *entry) { uint64 ct_size = GetCtEstimateSize(ct); - pg_atomic_fetch_sub_u64(m_tup_space, ct_size); + pg_atomic_fetch_sub_u64(tup_space, 
ct_size); /* free space of tup */ - m_dbEntry->MemoryEstimateSub(ct_size); - pg_atomic_fetch_sub_u64(m_tup_count, 1); - DLRemove(&ct->cache_elem); + entry->MemoryEstimateSub(ct_size); + GlobalCatCTup::Free(ct); } void GlobalSysTupCache::HandleDeadGlobalCatCTup(GlobalCatCTup *ct) { /* this func run in wr lock, so dont call free directly */ - RemoveElemFromBucket(ct); + pg_atomic_fetch_sub_u64(m_tup_count, 1); + DLRemove(&ct->cache_elem); ct->dead = true; if (ct->refcount == 0) { - m_dead_cts.AddHead(&ct->cache_elem); + free_dead_ct_internal(ct, m_tup_space, m_dbEntry); } else { m_dead_cts.AddTail(&ct->cache_elem); } @@ -238,7 +239,7 @@ void GlobalSysTupCache::FreeDeadCts() m_dead_cts.AddTail(&ct->cache_elem); break; } else { - pfree(ct); + free_dead_ct_internal(ct, m_tup_space, m_dbEntry); } } } @@ -283,22 +284,22 @@ void GlobalSysTupCache::RemoveTailTupleElements(Index hash_index) ResourceOwnerForgetGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_tup_swappingouts[hash_index]); } -void GlobalSysTupCache::FreeGlobalCatCList(GlobalCatCList *cl) +void GlobalCatCList::Free(GlobalCatCList *cl) { Assert(cl->refcount == 0 || !cl->canInsertGSC); for (int i = 0; i < cl->n_members; i++) { cl->members[i]->Release(); } - CatCacheFreeKeys(m_relinfo.cc_tupdesc, cl->nkeys, m_relinfo.cc_keyno, cl->keys); - pfree(cl); + CatCacheFreeKeys(cl->my_cache->GetCCTupleDesc(), cl->nkeys, cl->my_cache->GetCCKeyno(), cl->keys); + pfree_ext(cl); } void GlobalSysTupCache::HandleDeadGlobalCatCList(GlobalCatCList *cl) { /* this func run in wr lock, so dont call free directly */ - RemoveElemFromCCList(cl); + DLRemove(&cl->cache_elem); if (cl->refcount == 0) { - m_dead_cls.AddHead(&cl->cache_elem); + free_dead_cl_internal(cl, m_tup_space, m_dbEntry); } else { m_dead_cls.AddTail(&cl->cache_elem); } @@ -317,7 +318,7 @@ void GlobalSysTupCache::FreeDeadCls() m_dead_cls.AddTail(&cl->cache_elem); break; } else { - FreeGlobalCatCList(cl); + free_dead_cl_internal(cl, m_tup_space, m_dbEntry); } } 
FreeDeadCts(); @@ -581,7 +582,7 @@ GlobalCatCTup *GlobalSysTupCache::InsertHeapTupleIntoGlobalCatCache(InsertCatTup if (unlikely(ct != NULL)) { /* other thread has inserted one */ PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock); - pfree_ext(new_ct); + GlobalCatCTup::Free(new_ct); return ct; } @@ -1047,7 +1048,7 @@ GlobalCatCList *GlobalSysTupCache::InsertListIntoCatCacheList(InsertCatListInfo ResourceOwnerRememberGlobalCatCList(LOCAL_SYSDB_RESOWNER, exist_cl); /* we need mark clist's refcont to 0 then do real free up. */ cl->refcount = 0; - FreeGlobalCatCList(cl); + GlobalCatCList::Free(cl); cl = exist_cl; } else { AddHeadToCCList(cl); diff --git a/src/common/backend/utils/cache/knl_globaltabdefcache.cpp b/src/common/backend/utils/cache/knl_globaltabdefcache.cpp index 43f32da295..51cc45db95 100644 --- a/src/common/backend/utils/cache/knl_globaltabdefcache.cpp +++ b/src/common/backend/utils/cache/knl_globaltabdefcache.cpp @@ -515,7 +515,7 @@ void GlobalTabDefCache::Insert(Relation rel, uint32 hash_value) bool found = GlobalBaseDefCache::EntryExist(rel->rd_id, hash_index); if (found) { PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_obj_locks[hash_index]); - entry->Free(entry); + GlobalBaseEntry::Free(entry); return; } diff --git a/src/common/backend/utils/resowner/resowner.cpp b/src/common/backend/utils/resowner/resowner.cpp index 5e8c7b6f17..254896b9b4 100755 --- a/src/common/backend/utils/resowner/resowner.cpp +++ b/src/common/backend/utils/resowner/resowner.cpp @@ -2349,7 +2349,7 @@ void ResourceOwnerReleaseGlobalBaseEntry(ResourceOwner owner, bool isCommit) } if (unlikely(entry->refcount == 0)) { /* palloc fail */ - entry->FreeError(); + GlobalBaseEntry::Free(entry); } else { entry->Release(); } diff --git a/src/include/utils/knl_globalbasedefcache.h b/src/include/utils/knl_globalbasedefcache.h index b6952a5e63..8e1854b8e4 100644 --- a/src/include/utils/knl_globalbasedefcache.h +++ b/src/include/utils/knl_globalbasedefcache.h @@ -72,6 +72,7 @@ public: } 
template void RemoveAllTailElements(); + template void FreeDeadElements(); /* free elem */ protected: /* base class initialization funciton */ @@ -101,14 +102,12 @@ protected: /* fucntions to remove/free elem from GSC hashtable */ template void HandleDeadEntry(GlobalBaseEntry *entry); /* remove from hashtable */ - template void FreeDeadEntrys(); /* free elem */ /* function to handle GSC memory swapout */ template void ResetCaches(); template void RemoveTailElements(Index hash_index); /* function to add/remove elem to GSC hashtable */ - template void RemoveElemFromBucket(GlobalBaseEntry *base); template void AddHeadToBucket(Index hash_index, GlobalBaseEntry *base); /* GSC Identifier fields */ @@ -120,6 +119,7 @@ protected: char m_relkind; /* dev-debug only, no real process so far */ volatile uint32 *m_is_swappingouts; + volatile uint32 m_recovery_basedef_flag; /* GSC statistic information, assigned from GlobalSysCacheStat class */ volatile uint64 *m_searches; diff --git a/src/include/utils/knl_globalpartdefcache.h b/src/include/utils/knl_globalpartdefcache.h index 848f1a2010..9611dad253 100644 --- a/src/include/utils/knl_globalpartdefcache.h +++ b/src/include/utils/knl_globalpartdefcache.h @@ -48,7 +48,7 @@ public: */ GlobalPartitionEntry *SearchReadOnly(Oid partRelOid, uint32 hash_value) { - GlobalBaseDefCache::FreeDeadEntrys(); + GlobalBaseDefCache::FreeDeadElements(); GlobalPartitionEntry *entry = (GlobalPartitionEntry *)GlobalBaseDefCache::SearchReadOnly(partRelOid, hash_value); return entry; @@ -60,7 +60,7 @@ public: return; } GlobalBaseDefCache::ResetCaches(); - GlobalBaseDefCache::FreeDeadEntrys(); + GlobalBaseDefCache::FreeDeadElements(); } inline void Invalidate(Oid dbOid, Oid partRelOid) diff --git a/src/include/utils/knl_globalsyscache_common.h b/src/include/utils/knl_globalsyscache_common.h index c030ecd5e6..e7e6f0d9f4 100644 --- a/src/include/utils/knl_globalsyscache_common.h +++ b/src/include/utils/knl_globalsyscache_common.h @@ -76,18 +76,7 @@ struct 
GlobalBaseEntry { volatile uint64 refcount; Dlelem cache_elem; void Release(); - template static void Free(GlobalBaseEntry *entry); - - void FreeError() - { - if (type == GLOBAL_RELATION_ENTRY) { - Free(this); - } else { - Assert(type == GLOBAL_PARTITION_ENTRY); - Free(this); - } - } }; struct GlobalRelationEntry : public GlobalBaseEntry { Relation rel; diff --git a/src/include/utils/knl_globalsysdbcache.h b/src/include/utils/knl_globalsysdbcache.h index 1cc96dcb19..021c3d3c98 100644 --- a/src/include/utils/knl_globalsysdbcache.h +++ b/src/include/utils/knl_globalsysdbcache.h @@ -186,8 +186,6 @@ private: void InitRelForInitSysCacheFlag(); void InitSysCacheRelIds(); - void RemoveElemFromBucket(GlobalSysDBCacheEntry *entry); - void AddHeadToBucket(Index hash_index, GlobalSysDBCacheEntry *entry); /* Flag to indicate if inited */ diff --git a/src/include/utils/knl_globalsystabcache.h b/src/include/utils/knl_globalsystabcache.h index 50325e742e..066ae5d113 100644 --- a/src/include/utils/knl_globalsystabcache.h +++ b/src/include/utils/knl_globalsystabcache.h @@ -65,6 +65,17 @@ public: template void ResetCatCaches(); + void FreeDeadElements() + { + for (int i = 0; i < SysCacheSize; i++) { + if (m_global_systupcaches[i] == NULL) { + continue; + } + m_global_systupcaches[i]->FreeDeadCls(); + m_global_systupcaches[i]->FreeDeadCts(); + } + } + void RemoveAllTailElements(); /* diff --git a/src/include/utils/knl_globalsystupcache.h b/src/include/utils/knl_globalsystupcache.h index a68720df60..954828a626 100644 --- a/src/include/utils/knl_globalsystupcache.h +++ b/src/include/utils/knl_globalsystupcache.h @@ -103,6 +103,10 @@ struct GlobalCatCTup { HeapTupleData tuple; /* tuple management header */ void Release(); + static void Free(GlobalCatCTup *ct) + { + pfree_ext(ct); + } }; /* @@ -133,6 +137,7 @@ struct GlobalCatCList { GlobalCatCTup *members[FLEXIBLE_ARRAY_MEMBER]; void Release(); + static void Free(GlobalCatCList *cl); }; /* @@ -305,7 +310,10 @@ public: { return 
m_relinfo.cc_reloid; } - + inline int *GetCCKeyno() + { + return m_relinfo.cc_keyno; + } void Init(); inline bool Inited() { @@ -323,7 +331,8 @@ public: uint32 hash_value, Datum *arguments, oidvector* argModes, bool is_disposable); GlobalCatCTup *SearchTupleMissWithArgModes(InsertCatTupInfo *tup_info, oidvector* argModes); #endif - + void FreeDeadCts(); + void FreeDeadCls(); bool enable_rls; private: /* @@ -375,16 +384,12 @@ private: void SearchBuiltinProcCacheList(InsertCatListInfo *list_info); GlobalCatCList *FindListInternal(uint32 hash_value, int nkeys, Datum *arguments, int *location); - void FreeDeadCts(); void HandleDeadGlobalCatCTup(GlobalCatCTup *ct); void RemoveTailTupleElements(Index hash_index); - void FreeDeadCls(); void HandleDeadGlobalCatCList(GlobalCatCList *cl); void RemoveTailListElements(); - void FreeGlobalCatCList(GlobalCatCList *cl); - /* when initdb, this func call first */ void InitCacheInfo(Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets); void InitHashTable(); @@ -403,10 +408,8 @@ private: } void AddHeadToCCList(GlobalCatCList *cl); - void RemoveElemFromCCList(GlobalCatCList *cl); void AddHeadToBucket(Index hash_index, GlobalCatCTup *ct); - void RemoveElemFromBucket(GlobalCatCTup *ct); /* Global cache identifier */ Oid m_dbOid; diff --git a/src/include/utils/knl_globaltabdefcache.h b/src/include/utils/knl_globaltabdefcache.h index ddfb6cc1d5..c8e8d14a60 100644 --- a/src/include/utils/knl_globaltabdefcache.h +++ b/src/include/utils/knl_globaltabdefcache.h @@ -42,7 +42,7 @@ public: GlobalRelationEntry *SearchReadOnly(Oid relOid, uint32 hash_value) { - GlobalBaseDefCache::FreeDeadEntrys(); + GlobalBaseDefCache::FreeDeadElements(); GlobalRelationEntry *entry = (GlobalRelationEntry *)GlobalBaseDefCache::SearchReadOnly(relOid, hash_value); return entry; @@ -55,7 +55,7 @@ public: return; } GlobalBaseDefCache::ResetCaches(); - GlobalBaseDefCache::FreeDeadEntrys(); + GlobalBaseDefCache::FreeDeadElements(); } inline uint64 
GetSysCacheSpaceNum() -- Gitee From eb4932d2334e0cd8414c16410449b24336db28f3 Mon Sep 17 00:00:00 2001 From: Hemny Date: Tue, 6 Aug 2024 17:22:09 +0800 Subject: [PATCH 150/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddeserialization=5Fto?= =?UTF-8?q?=5Ftuple=E6=96=B9=E6=B3=95=E7=9A=84=E5=86=85=E5=AD=98=E6=B3=84?= =?UTF-8?q?=E9=9C=B2=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/process/postmaster/pgaudit.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/process/postmaster/pgaudit.cpp b/src/gausskernel/process/postmaster/pgaudit.cpp index 89ac684e32..5fbc36b00c 100755 --- a/src/gausskernel/process/postmaster/pgaudit.cpp +++ b/src/gausskernel/process/postmaster/pgaudit.cpp @@ -3007,12 +3007,12 @@ static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS_NEW], } values[i++] = BoolGetDatum(verifyResult); /* verify_result */ } else { - values[i++] = CStringGetTextDatum(FILED_NULLABLE(NULL)); /* verify_result*/ - nulls[i] = true; + values[i] = CStringGetTextDatum(FILED_NULLABLE(NULL)); /* verify_result */ + nulls[i++] = true; } } else { - values[i++] = CStringGetTextDatum(FILED_NULLABLE(NULL)); /* verify_result*/ - nulls[i] = true; + values[i] = CStringGetTextDatum(FILED_NULLABLE(NULL)); /* verify_result */ + nulls[i++] = true; } } if (newVersion) { -- Gitee From ade37ec73e22318af53666a302314b07074952c3 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Tue, 6 Aug 2024 15:40:58 +0800 Subject: [PATCH 151/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dgreatest=E4=BD=9C?= =?UTF-8?q?=E4=B8=BA=E8=A1=A8=E5=87=BD=E6=95=B0=E7=9A=84core=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_expr.cpp | 7 ++++++- src/common/backend/parser/parse_relation.cpp | 8 +++----- src/gausskernel/optimizer/plan/createplan.cpp | 2 +- 
src/gausskernel/optimizer/plan/streamplan_utils.cpp | 2 +- src/gausskernel/process/stream/execStream.cpp | 2 +- src/include/parser/parse_expr.h | 2 +- src/test/regress/expected/hw_datatype_2.out | 6 ++++++ src/test/regress/sql/hw_datatype_2.sql | 2 ++ 8 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index b7b881b98d..3d61bdf047 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -4156,8 +4156,13 @@ static Node *transformStartWithWhereClauseColumnRef(ParseState *pstate, ColumnRe return NULL; } -PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr, CursorExpression** ce) +PlannedStmt* getCursorStreamFromFuncArg(Node* node, CursorExpression** ce) { + if (!IsA(node, FuncExpr)) { + return NULL; + } + + FuncExpr* funcexpr = (FuncExpr*)node; ListCell* lc = NULL; foreach (lc, funcexpr->args) { Node* arg = (Node*)lfirst(lc); diff --git a/src/common/backend/parser/parse_relation.cpp b/src/common/backend/parser/parse_relation.cpp index f92c68b207..1e7660fe64 100755 --- a/src/common/backend/parser/parse_relation.cpp +++ b/src/common/backend/parser/parse_relation.cpp @@ -1792,11 +1792,9 @@ RangeTblEntry* addRangeTableEntryForFunction( * create_functionscan_path need cursorDop to determine * wheather functionscan smp or not. */ - if (IsA(funcexpr, FuncExpr)) { - PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)funcexpr); - if (cursorPstmt != NULL && IsA(cursorPstmt->planTree, Stream)) { - rte->cursorDop = cursorPstmt->planTree->lefttree->dop; - } + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg(funcexpr); + if (cursorPstmt != NULL && IsA(cursorPstmt->planTree, Stream)) { + rte->cursorDop = cursorPstmt->planTree->lefttree->dop; } eref = makeAlias(alias ? 
alias->aliasname : funcname, NIL); diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp index 26683e7460..2e25e066fc 100755 --- a/src/gausskernel/optimizer/plan/createplan.cpp +++ b/src/gausskernel/optimizer/plan/createplan.cpp @@ -6246,7 +6246,7 @@ static FunctionScan* make_functionscan(List* qptlist, List* qpqual, Index scanre node->funccolcollations = funccolcollations; CursorExpression* ce = NULL; - PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)funcexpr, &ce); + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg(funcexpr, &ce); if (cursorPstmt == NULL) { return node; } diff --git a/src/gausskernel/optimizer/plan/streamplan_utils.cpp b/src/gausskernel/optimizer/plan/streamplan_utils.cpp index 50ca0f152c..3780cf127f 100755 --- a/src/gausskernel/optimizer/plan/streamplan_utils.cpp +++ b/src/gausskernel/optimizer/plan/streamplan_utils.cpp @@ -1898,7 +1898,7 @@ void finalize_node_id(Plan* result_plan, int* plan_node_id, int* parent_node_id, } } break; case T_FunctionScan: { - PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)((FunctionScan*)result_plan)->funcexpr); + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg(((FunctionScan*)result_plan)->funcexpr); if (cursorPstmt != NULL) { cursorPstmt->planTree->cursor_owner_node_id = result_plan->plan_node_id; set_stream_plan_cursor_walker(cursorPstmt->planTree); diff --git a/src/gausskernel/process/stream/execStream.cpp b/src/gausskernel/process/stream/execStream.cpp index 9416ca1351..5cec707b71 100755 --- a/src/gausskernel/process/stream/execStream.cpp +++ b/src/gausskernel/process/stream/execStream.cpp @@ -971,7 +971,7 @@ static void InitStreamFlow(StreamFlowCtl* ctl) InitStreamFlow(ctl); } break; case T_FunctionScan: { - PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg((FuncExpr*)((FunctionScan*)oldPlan)->funcexpr); + PlannedStmt* cursorPstmt = getCursorStreamFromFuncArg(((FunctionScan*)oldPlan)->funcexpr); if 
(cursorPstmt != NULL) { ctl->plan = cursorPstmt->planTree; diff --git a/src/include/parser/parse_expr.h b/src/include/parser/parse_expr.h index 1b9c1a9298..fd8b6b2d36 100644 --- a/src/include/parser/parse_expr.h +++ b/src/include/parser/parse_expr.h @@ -39,6 +39,6 @@ extern bool IsQuerySWCBRewrite(Query *query); extern bool IsSWCBRewriteRTE(RangeTblEntry *rte); extern Datum GetTypeZeroValue(Form_pg_attribute att_tup); typedef Datum (*getTypeZeroValueFunc)(Form_pg_attribute att_tup); -extern PlannedStmt* getCursorStreamFromFuncArg(FuncExpr* funcexpr, CursorExpression** ce = NULL); +extern PlannedStmt* getCursorStreamFromFuncArg(Node* node, CursorExpression** ce = NULL); #endif /* PARSE_EXPR_H */ diff --git a/src/test/regress/expected/hw_datatype_2.out b/src/test/regress/expected/hw_datatype_2.out index 191bf070c7..9ebab0daa3 100644 --- a/src/test/regress/expected/hw_datatype_2.out +++ b/src/test/regress/expected/hw_datatype_2.out @@ -808,6 +808,12 @@ select least(0.9, 1); .9 (1 row) +select * from greatest(1, 1.1); + greatest +---------- + 1.1 +(1 row) + create or replace function test_cast(a numeric) returns int as $$ diff --git a/src/test/regress/sql/hw_datatype_2.sql b/src/test/regress/sql/hw_datatype_2.sql index e42098c157..241e27d494 100644 --- a/src/test/regress/sql/hw_datatype_2.sql +++ b/src/test/regress/sql/hw_datatype_2.sql @@ -319,6 +319,8 @@ select greatest(1.1, 1); select least(1, 0.9); select least(0.9, 1); +select * from greatest(1, 1.1); + create or replace function test_cast(a numeric) returns int as $$ -- Gitee From 705b9473ff314faa0070b979a4bb000d763e261c Mon Sep 17 00:00:00 2001 From: q00421813 Date: Sun, 4 Aug 2024 21:04:40 +0800 Subject: [PATCH 152/347] =?UTF-8?q?1.=E4=BF=AE=E5=A4=8D=E6=8C=87=E9=92=88?= =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96bug=202.=E5=88=A0=E9=99=A4=E6=97=A0?= =?UTF-8?q?=E7=94=A8=E5=88=86=E6=94=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
.../storage/access/common/reloptions.cpp | 2 +- .../storage/access/ustore/knl_pruneuheap.cpp | 41 ++++++++----------- .../storage/access/ustore/knl_uheap.cpp | 2 +- .../storage/access/ustore/knl_undoaction.cpp | 6 +-- .../access/ustore/knl_undolauncher.cpp | 10 ++--- .../storage/access/ustore/knl_uundorecord.cpp | 6 +++ 6 files changed, 32 insertions(+), 35 deletions(-) diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index a246572877..8ebd16f043 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -3231,7 +3231,6 @@ static void SetUstoreDefaultFillfactor(void *rdopts, relopt_value *options, for (int i = 0; i < numoptions; i++) { if (ff_options_idx == -1 && pg_strcasecmp("fillfactor", options[i].gen->name) == 0) { ff_options_idx = i; - break; } } @@ -3242,6 +3241,7 @@ static void SetUstoreDefaultFillfactor(void *rdopts, relopt_value *options, } if (storage_type_idx == -1 && pg_strcasecmp("storage_type", elems[i].optname) == 0) { storage_type_idx = i; + continue; } } diff --git a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp index 90547868d3..34129979db 100644 --- a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp @@ -535,32 +535,27 @@ int UHeapPagePruneGuts(Relation relation, const RelationBuffer *relbuf, Transact * strategy to rearrange the page where we anyway need to traverse all * rows. 
*/ - if (forcePrune && !UPageIsPrunableWithXminHorizon(page, oldestXmin)) { - ; /* no need to scan */ - } else { - /* Scan the page */ - maxoff = UHeapPageGetMaxOffsetNumber(page); - for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - RowPtr *itemid = NULL; + maxoff = UHeapPageGetMaxOffsetNumber(page); + for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { + RowPtr *itemid = NULL; - /* Ignore items already processed as part of an earlier chain */ - if (prstate.marked[offnum]) { - continue; - } - - /* - * Nothing to do if slot is empty, already dead or marked as - * deleted. - */ - itemid = UPageGetRowPtr(page, offnum); - if (!RowPtrIsUsed(itemid) || RowPtrIsDead(itemid) || RowPtrIsDeleted(itemid)) { - continue; - } + /* Ignore items already processed as part of an earlier chain */ + if (prstate.marked[offnum]) { + continue; + } - /* Process this item */ - ndeleted += UHeapPruneItem(relbuf, offnum, oldestXmin, &prstate, &spaceFreed, - (offnum == targetOffnum)); + /* + * Nothing to do if slot is empty, already dead or marked as + * deleted. + */ + itemid = UPageGetRowPtr(page, offnum); + if (!RowPtrIsUsed(itemid) || RowPtrIsDead(itemid) || RowPtrIsDeleted(itemid)) { + continue; } + + /* Process this item */ + ndeleted += UHeapPruneItem(relbuf, offnum, oldestXmin, &prstate, &spaceFreed, + (offnum == targetOffnum)); } /* diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 2aa13218a4..1e5983cc9e 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -3897,7 +3897,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId * Try to extend the ITL array now. 
*/ if (urecPtr != NULL) { - urecPtr = INVALID_UNDO_REC_PTR; + *urecPtr = INVALID_UNDO_REC_PTR; } nExtended = UPageExtendTDSlots(relation, buf); diff --git a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp index 1d7d4315f1..ac98c2aa7a 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp @@ -76,7 +76,7 @@ bool VerifyAndDoUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoR /* already processed. */ if (rc != UNDO_TRAVERSAL_COMPLETE) { ereport(ERROR, (errmodule(MOD_USTORE), - errmsg("[Rollbakc Skip]: xid(%lu), toUrecptr(%lu), fromUrecptr(%lu), rc(%d)", + errmsg("[Rollback Skip]: xid(%lu), toUrecptr(%lu), fromUrecptr(%lu), rc(%d)", fullXid, toUrecptr, fromUrecptr, rc))); return false; } @@ -105,7 +105,7 @@ bool VerifyAndDoUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoR * for this transaction, otherwise we need to fetch the next batch of * the undo records. 
*/ - if (!IS_VALID_UNDO_REC_PTR(urecPtr)){ + if (!IS_VALID_UNDO_REC_PTR(urecPtr)) { break; } @@ -117,7 +117,7 @@ bool VerifyAndDoUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoR URecVector *urecvec = FetchUndoRecordRange(&urecPtr, toUrecptr, undoApplySize, false); if (urecvec->Size() == 0){ ereport(ERROR, (errmodule(MOD_USTORE), - errmsg("[Rollbakc Skip]: xid(%lu), toUrecptr(%lu), fromUrecptr(%lu)", + errmsg("[Rollback Skip]: xid(%lu), toUrecptr(%lu), fromUrecptr(%lu)", fullXid, toUrecptr, fromUrecptr))); break; } diff --git a/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp b/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp index bc7ccdaed2..7d71ab074d 100644 --- a/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp @@ -46,7 +46,6 @@ #include "gssignal/gs_signal.h" #include "access/ustore/knl_undoworker.h" #include "access/ustore/knl_undorequest.h" -#include "access/gtm.h" #define InvalidPid ((ThreadId)(-1)) @@ -197,11 +196,8 @@ void UndoWorkerShmemInit(void) } } -void UndoLuncherQuitAndClean(int code, Datum arg) +void UndoLauncherQuitAndClean(int code, Datum arg) { -#ifdef ENABLE_MULTIPLE_NODES - CloseGTM(); -#endif ereport(LOG, (errmsg("undo launcher shutting down"))); t_thrd.undolauncher_cxt.UndoWorkerShmem->undo_launcher_pid = 0; DisownLatch(&t_thrd.undolauncher_cxt.UndoWorkerShmem->latch); @@ -267,10 +263,10 @@ NON_EXEC_STATIC void UndoLauncherMain() t_thrd.proc_cxt.PostInit->SetDatabaseAndUser(NULL, InvalidOid, NULL); t_thrd.proc_cxt.PostInit->InitUndoLauncher(); - on_proc_exit(UndoLuncherQuitAndClean, 0); - SetProcessingMode(NormalProcessing); + on_proc_exit(UndoLauncherQuitAndClean, 0); + /* Unblock signals (they were blocked when the postmaster forked us) */ gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); (void)gs_signal_unblock_sigusr2(); diff --git a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp 
b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp index 7e18b284b4..f7f7376d64 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp @@ -128,6 +128,12 @@ void UndoRecord::Reset(UndoRecPtr urp) if (BufferIsValid(buff_)) { if (!IS_VALID_UNDO_REC_PTR(urp) || (UNDO_PTR_GET_ZONE_ID(urp) != UNDO_PTR_GET_ZONE_ID(urp_)) || (UNDO_PTR_GET_BLOCK_NUM(urp) != BufferGetBlockNumber(buff_))) { + BufferDesc *buf_desc = GetBufferDescriptor(buff_ - 1); + if (LWLockHeldByMe(buf_desc->content_lock)) { + ereport(LOG, (errmodule(MOD_UNDO), + errmsg("Release Buffer %d when Reset UndoRecord from %lu to %lu.", buff_, urp_, urp))); + LockBuffer(buff_, BUFFER_LOCK_UNLOCK); + } ReleaseBuffer(buff_); buff_ = InvalidBuffer; } -- Gitee From f5d45bdf683dbba0ff910abeabd9b9466181c36d Mon Sep 17 00:00:00 2001 From: openGaussDev Date: Tue, 6 Aug 2024 14:45:33 +0800 Subject: [PATCH 153/347] =?UTF-8?q?=20=E4=BF=AE=E5=A4=8D=E5=A4=87=E6=9C=BA?= =?UTF-8?q?=E8=AF=BB=E7=BB=84=E8=8E=B7=E5=8F=96=E5=BF=AB=E7=85=A7=E6=AD=BB?= =?UTF-8?q?=E9=94=81=E9=97=AE=E9=A2=98Offering:=20openGaussDevMore=20detai?= =?UTF-8?q?l:=20standby=20read?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Match-id-06894f7e495d72c5763653105cbd2c8eee989c81 --- src/gausskernel/storage/buffer/bufmgr.cpp | 23 +++++++++++++++++++++++ src/gausskernel/storage/ipc/procarray.cpp | 4 +++- src/include/storage/buf/bufmgr.h | 1 + 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index e062afafde..d5d15f30bf 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -4764,6 +4764,29 @@ int GetThreadBufferLeakNum(void) return refCountErrors; } +bool CheckForBufferPin(void) +{ + PrivateRefCountEntry *res = NULL; + + for (int i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++) { + res = 
&t_thrd.storage_cxt.PrivateRefCountArray[i]; + + if (res->buffer != InvalidBuffer) { + return true; + } + } + + if (t_thrd.storage_cxt.PrivateRefCountOverflowed) { + HASH_SEQ_STATUS hstat; + hash_seq_init(&hstat, t_thrd.storage_cxt.PrivateRefCountHash); + while ((res = (PrivateRefCountEntry *)hash_seq_search(&hstat)) != NULL) { + hash_seq_term(&hstat); + return true; + } + } + return false; +} + /* * CheckForBufferLeaks - ensure this backend holds no buffer pins * diff --git a/src/gausskernel/storage/ipc/procarray.cpp b/src/gausskernel/storage/ipc/procarray.cpp index d55df51103..4f012ce0a9 100755 --- a/src/gausskernel/storage/ipc/procarray.cpp +++ b/src/gausskernel/storage/ipc/procarray.cpp @@ -2120,6 +2120,8 @@ RETRY: bool retry_get = false; uint64 retry_count = 0; const static uint64 WAIT_COUNT = 0x7FFFF; + bool get_snapshot_by_self = CheckForBufferPin() || forHSFeedBack; + /* reset xmin before acquiring lwlock, in case blocking redo */ t_thrd.pgxact->xmin = InvalidTransactionId; RETRY_GET: @@ -2184,7 +2186,7 @@ RETRY_GET: goto RETRY_GET; } #ifndef ENABLE_MULTIPLE_NODES - } else if (forHSFeedBack) { + } else if (get_snapshot_by_self) { LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); if ((t_thrd.xact_cxt.ShmemVariableCache->standbyXmin <= t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin) diff --git a/src/include/storage/buf/bufmgr.h b/src/include/storage/buf/bufmgr.h index 63a8f9bceb..2c0f6da631 100644 --- a/src/include/storage/buf/bufmgr.h +++ b/src/include/storage/buf/bufmgr.h @@ -410,6 +410,7 @@ extern void RangeForgetBuffer(RelFileNode node, ForkNumber forkNum, BlockNumber extern void DropSegRelNodeSharedBuffer(RelFileNode node, ForkNumber forkNum); extern int GetThreadBufferLeakNum(void); +extern bool CheckForBufferPin(void); extern void flush_all_buffers(Relation rel, Oid db_id, HTAB *hashtbl = NULL); /* in localbuf.c */ extern void ForgetLocalBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum); -- Gitee From 
878e53c0669f7f81b67759425bc1b8c2faab3fc9 Mon Sep 17 00:00:00 2001 From: liujinyu Date: Tue, 6 Aug 2024 13:47:52 +0800 Subject: [PATCH 154/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Drotate/not=20rotate?= =?UTF-8?q?=E4=B8=8D=E6=94=AF=E6=8C=81with=E5=AD=90=E5=8F=A5=E5=8F=8Anot?= =?UTF-8?q?=20rotate=E4=B8=8D=E6=94=AF=E6=8C=81=E5=AD=98=E5=82=A8=E8=BF=87?= =?UTF-8?q?=E7=A8=8B=E4=B8=AD=E4=BD=BF=E7=94=A8=E8=87=AA=E5=AE=9A=E4=B9=89?= =?UTF-8?q?=E5=8F=98=E9=87=8F=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/analyze.cpp | 32 +++- src/common/backend/parser/parse_clause.cpp | 2 +- .../expected/gb_ora_rotate_unrotate.out | 168 ++++++++++++++++++ src/test/regress/parallel_schedule0A | 3 + src/test/regress/parallel_schedule0C | 2 - .../regress/sql/gb_ora_rotate_unrotate.sql | 77 ++++++++ 6 files changed, 274 insertions(+), 10 deletions(-) diff --git a/src/common/backend/parser/analyze.cpp b/src/common/backend/parser/analyze.cpp index ead1a76f4a..bf2f9abef9 100644 --- a/src/common/backend/parser/analyze.cpp +++ b/src/common/backend/parser/analyze.cpp @@ -3121,9 +3121,12 @@ static List* removeTargetListByNameList(List* targetList, List* nameList) for (targetCell = list_head(targetList); targetCell; targetCell = next) { ResTarget *resTarget = (ResTarget *)lfirst(targetCell); next = lnext(targetCell); + isfind = false; if (IsA(resTarget->val, ColumnRef)) { - isfind = false; - char *colName = strVal(linitial(((ColumnRef *)resTarget->val)->fields)); + Node *field = (Node *)linitial(((ColumnRef *)resTarget->val)->fields); + if (IsA(field, A_Star)) + continue; + const char *colName = strVal(field); foreach (cell, nameList) { if (strcmp(strVal((Value *)lfirst(cell)), colName) == 0) { targetList = list_delete_cell(targetList, targetCell, prev); @@ -3131,9 +3134,9 @@ static List* removeTargetListByNameList(List* targetList, List* nameList) break; } } - if (!isfind) - prev = targetCell; } + if (!isfind) + 
prev = targetCell; } return targetList; } @@ -3149,7 +3152,12 @@ static Query* transformUnrotateStmt(ParseState* pstate, SelectStmt* stmt) int in_counter = 0; List *targetList = NIL; List *aStarList = NIL; - ParseState *pstate1 = make_parsestate(NULL); + + if (stmt->withClause) { + WithClause *withclause = (WithClause *)copyObject(stmt->withClause); + (void)transformWithClause(pstate, withclause); + } + ParseState *pstate1 = make_parsestate(pstate); pstate1->p_sourcetext = pstrdup(pstate->p_sourcetext); transformFromClause(pstate1, stmt->fromClause); @@ -3159,9 +3167,11 @@ static Query* transformUnrotateStmt(ParseState* pstate, SelectStmt* stmt) appendStringInfo(&from_clause_sql, " FROM %s ", quote_identifier(rte->relname)); else if (RTE_SUBQUERY == rte->rtekind) { StringInfo select_sql = makeStringInfo(); - deparse_query(rte->subquery, select_sql, NIL, false, false); + deparse_query(rte->subquery, select_sql, NIL, false, false, (void*)pstate1->p_ref_hook_state); appendStringInfo(&from_clause_sql, " FROM (%s) ", select_sql->data); DestroyStringInfo(select_sql); + } else if (RTE_CTE == rte->rtekind) { + appendStringInfo(&from_clause_sql, " FROM %s ", quote_identifier(rte->ctename)); } else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("NOT ROTATE in from clause error"))); } @@ -3205,7 +3215,10 @@ static Query* transformUnrotateStmt(ParseState* pstate, SelectStmt* stmt) } for (targetCell = list_head(stmt_targetList); targetCell; targetCell = next) { ResTarget *rt = (ResTarget *)lfirst(targetCell); - char *colName1 = strVal((Value *)linitial(((ColumnRef *)rt->val)->fields)); + Node *field = (Node *)linitial(((ColumnRef *)rt->val)->fields); + if (IsA(field, A_Star)) + continue; + const char *colName1 = strVal((Value *)field); next = lnext(targetCell); if (strcmp(colName1, colName) == 0) stmt_targetList = list_delete_cell(stmt_targetList, targetCell, prev); @@ -3347,6 +3360,11 @@ static Query* transformUnrotateStmt(ParseState* pstate, SelectStmt* stmt) 
list_free_ext(stmt->unrotateInfo->inExprList); pfree_ext(stmt->unrotateInfo); + list_free(pstate->p_ctenamespace); + pstate->p_ctenamespace = NIL; + list_free(pstate->p_future_ctes); + pstate->p_future_ctes = NIL; + return transformStmt(pstate, (Node *)stmt); } diff --git a/src/common/backend/parser/parse_clause.cpp b/src/common/backend/parser/parse_clause.cpp index 31fb09798c..b89ac22c88 100644 --- a/src/common/backend/parser/parse_clause.cpp +++ b/src/common/backend/parser/parse_clause.cpp @@ -629,7 +629,7 @@ static RangeTblEntry* transformRangeSubselect(ParseState* pstate, RangeSubselect prev = NULL; if (1 == list_length(subQueryStmt->targetList)) { List *wholeTarget = NIL; - ParseState *pstate1 = make_parsestate(NULL); + ParseState *pstate1 = make_parsestate(pstate); pstate1->p_sourcetext = pstate->p_sourcetext; transformFromClause(pstate1, subQueryStmt->fromClause); diff --git a/src/test/regress/expected/gb_ora_rotate_unrotate.out b/src/test/regress/expected/gb_ora_rotate_unrotate.out index 1864323e51..bf5b7afbf6 100644 --- a/src/test/regress/expected/gb_ora_rotate_unrotate.out +++ b/src/test/regress/expected/gb_ora_rotate_unrotate.out @@ -12,6 +12,17 @@ select * from ( select year, order_mode, order_total from original_orders) rotat 2022 | 5000 | (3 rows) +with tt as ( + select year, order_mode, order_total from original_orders +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; + year | store | internet +------+-------+---------- + 2020 | 5500 | 1000 + 2021 | 1000 | 1000 + 2022 | 5000 | +(3 rows) + select * from (select year, order_mode, order_total from original_orders) rotate (sum(order_total) for order_mode in ('online' as internet )) order by year; year | internet ------+---------- @@ -32,6 +43,19 @@ select * from rotate_orders not rotate ( yearly_total for order_mode in ( store 2021 | online | 1000 (5 rows) +with tt as ( + select * from rotate_orders +) +select * from tt not rotate ( 
yearly_total for order_mode in ( store as 'direct', internet as 'online')) order by year; + year | order_mode | yearly_total +------+------------+-------------- + 2020 | direct | 5500 + 2020 | online | 1000 + 2021 | direct | 1000 + 2021 | online | 1000 + 2022 | direct | 5000 +(5 rows) + select * from rotate_orders not rotate exclude nulls (yearly_total for order_mode in ( store as 'direct', internet as 'online')); year | order_mode | yearly_total ------+------------+-------------- @@ -72,6 +96,19 @@ select * from (select year, order_mode, yearly_total from (select * from rotate_ 2022 | 5000 | (3 rows) +with tt as( + select year, order_mode, order_total from original_orders +) +select * from (select year, direct, online from tt rotate (sum(order_total) for order_mode in ('direct', 'online')) order by year) as rotate_t not rotate ( yearly_total for order_mode in (direct, online)); + year | order_mode | yearly_total +------+------------+-------------- + 2022 | direct | 5000 + 2021 | direct | 1000 + 2020 | direct | 5500 + 2021 | online | 1000 + 2020 | online | 1000 +(5 rows) + -- create view create view rotate_view as (select * from (select year, order_mode, order_total from original_orders) as t rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year); select * from rotate_view; @@ -104,6 +141,17 @@ select * from (select year, direct as store, online as internet from (select yea 2022 | 5000 | (3 rows) +with orders as ( + select year, order_mode, order_total from original_orders +) +select * from (select year, direct as store, online as internet from orders rotate (sum(order_total) for order_mode in ('direct', 'online')) )order by year; + year | store | internet +------+-------+---------- + 2020 | 5500 | 1000 + 2021 | 1000 | 1000 + 2022 | 5000 | +(3 rows) + select year, order_mode, yearly_total from(select * from rotate_orders not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online'))) where year> 
2020; year | order_mode | yearly_total ------+------------+-------------- @@ -112,6 +160,34 @@ select year, order_mode, yearly_total from(select * from rotate_orders not rotat 2021 | online | 1000 (3 rows) +with a_tab as ( + select * from (select year, order_mode, order_total from original_orders ) as orders rotate (sum(order_total) for order_mode in ('direct', 'online')) order by year +), +b_tab as ( + select * from a_tab +) +select * from b_tab; + year | direct | online +------+--------+-------- + 2020 | 5500 | 1000 + 2021 | 1000 | 1000 + 2022 | 5000 | +(3 rows) + +with a_tab as ( + select * from rotate_orders +), +b_tab as (select * from (select * from a_tab not rotate exclude nulls (yearly_total for order_mode in ( store as 'direct', internet as 'online'))) order by year ) +select * from b_tab; + year | order_mode | yearly_total +------+------------+-------------- + 2020 | direct | 5500 + 2020 | online | 1000 + 2021 | direct | 1000 + 2021 | online | 1000 + 2022 | direct | 5000 +(5 rows) + -- SMP set query_dop = 4; select * from ( select year, order_mode, order_total from original_orders) rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; @@ -122,6 +198,17 @@ select * from ( select year, order_mode, order_total from original_orders) rotat 2022 | 5000 | (3 rows) +with tt as ( + select year, order_mode, order_total from original_orders +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; + year | store | internet +------+-------+---------- + 2020 | 5500 | 1000 + 2021 | 1000 | 1000 + 2022 | 5000 | +(3 rows) + select * from rotate_orders not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')); year | order_mode | yearly_total ------+------------+-------------- @@ -167,6 +254,39 @@ select * from "rotate@orders" not rotate ( yearly_total for order_mode in ( stor 2021 | online | 1000 (5 rows) +--procedure +CREATE OR 
REPLACE PROCEDURE proc_rotate is +DECLARE + total_max int; +BEGIN + SELECT max(order_total) + INTO total_max + FROM original_orders; +RAISE NOTICE 'total_max: %', total_max; +CREATE TABLE proc_rotate_tt AS + SELECT * FROM ( + SELECT * FROM rotate_orders + WHERE store < total_max ) + not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')); + COMMIT; +END; +/ +call proc_rotate(); +NOTICE: total_max: 5000 + proc_rotate +------------- + +(1 row) + +select * from proc_rotate_tt; + year | order_mode | yearly_total +------+------------+-------------- + 2021 | direct | 1000 + 2021 | online | 1000 +(2 rows) + +drop procedure proc_rotate; +drop table proc_rotate_tt; drop table "'rotate'orders"; drop table "ROTATEorders"; drop table "rotate@orders"; @@ -275,6 +395,15 @@ select * from (select name, value from product_column) rotate(sum(value) for nam 50 | 70 | 90 (1 row) +with tt as ( + select name, value from product_column +) +select * from tt rotate(sum(value) for name in ('a','b','c')); + a | b | c +----+----+---- + 50 | 70 | 90 +(1 row) + create table product_column_un (a int, b int, c int) with (orientation = column); insert into product_column_un values(50,70,90); select * from product_column_un not rotate (value for name in (a,b,c)); @@ -299,6 +428,17 @@ for order_mode in ('direct' as store, 'online' as internet)) order by year; 2022 | 2000 | (3 rows) +with tt as ( + select year, order_mode, order_total from orders_par +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; + year | store | internet +------+-------+---------- + 2020 | 5500 | 1000 + 2021 | 100 | 200 + 2022 | 2000 | +(3 rows) + select * from ( select year, order_mode, order_total from orders_par partition(par1)) rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; year | store | internet @@ -307,6 +447,16 @@ for order_mode in ('direct' as store, 'online' as 
internet)) order by year; 2021 | 100 | 200 (2 rows) +with tt as ( + select year, order_mode, order_total from orders_par partition(par1) +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; + year | store | internet +------+-------+---------- + 2020 | 500 | + 2021 | 100 | 200 +(2 rows) + create table rotate_orders_par(year int, store int, internet int) partition by range(store)(partition par1 values less than (1000),partition par2 values less than (maxvalue)); insert into rotate_orders_par values (2020, 5500,1000),(2021,100,200),(2022,2000,null); select * from rotate_orders_par not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')); @@ -330,6 +480,15 @@ select * from (select name, value from product_ustore) rotate(sum(value) for nam 50 | 70 | 90 (1 row) +with tt as ( + select name, value from product_ustore +) +select * from tt rotate(sum(value) for name in ('a','b','c')); + a | b | c +----+----+---- + 50 | 70 | 90 +(1 row) + create table stu_ustore (name varchar(20), math int, english int, chinese int) with (storage_type=ustore); insert into stu_ustore values('Tom',10,20,30); select * from stu_ustore not rotate (num for list in (math, english,chinese)); @@ -351,6 +510,15 @@ select * from (select name, value from product_segment) rotate(sum(value) for na 50 | 70 | 90 (1 row) +with tt as ( + select name, value from product_segment +) +select * from tt rotate(sum(value) for name in ('a','b','c')); + a | b | c +----+----+---- + 50 | 70 | 90 +(1 row) + create table stu_segment (name varchar(20), math int, english int, chinese int) with (segment=on); insert into stu_segment values('Tom',10,20,30); select * from stu_segment not rotate (num for list in (math, english,chinese)); diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 1f9aadadf2..69f43d5eef 100644 --- a/src/test/regress/parallel_schedule0A +++ 
b/src/test/regress/parallel_schedule0A @@ -488,3 +488,6 @@ test: gin_select # test for binary_double suffix and Constant values test: test_binary_suffix + +# test for rotate and unrotate function +test: gb_ora_rotate_unrotate diff --git a/src/test/regress/parallel_schedule0C b/src/test/regress/parallel_schedule0C index 809f0d54c7..bbb61c369e 100644 --- a/src/test/regress/parallel_schedule0C +++ b/src/test/regress/parallel_schedule0C @@ -190,6 +190,4 @@ test: user_host_test # test for new_expr_by_flatten test: enable_expr_fusion_flatten -# test for rotate and unrotate function -test: gb_ora_rotate_unrotate diff --git a/src/test/regress/sql/gb_ora_rotate_unrotate.sql b/src/test/regress/sql/gb_ora_rotate_unrotate.sql index 91eb31b981..e161cf8b2a 100644 --- a/src/test/regress/sql/gb_ora_rotate_unrotate.sql +++ b/src/test/regress/sql/gb_ora_rotate_unrotate.sql @@ -7,6 +7,10 @@ create table original_orders (id int, year int, order_mode text, order_total int insert into original_orders values (1,2020,'direct',5000), (2,2020,'online',1000), (3,2021,'online',1000), (4,2021,'direct',1000), (5,2022,'direct',5000), (6,2020,'direct',500); select * from ( select year, order_mode, order_total from original_orders) rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; +with tt as ( + select year, order_mode, order_total from original_orders +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; select * from (select year, order_mode, order_total from original_orders) rotate (sum(order_total) for order_mode in ('online' as internet )) order by year; @@ -14,6 +18,10 @@ create table rotate_orders as (select * from (select year, order_mode, order_tot -- test not rotate (column transform to row) select * from rotate_orders not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')); +with tt as ( + select * from rotate_orders +) +select * from tt 
not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')) order by year; select * from rotate_orders not rotate exclude nulls (yearly_total for order_mode in ( store as 'direct', internet as 'online')); @@ -23,6 +31,10 @@ select * from rotate_orders not rotate include nulls ( yearly_total for ordre_mo select * from (select year, direct, online from (select year, order_mode, order_total from original_orders) rotate (sum(order_total) for order_mode in ('direct', 'online')) order by year) as rotate_t not rotate ( yearly_total for order_mode in (direct, online)); select * from (select year, order_mode, yearly_total from (select * from rotate_orders not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')))) rotate (sum(yearly_total) for order_mode in ('direct' as store, 'online' as internet) ) order by year; +with tt as( + select year, order_mode, order_total from original_orders +) +select * from (select year, direct, online from tt rotate (sum(order_total) for order_mode in ('direct', 'online')) order by year) as rotate_t not rotate ( yearly_total for order_mode in (direct, online)); -- create view create view rotate_view as (select * from (select year, order_mode, order_total from original_orders) as t rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year); @@ -36,12 +48,34 @@ drop view notrotate_view; -- 子查询 select * from (select year, direct as store, online as internet from (select year, order_mode, order_total from original_orders ) as orders rotate (sum(order_total) for order_mode in ('direct', 'online')) )order by year; +with orders as ( + select year, order_mode, order_total from original_orders +) +select * from (select year, direct as store, online as internet from orders rotate (sum(order_total) for order_mode in ('direct', 'online')) )order by year; select year, order_mode, yearly_total from(select * from rotate_orders not rotate ( yearly_total for 
order_mode in ( store as 'direct', internet as 'online'))) where year> 2020; +with a_tab as ( + select * from (select year, order_mode, order_total from original_orders ) as orders rotate (sum(order_total) for order_mode in ('direct', 'online')) order by year +), +b_tab as ( + select * from a_tab +) +select * from b_tab; + +with a_tab as ( + select * from rotate_orders +), +b_tab as (select * from (select * from a_tab not rotate exclude nulls (yearly_total for order_mode in ( store as 'direct', internet as 'online'))) order by year ) +select * from b_tab; -- SMP set query_dop = 4; select * from ( select year, order_mode, order_total from original_orders) rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; +with tt as ( + select year, order_mode, order_total from original_orders +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; + select * from rotate_orders not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')); set query_dop = 1; @@ -55,6 +89,29 @@ select * from "ROTATEorders" not rotate ( yearly_total for order_mode in ( store create table "rotate@orders" as (select * from (select year, order_mode, order_total from original_orders) as t rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year); select * from "rotate@orders" not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')); +--procedure +CREATE OR REPLACE PROCEDURE proc_rotate is +DECLARE + total_max int; +BEGIN + SELECT max(order_total) + INTO total_max + FROM original_orders; +RAISE NOTICE 'total_max: %', total_max; +CREATE TABLE proc_rotate_tt AS + SELECT * FROM ( + SELECT * FROM rotate_orders + WHERE store < total_max ) + not rotate ( yearly_total for order_mode in ( store as 'direct', internet as 'online')); + COMMIT; +END; +/ + +call proc_rotate(); +select * from 
proc_rotate_tt; + +drop procedure proc_rotate; +drop table proc_rotate_tt; drop table "'rotate'orders"; drop table "ROTATEorders"; drop table "rotate@orders"; @@ -112,6 +169,10 @@ insert into product_column values (1,'a',10),(2,'b',20),(3,'c',30),(4,'a',40),(5,'b',50),(6,'c',60); select * from (select name, value from product_column) rotate(sum(value) for name in ('a','b','c')); +with tt as ( + select name, value from product_column +) +select * from tt rotate(sum(value) for name in ('a','b','c')); create table product_column_un (a int, b int, c int) with (orientation = column); insert into product_column_un values(50,70,90); select * from product_column_un not rotate (value for name in (a,b,c)); @@ -124,8 +185,16 @@ create table orders_par (id int, year int, order_mode text, order_total int) par insert into orders_par values (1,2020,'direct',500), (2,2020,'online',1000), (3,2021,'online',200), (4,2021,'direct',100), (5,2022,'direct',2000), (6,2020,'direct',5000); select * from ( select year, order_mode, order_total from orders_par) rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; +with tt as ( + select year, order_mode, order_total from orders_par +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; select * from ( select year, order_mode, order_total from orders_par partition(par1)) rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; +with tt as ( + select year, order_mode, order_total from orders_par partition(par1) +) +select * from tt rotate (sum(order_total) for order_mode in ('direct' as store, 'online' as internet)) order by year; create table rotate_orders_par(year int, store int, internet int) partition by range(store)(partition par1 values less than (1000),partition par2 values less than (maxvalue)); insert into rotate_orders_par values (2020, 5500,1000),(2021,100,200),(2022,2000,null); 
@@ -138,6 +207,10 @@ drop table rotate_orders_par; create table product_ustore(id int, name varchar(10), value int) with (storage_type=ustore); insert into product_ustore values (10,'a',10),(20,'b',20),(30,'c',30),(101,'a',40),(201,'b',50),(301,'c',60); select * from (select name, value from product_ustore) rotate(sum(value) for name in ('a','b','c')); +with tt as ( + select name, value from product_ustore +) +select * from tt rotate(sum(value) for name in ('a','b','c')); create table stu_ustore (name varchar(20), math int, english int, chinese int) with (storage_type=ustore); insert into stu_ustore values('Tom',10,20,30); @@ -150,6 +223,10 @@ drop table stu_ustore; create table product_segment(id int, name varchar(10), value int) with (segment=on); insert into product_segment values (1,'a',10),(2,'b',20),(3,'c',30),(4,'a',40),(5,'b',50),(6,'c',60); select * from (select name, value from product_segment) rotate(sum(value) for name in ('a','b','c')); +with tt as ( + select name, value from product_segment +) +select * from tt rotate(sum(value) for name in ('a','b','c')); create table stu_segment (name varchar(20), math int, english int, chinese int) with (segment=on); insert into stu_segment values('Tom',10,20,30); -- Gitee From 5fb4738b59dd8bc24ddfa730047efa838174e02a Mon Sep 17 00:00:00 2001 From: q00421813 Date: Wed, 7 Aug 2024 18:53:01 +0800 Subject: [PATCH 155/347] uzone release buf --- src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp index fa64547d82..5d6c6859c7 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp @@ -885,6 +885,7 @@ UndoZone *UndoZoneGroup::SwitchZone(int zid, UndoPersistence upersistence) int retZid = -1; uzone->PrepareSwitch(); LWLockAcquire(UndoZoneLock, LW_EXCLUSIVE); + 
uzone->ReleaseSlotBuffer(); UndoZone *newUzone = getUnusedZone(upersistence, &retZid, zid); WHITEBOX_TEST_STUB(UNDO_SWITCH_ZONE_FAILED, WhiteboxDefaultErrorEmit); newUzone->Attach(); -- Gitee From 9789bb23009cd8d7834354fd5bc110179497bbdc Mon Sep 17 00:00:00 2001 From: JT_aimo Date: Thu, 8 Aug 2024 10:07:02 +0800 Subject: [PATCH 156/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9RTO=E6=80=A7=E8=83=BD?= =?UTF-8?q?=E4=BC=98=E5=8C=96pr=E5=BC=95=E5=85=A5=E7=9A=84=E5=88=9B?= =?UTF-8?q?=E5=BB=BA=E8=A1=A8=E7=A9=BA=E9=97=B4=E5=A4=B1=E8=B4=A5=E7=9A=84?= =?UTF-8?q?bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/smgr/segment/extent_group.cpp | 2 +- src/gausskernel/storage/smgr/segment/space.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/storage/smgr/segment/extent_group.cpp b/src/gausskernel/storage/smgr/segment/extent_group.cpp index 916e4de6be..970ee311db 100644 --- a/src/gausskernel/storage/smgr/segment/extent_group.cpp +++ b/src/gausskernel/storage/smgr/segment/extent_group.cpp @@ -109,7 +109,7 @@ void eg_init_df_ctrl(SegExtentGroup *seg) SegLogicFile *sf = (SegLogicFile *)palloc(sizeof(SegLogicFile)); MemoryContextSwitchTo(oldcnxt); df_ctrl_init(sf, seg->rnode, seg->forknum); - if (!ENABLE_DMS) { + if (!SS_STANDBY_MODE) { df_open_files(sf); } seg->segfile = sf; diff --git a/src/gausskernel/storage/smgr/segment/space.cpp b/src/gausskernel/storage/smgr/segment/space.cpp index dff60bdc46..f72d4cef96 100644 --- a/src/gausskernel/storage/smgr/segment/space.cpp +++ b/src/gausskernel/storage/smgr/segment/space.cpp @@ -245,7 +245,7 @@ void InitSpaceNode(SegSpace *spc, Oid spcNode, Oid dbNode, bool is_redo) } } - if (ENABLE_DMS) { + if (SS_STANDBY_MODE) { SSInitSegLogicFile(spc); } } -- Gitee From 9299bbc799594668ddee3322e729b2e0655a331d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Thu, 8 Aug 2024 10:46:39 +0800 Subject: [PATCH 157/347] 
=?UTF-8?q?=E4=BF=AE=E5=A4=8Dshow=20behavior=5Fcom?= =?UTF-8?q?pat=5Foptions=E6=98=BE=E7=A4=BA=E9=87=8D=E5=A4=8D=E8=AE=BE?= =?UTF-8?q?=E7=BD=AE=E7=9A=84=E5=8F=82=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc/guc_sql.cpp | 40 ++++++++++++++- .../expected/numeric_hide_tailing_zero.out | 51 +++++++++++++++++++ .../regress/sql/numeric_hide_tailing_zero.sql | 11 ++++ 3 files changed, 101 insertions(+), 1 deletion(-) diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index eeb0c890f5..c93ebfd885 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -179,6 +179,7 @@ static bool check_b_format_behavior_compat_options(char **newval, void **extra, static void assign_b_format_behavior_compat_options(const char *newval, void *extra); static bool check_behavior_compat_options(char** newval, void** extra, GucSource source); static void assign_behavior_compat_options(const char* newval, void* extra); +static const char* show_behavior_compat_options(void); static bool check_plsql_compile_behavior_compat_options(char** newval, void** extra, GucSource source); static void assign_plsql_compile_behavior_compat_options(const char* newval, void* extra); static void assign_connection_info(const char* newval, void* extra); @@ -2919,7 +2920,7 @@ static void InitSqlConfigureNamesString() "", check_behavior_compat_options, assign_behavior_compat_options, - NULL}, + show_behavior_compat_options}, {{"disable_keyword_options", PGC_USERSET, NODE_ALL, @@ -3705,6 +3706,43 @@ static void assign_behavior_compat_options(const char* newval, void* extra) u_sess->utils_cxt.behavior_compat_flags = result; } +static const char* show_behavior_compat_options(void) +{ + char *rawstring = NULL; + List *elemlist = NULL; + ListCell *cell = NULL; + int start = 0; + int64 result = 0; + StringInfoData strInfo; 
+ bool isFirst = true; + initStringInfo(&strInfo); + + rawstring = pstrdup(u_sess->attr.attr_sql.behavior_compat_string); + (void)SplitIdentifierString(rawstring, ',', &elemlist); + + foreach (cell, elemlist) { + for (start = 0; start < OPT_MAX; start++) { + const char *item = (const char*)lfirst(cell); + + if (strcmp(item, behavior_compat_options[start].name) == 0 + && (result & behavior_compat_options[start].flag) == 0) { + result += behavior_compat_options[start].flag; + if (isFirst) { + isFirst = false; + appendStringInfo(&strInfo, "%s", item); + } else { + appendStringInfo(&strInfo, ",%s", item); + } + } + } + } + + pfree(rawstring); + list_free(elemlist); + + return (const char *)strInfo.data; +} + typedef int16 (*getIgnoreKeywordTokenHook)(const char *item); static int get_ignore_keyword_token(const char *item) diff --git a/src/test/regress/expected/numeric_hide_tailing_zero.out b/src/test/regress/expected/numeric_hide_tailing_zero.out index 2b619fad3c..80c3cc14ca 100644 --- a/src/test/regress/expected/numeric_hide_tailing_zero.out +++ b/src/test/regress/expected/numeric_hide_tailing_zero.out @@ -31,7 +31,58 @@ select cast(123.123 as numeric(15,10)); 123.123 (1 row) +show behavior_compat_options; + behavior_compat_options +------------------------- + hide_tailing_zero +(1 row) + +set behavior_compat_options='hide_tailing_zero,hide_tailing_zero,truncate_numeric_tail_zero,display_leading_zero'; +show behavior_compat_options; + behavior_compat_options +------------------------------------------------------------------- + hide_tailing_zero,truncate_numeric_tail_zero,display_leading_zero +(1 row) + +select cast(123.123 as numeric(15,10)); + numeric +--------- + 123.123 +(1 row) + +set behavior_compat_options='hide_tailing_zero,truncate_numeric_tail_zero,display_leading_zero,display_leading_zero,correct_to_number'; +show behavior_compat_options; + behavior_compat_options +------------------------------------------------------------------------------------- + 
hide_tailing_zero,truncate_numeric_tail_zero,display_leading_zero,correct_to_number +(1 row) + +select cast(123.123 as numeric(15,10)); + numeric +--------- + 123.123 +(1 row) + +set behavior_compat_options='truncate_numeric_tail_zero,truncate_numeric_tail_zero, hide_tailing_zero,hide_tailing_zero,hide_tailing_zero, truncate_numeric_tail_zero,hide_tailing_zero'; +show behavior_compat_options; + behavior_compat_options +---------------------------------------------- + truncate_numeric_tail_zero,hide_tailing_zero +(1 row) + +select cast(123.123 as numeric(15,10)); + numeric +--------- + 123.123 +(1 row) + set behavior_compat_options=''; +show behavior_compat_options; + behavior_compat_options +------------------------- + +(1 row) + set behavior_compat_options='truncate_numeric_tail_zero'; create table test_num_zero (a number,b int); insert into test_num_zero values(0.1000, 1); diff --git a/src/test/regress/sql/numeric_hide_tailing_zero.sql b/src/test/regress/sql/numeric_hide_tailing_zero.sql index 63c16e5ecf..b52e5d3458 100644 --- a/src/test/regress/sql/numeric_hide_tailing_zero.sql +++ b/src/test/regress/sql/numeric_hide_tailing_zero.sql @@ -6,7 +6,18 @@ select cast(0 as numeric(15,10)); select cast(009.0000 as numeric(15,10)); set behavior_compat_options='hide_tailing_zero,hide_tailing_zero'; select cast(123.123 as numeric(15,10)); +show behavior_compat_options; +set behavior_compat_options='hide_tailing_zero,hide_tailing_zero,truncate_numeric_tail_zero,display_leading_zero'; +show behavior_compat_options; +select cast(123.123 as numeric(15,10)); +set behavior_compat_options='hide_tailing_zero,truncate_numeric_tail_zero,display_leading_zero,display_leading_zero,correct_to_number'; +show behavior_compat_options; +select cast(123.123 as numeric(15,10)); +set behavior_compat_options='truncate_numeric_tail_zero,truncate_numeric_tail_zero, hide_tailing_zero,hide_tailing_zero,hide_tailing_zero, truncate_numeric_tail_zero,hide_tailing_zero'; +show behavior_compat_options; 
+select cast(123.123 as numeric(15,10)); set behavior_compat_options=''; +show behavior_compat_options; set behavior_compat_options='truncate_numeric_tail_zero'; create table test_num_zero (a number,b int); -- Gitee From 5de9c36e0fd29f8441b7e87a5ae7fbae68098d25 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 8 Aug 2024 10:46:19 +0800 Subject: [PATCH 158/347] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=B5=8C=E5=A5=97?= =?UTF-8?q?=E6=95=B0=E7=BB=84=E6=8F=92=E5=85=A5=E8=A1=A8=E6=97=B6=E6=8A=A5?= =?UTF-8?q?=E9=94=99=E5=A4=84=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_target.cpp | 6 ++ src/common/pl/plpgsql/src/pl_comp.cpp | 28 +++++- .../expected/plpgsql_table_opengauss.out | 95 +++++++++++++++++++ .../regress/sql/plpgsql_table_opengauss.sql | 79 +++++++++++++++ 4 files changed, 207 insertions(+), 1 deletion(-) diff --git a/src/common/backend/parser/parse_target.cpp b/src/common/backend/parser/parse_target.cpp index ff960c57c4..22991b0acb 100644 --- a/src/common/backend/parser/parse_target.cpp +++ b/src/common/backend/parser/parse_target.cpp @@ -54,6 +54,8 @@ static List* ExpandSingleTable(ParseState* pstate, RangeTblEntry* rte, int locat static List* ExpandRowReference(ParseState* pstate, Node* expr, bool targetlist); static int FigureColnameInternal(Node* node, char** name); +extern void checkArrayTypeInsert(ParseState* pstate, Expr* expr); + /* * @Description: return the last filed's name and ignore * or subscrpts * @in field: list of field to search @@ -443,6 +445,10 @@ Expr* transformAssignedExpr(ParseState* pstate, Expr* expr, ParseExprKind exprKi type_id = exprType((Node*)expr); type_mod = exprTypmod((Node*)expr); + if (IsA(expr, Param) || (IsA(expr, ArrayRef) && ((ArrayRef*)expr)->refexpr != NULL)) { + checkArrayTypeInsert(pstate, expr); + } + ELOG_FIELD_NAME_START(colname); /* diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp 
b/src/common/pl/plpgsql/src/pl_comp.cpp index c853a087d4..b6f2f8499b 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -5685,4 +5685,30 @@ Oid searchsubtypebytypeId(Oid typeOid, int32 *typmod) errmsg("unsupported search subtype by type %u", typeOid))); } return type == TYPTYPE_TABLEOF ? searchsubtypebytypeId(resultType, typmod) : resultType; -} \ No newline at end of file +} + +void checkArrayTypeInsert(ParseState* pstate, Expr* expr) +{ + Param* param = NULL; + if (IsA(expr, Param)) { + param = (Param*)expr; + } else if (IsA(expr, ArrayRef) && IsA(((ArrayRef*)expr)->refexpr, Param)) { + param = (Param*)((ArrayRef*)expr)->refexpr; + } else { + return; + } + + if (pstate->p_pre_columnref_hook == plpgsql_pre_column_ref && pstate->p_ref_hook_state != NULL) { + PLpgSQL_expr* pl_expr = (PLpgSQL_expr*)pstate->p_ref_hook_state; + + if (pl_expr->func != NULL && param->paramid <= pl_expr->func->ndatums && + pl_expr->func->datums[param->paramid - 1]->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var = (PLpgSQL_var*)pl_expr->func->datums[param->paramid - 1]; + + if (var->nest_table != NULL && (IsA(expr, Param) || + var->nest_layers != list_length(((ArrayRef*)expr)->refupperindexpr))) { + ereport(ERROR, (errmsg("The tableof type variable cannot be used as an insertion value. 
"))); + } + } + } +} diff --git a/src/test/regress/expected/plpgsql_table_opengauss.out b/src/test/regress/expected/plpgsql_table_opengauss.out index 2c207040f4..f66bd458d6 100644 --- a/src/test/regress/expected/plpgsql_table_opengauss.out +++ b/src/test/regress/expected/plpgsql_table_opengauss.out @@ -926,6 +926,101 @@ INFO: plpgsql_table_opengauss.pck1.v2(2) is 3 0 (1 row) +create table t_PLArray_ (id int ,col varchar(500)); +create or replace procedure p_PLArray_ +as +type typ_PLArray_1 is table of varchar(50); +type typ_PLArray_2 is table of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +nstarr(1)(2):='第一行第二列'; + +insert into t_PLArray_(col) values (nstarr(1)); +end; +/ +call p_PLArray_(); +ERROR: The tableof type variable cannot be used as an insertion value. +CONTEXT: SQL statement "insert into t_PLArray_(col) values (nstarr[1])" +PL/pgSQL function p_plarray_() line 8 at SQL statement +create or replace procedure p_PLArray_ +as +type typ_PLArray_1 is table of varchar(50); +type typ_PLArray_2 is table of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +nstarr(1)(2):='第一行第二列'; + +insert into t_PLArray_(col) values (nstarr); +end; +/ +call p_PLArray_(); +ERROR: The tableof type variable cannot be used as an insertion value. 
+CONTEXT: SQL statement "insert into t_PLArray_(col) values (nstarr)" +PL/pgSQL function p_plarray_() line 8 at SQL statement +create or replace procedure p_PLArray_ +as +type typ_PLArray_1 is table of varchar(50); +type typ_PLArray_2 is table of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +nstarr(1)(2):='第一行第二列'; + +insert into t_PLArray_(col) values (nstarr(1)(1)); +insert into t_PLArray_(col) values (nstarr(1)(2)); +end; +/ +call p_PLArray_(); + p_plarray_ +------------ + +(1 row) + +declare +type typ_PLArray_1 is varray(3) of varchar(50); +type typ_PLArray_2 is varray(3) of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +insert into t_PLArray_(col) values (nstarr(1)); +end; +/ +ERROR: The tableof type variable cannot be used as an insertion value. +CONTEXT: SQL statement "insert into t_PLArray_(col) values (nstarr[1])" +PL/pgSQL function inline_code_block line 6 at SQL statement +declare +type typ_PLArray_1 is varray(3) of varchar(50); +type typ_PLArray_2 is varray(3) of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +insert into t_PLArray_(col) values (nstarr); +end; +/ +ERROR: The tableof type variable cannot be used as an insertion value. 
+CONTEXT: SQL statement "insert into t_PLArray_(col) values (nstarr)" +PL/pgSQL function inline_code_block line 6 at SQL statement +declare +type typ_PLArray_1 is varray(3) of varchar(50); +type typ_PLArray_2 is varray(3) of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +insert into t_PLArray_(col) values (nstarr(1)(1)); +end; +/ +select * from t_PLArray_; + id | col +----+-------------- + | 第一行第一列 + | 第一行第二列 + | 第一行第一列 +(3 rows) + +drop table t_PLArray_; +drop procedure p_PLArray_; -- test pg_get_function_result and pg_get_function_arguments create type ty_test is(col1 int,col2 char(10),col3 varchar2(10)); create type tyt_test is table of ty_test; diff --git a/src/test/regress/sql/plpgsql_table_opengauss.sql b/src/test/regress/sql/plpgsql_table_opengauss.sql index 323898b374..ab586da3c7 100644 --- a/src/test/regress/sql/plpgsql_table_opengauss.sql +++ b/src/test/regress/sql/plpgsql_table_opengauss.sql @@ -769,6 +769,85 @@ end; / call func2(); +create table t_PLArray_ (id int ,col varchar(500)); +create or replace procedure p_PLArray_ +as +type typ_PLArray_1 is table of varchar(50); +type typ_PLArray_2 is table of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +nstarr(1)(2):='第一行第二列'; + +insert into t_PLArray_(col) values (nstarr(1)); +end; +/ +call p_PLArray_(); + +create or replace procedure p_PLArray_ +as +type typ_PLArray_1 is table of varchar(50); +type typ_PLArray_2 is table of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +nstarr(1)(2):='第一行第二列'; + +insert into t_PLArray_(col) values (nstarr); +end; +/ +call p_PLArray_(); + +create or replace procedure p_PLArray_ +as +type typ_PLArray_1 is table of varchar(50); +type typ_PLArray_2 is table of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +nstarr(1)(2):='第一行第二列'; + +insert into t_PLArray_(col) values (nstarr(1)(1)); +insert into t_PLArray_(col) values (nstarr(1)(2)); +end; +/ +call p_PLArray_(); + +declare +type 
typ_PLArray_1 is varray(3) of varchar(50); +type typ_PLArray_2 is varray(3) of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +insert into t_PLArray_(col) values (nstarr(1)); +end; +/ + +declare +type typ_PLArray_1 is varray(3) of varchar(50); +type typ_PLArray_2 is varray(3) of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +insert into t_PLArray_(col) values (nstarr); +end; +/ + +declare +type typ_PLArray_1 is varray(3) of varchar(50); +type typ_PLArray_2 is varray(3) of typ_PLArray_1; +nstarr typ_PLArray_2; +begin +nstarr(1)(1):='第一行第一列'; +insert into t_PLArray_(col) values (nstarr(1)(1)); +end; +/ + +select * from t_PLArray_; + +drop table t_PLArray_; +drop procedure p_PLArray_; + -- test pg_get_function_result and pg_get_function_arguments create type ty_test is(col1 int,col2 char(10),col3 varchar2(10)); create type tyt_test is table of ty_test; -- Gitee From 7e3ce7481429721d0a07af8e0a3527ffe3096f0d Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 9 May 2024 15:43:54 +0800 Subject: [PATCH 159/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9A?= =?UTF-8?q?=E9=83=A8=E5=88=86=E7=B1=BB=E5=9E=8B=E4=B9=8B=E9=97=B4=E4=B8=8D?= =?UTF-8?q?=E6=94=AF=E6=8C=81GREATEST/LEAST=E5=87=BD=E6=95=B0=EF=BC=8C?= =?UTF-8?q?=E8=A1=A5=E5=85=85=E5=86=85=E6=A0=B8=E7=BB=93=E6=9E=84=E4=BD=93?= =?UTF-8?q?=E5=AD=97=E6=AE=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/copyfuncs.cpp | 4 ++++ src/common/backend/nodes/equalfuncs.cpp | 4 ++++ src/common/backend/nodes/outfuncs.cpp | 6 ++++++ src/common/backend/nodes/readfuncs.cpp | 7 ++++++- src/common/backend/utils/init/globals.cpp | 1 + src/include/miscadmin.h | 1 + src/include/nodes/execnodes.h | 1 + src/include/nodes/primnodes.h | 2 ++ 8 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index b7c011adc0..1bf48d267e 100644 --- 
a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -3178,6 +3178,10 @@ static MinMaxExpr* _copyMinMaxExpr(const MinMaxExpr* from) COPY_SCALAR_FIELD(op); COPY_NODE_FIELD(args); COPY_LOCATION_FIELD(location); + if (t_thrd.proc->workingVersionNum >= MINMAXEXPR_CMPTYPE_VERSION_NUM) { + COPY_SCALAR_FIELD(cmptype); + COPY_NODE_FIELD(cmpargs); + } return newnode; } diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index a7a830c65f..9bce5df971 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -591,6 +591,10 @@ static bool _equalMinMaxExpr(const MinMaxExpr* a, const MinMaxExpr* b) COMPARE_SCALAR_FIELD(op); COMPARE_NODE_FIELD(args); COMPARE_LOCATION_FIELD(location); + if (t_thrd.proc->workingVersionNum >= MINMAXEXPR_CMPTYPE_VERSION_NUM) { + COMPARE_SCALAR_FIELD(cmptype); + COMPARE_NODE_FIELD(cmpargs); + } return true; } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index c7849429d7..0f3be412ec 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -24,6 +24,7 @@ #include "miscadmin.h" #include "bulkload/dist_fdw.h" #include "foreign/fdwapi.h" +#include "nodes/primnodes.h" #include "nodes/plannodes.h" #include "nodes/relation.h" #include "nodes/nodeFuncs.h" @@ -3039,7 +3040,12 @@ static void _outMinMaxExpr(StringInfo str, MinMaxExpr* node) WRITE_ENUM_FIELD(op, MinMaxOp); WRITE_NODE_FIELD(args); WRITE_LOCATION_FIELD(location); + if (t_thrd.proc->workingVersionNum >= MINMAXEXPR_CMPTYPE_VERSION_NUM) { + WRITE_OID_FIELD(cmptype); + WRITE_NODE_FIELD(cmpargs); + WRITE_TYPEINFO_FIELD(cmptype); + } WRITE_TYPEINFO_FIELD(minmaxtype); } diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 46c0fcd4af..f4973a3a3f 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -33,7 +33,7 
@@ #include "miscadmin.h" #include "bulkload/dist_fdw.h" #include "catalog/gs_opt_model.h" -#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" #include "foreign/fdwapi.h" #include "nodes/plannodes.h" #include "optimizer/dataskew.h" @@ -2904,7 +2904,12 @@ static MinMaxExpr* _readMinMaxExpr(void) READ_ENUM_FIELD(op, MinMaxOp); READ_NODE_FIELD(args); READ_LOCATION_FIELD(location); + if (t_thrd.proc->workingVersionNum >= MINMAXEXPR_CMPTYPE_VERSION_NUM) { + READ_OID_FIELD(cmptype); + READ_NODE_FIELD(cmpargs); + READ_TYPEINFO_FIELD(cmptype); + } READ_TYPEINFO_FIELD(minmaxtype); READ_DONE(); diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 00258ea4d2..42231d92f8 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -82,6 +82,7 @@ const uint32 GRAND_VERSION_NUM = 92947; * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 MINMAXEXPR_CMPTYPE_VERSION_NUM = 92948; const uint32 PARTITION_NAME_VERSION_NUM = 92947; const uint32 AUDIT_SHA_VERSION_NUM = 92946; const uint32 NETTIME_TRACE_VERSION_NUM = 92945; diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 13d0c030a5..c2bafe7b13 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -153,6 +153,7 @@ extern const uint32 STRAIGHT_JOIN_VERSION_NUMBER; extern const uint32 PARALLEL_ENABLE_VERSION_NUM; extern const uint32 AUDIT_SHA_VERSION_NUM; extern const uint32 PARTITION_NAME_VERSION_NUM; +extern const uint32 MINMAXEXPR_CMPTYPE_VERSION_NUM; extern void register_backend_version(uint32 backend_version); extern bool contain_backend_version(uint32 version_number); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 577d35de03..c369b514ef 100755 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -1237,6 +1237,7 @@ typedef struct CoalesceExprState { typedef struct 
MinMaxExprState { ExprState xprstate; List* args; /* the arguments */ + List* cmpargs; /* the comparison arguments */ FmgrInfo cfunc; /* lookup info for comparison func */ FunctionCallInfoData cinfo; diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index f6f1dcbfa9..35f6156cca 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -1033,6 +1033,8 @@ typedef struct MinMaxExpr { MinMaxOp op; /* function to execute */ List* args; /* the arguments */ int location; /* token location, or -1 if unknown */ + Oid cmptype; /* the comparison type */ + List* cmpargs; /* the comparison arguments */ } MinMaxExpr; /* -- Gitee From 5ed68aae4376d7ba702a1552de7ff8bf85295545 Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Tue, 6 Aug 2024 17:08:39 +0800 Subject: [PATCH 160/347] =?UTF-8?q?=E5=AF=BC=E5=87=BA=E6=97=B6=E8=AE=BE?= =?UTF-8?q?=E7=BD=AEGUC=E5=8F=82=E6=95=B0=20behavior=5Fcompat=5Foptions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/pg_backup_archiver.cpp | 23 +++++++ src/test/regress/input/test_float_dump.source | 30 +++++++++ .../output/cursor_expression_dump.source | 1 + .../output/event_trigger_dump_restore.source | 1 + src/test/regress/output/mysql_function.source | 1 + src/test/regress/output/plpgsql_dump.source | 1 + .../regress/output/test_float_dump.source | 64 +++++++++++++++++++ .../regress/output/view_definer_test.source | 1 + src/test/regress/parallel_schedule0A | 2 +- 9 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 src/test/regress/input/test_float_dump.source create mode 100644 src/test/regress/output/test_float_dump.source diff --git a/src/bin/pg_dump/pg_backup_archiver.cpp b/src/bin/pg_dump/pg_backup_archiver.cpp index dfd4491c94..7600bac2dd 100644 --- a/src/bin/pg_dump/pg_backup_archiver.cpp +++ b/src/bin/pg_dump/pg_backup_archiver.cpp @@ -203,6 +203,7 @@ static ParallelStateEntry* 
GetMyPSEntry(ParallelState* pstate); static void archive_close_connection(int code, void* arg); static void take_down_nsname_in_drop_stmt(const char *stmt, char *result, int len); static void get_role_password(RestoreOptions* opts); +static char* GetBehaviorCompatOptions(ArchiveHandle* fout); /* * Wrapper functions. @@ -2835,6 +2836,11 @@ static void _doSetFixedOutputState(ArchiveHandle* AH) if (findDBCompatibility(&AH->publicArc, PQdb(GetConnection(&AH->publicArc))) && hasSpecificExtension(&AH->publicArc, "dolphin")) (void)ahprintf(AH, "SET dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length';\n"); + /* set behavior_compat_options */ + char* compatOptions = GetBehaviorCompatOptions(AH); + (void)ahprintf(AH, "SET behavior_compat_options = '%s';\n", compatOptions); + free(compatOptions); + (void)ahprintf(AH, "\n"); } @@ -5292,4 +5298,21 @@ bool hasSpecificExtension(Archive* fout, const char* extensionName) PQclear(res); destroyPQExpBuffer(query); return ntups != 0; +} + +static char* GetBehaviorCompatOptions(ArchiveHandle* fout) +{ + char* val = NULL; + PGresult* res = PQexec(fout->connection, "show behavior_compat_options;"); + + if (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK) { + val = gs_strdup(PQgetvalue(res, 0, 0)); + } else { + val = gs_strdup(""); + } + + PQclear(res); + res = NULL; + + return val; } \ No newline at end of file diff --git a/src/test/regress/input/test_float_dump.source b/src/test/regress/input/test_float_dump.source new file mode 100644 index 0000000000..48896516b0 --- /dev/null +++ b/src/test/regress/input/test_float_dump.source @@ -0,0 +1,30 @@ +create database test_float_dump; +\c test_float_dump +set search_path to test_float_dump; + +\c postgres +ALTER DATABASE test_float_dump SET behavior_compat_options TO 'float_as_numeric'; + +\c test_float_dump +show behavior_compat_options; +set behavior_compat_options = 'float_as_numeric'; + +create table test_float (c1 int, c2 float); +insert into 
test_float values (1, 3.14); +insert into test_float values (2, 1.79E+10); +insert into test_float values (3, -0.01); +select * from test_float order by c1; + +\! @abs_bindir@/gs_dump test_float_dump -p @portstring@ -f @abs_bindir@/test_float_dump.sql -F p -w >/dev/null 2>&1; echo $? + +drop table test_float; +\d + +\! @abs_bindir@/gsql -d test_float_dump -p @portstring@ -f @abs_bindir@/test_float_dump.sql; + +select * from test_float order by c1; + +drop table test_float; + +\c postgres +drop database test_float_dump; \ No newline at end of file diff --git a/src/test/regress/output/cursor_expression_dump.source b/src/test/regress/output/cursor_expression_dump.source index 315791618e..75c4932252 100644 --- a/src/test/regress/output/cursor_expression_dump.source +++ b/src/test/regress/output/cursor_expression_dump.source @@ -80,6 +80,7 @@ SET SET SET SET +SET CREATE SCHEMA ALTER SCHEMA SET diff --git a/src/test/regress/output/event_trigger_dump_restore.source b/src/test/regress/output/event_trigger_dump_restore.source index 572c821e3e..6b0ae8e1de 100644 --- a/src/test/regress/output/event_trigger_dump_restore.source +++ b/src/test/regress/output/event_trigger_dump_restore.source @@ -44,6 +44,7 @@ SET SET SET SET +SET CREATE FUNCTION ALTER FUNCTION CREATE EVENT TRIGGER diff --git a/src/test/regress/output/mysql_function.source b/src/test/regress/output/mysql_function.source index 3bfe266483..260aec06d8 100755 --- a/src/test/regress/output/mysql_function.source +++ b/src/test/regress/output/mysql_function.source @@ -115,6 +115,7 @@ select usename from pg_user where usesysid = (select proowner from pg_proc wher --? .* --? .* --? .* +--? 
.* \c mysqltestbak \sf proc_definer1 CREATE DEFINER = testusr1 PROCEDURE public.proc_definer1() diff --git a/src/test/regress/output/plpgsql_dump.source b/src/test/regress/output/plpgsql_dump.source index 6fb2943682..4c3f5633f9 100644 --- a/src/test/regress/output/plpgsql_dump.source +++ b/src/test/regress/output/plpgsql_dump.source @@ -112,6 +112,7 @@ SET SET SET SET +SET CREATE SCHEMA ALTER SCHEMA SET diff --git a/src/test/regress/output/test_float_dump.source b/src/test/regress/output/test_float_dump.source new file mode 100644 index 0000000000..bf4c26cd2e --- /dev/null +++ b/src/test/regress/output/test_float_dump.source @@ -0,0 +1,64 @@ +create database test_float_dump; +\c test_float_dump +set search_path to test_float_dump; +\c postgres +ALTER DATABASE test_float_dump SET behavior_compat_options TO 'float_as_numeric'; +\c test_float_dump +show behavior_compat_options; + behavior_compat_options +------------------------- + float_as_numeric +(1 row) + +set behavior_compat_options = 'float_as_numeric'; +create table test_float (c1 int, c2 float); +insert into test_float values (1, 3.14); +insert into test_float values (2, 1.79E+10); +insert into test_float values (3, -0.01); +select * from test_float order by c1; + c1 | c2 +----+------------- + 1 | 3.14 + 2 | 17900000000 + 3 | -.01 +(3 rows) + +\! @abs_bindir@/gs_dump test_float_dump -p @portstring@ -f @abs_bindir@/test_float_dump.sql -F p -w >/dev/null 2>&1; echo $? +0 +drop table test_float; +\d + List of relations + Schema | Name | Type | Owner | Storage +--------+------+------+-------+--------- +(0 rows) + +\! 
@abs_bindir@/gsql -d test_float_dump -p @portstring@ -f @abs_bindir@/test_float_dump.sql; +SET +SET +SET +SET +SET +SET +SET +SET +SET +SET +SET +CREATE TABLE +ALTER TABLE +REVOKE +REVOKE +GRANT +GRANT +--?.* +select * from test_float order by c1; + c1 | c2 +----+------------- + 1 | 3.14 + 2 | 17900000000 + 3 | -.01 +(3 rows) + +drop table test_float; +\c postgres +drop database test_float_dump; diff --git a/src/test/regress/output/view_definer_test.source b/src/test/regress/output/view_definer_test.source index 7676281355..4843666efc 100755 --- a/src/test/regress/output/view_definer_test.source +++ b/src/test/regress/output/view_definer_test.source @@ -157,6 +157,7 @@ SET SET SET SET +SET CREATE TABLE ALTER TABLE CREATE VIEW diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 1f9aadadf2..819c7cb6e0 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -472,7 +472,7 @@ test: select_into subselect_part2 gs_aggregate test: holdable_cursor cursor_expression cursor_expression_dump #test: portals_p2 window tsearch temp__6 col_subplan_base_2 -test: test_float test_numeric_with_neg_scale +test: test_float test_numeric_with_neg_scale test_float_dump test: alter_table_000 alter_table_002 alter_table_003 alter_table_modify #test: alter_table_001 alter_table_modify_ustore -- Gitee From a075baeb0c4c6f7f9071e234e2c271482eb06d8d Mon Sep 17 00:00:00 2001 From: zhang_xubo <2578876417@qq.com> Date: Sat, 3 Aug 2024 16:39:39 +0800 Subject: [PATCH 161/347] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E6=89=93=E5=8C=85?= =?UTF-8?q?=E5=90=8D=E7=A7=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- build/script/cmake_package_mini.sh | 17 +++++++------- build/script/utils/cmake_compile.sh | 2 +- build/script/utils/common.sh | 26 ++++++++++----------- build/script/utils/internal_packages.sh | 1 - build/script/utils/make_compile.sh | 2 +- docker/upgrade/upgrade_common.sh | 30 
+++++++++++++++++++++++++ liteom/install.sh | 13 ++--------- liteom/upgrade_common.sh | 3 +-- 8 files changed, 57 insertions(+), 37 deletions(-) diff --git a/build/script/cmake_package_mini.sh b/build/script/cmake_package_mini.sh index 7e918aed00..411e2219b4 100644 --- a/build/script/cmake_package_mini.sh +++ b/build/script/cmake_package_mini.sh @@ -43,7 +43,7 @@ else fi if [ X"$kernel" == X"euleros" ]; then - dist_version="EULER" + dist_version="EulerOS" elif [ X"$kernel" == X"centos" ]; then dist_version="CentOS" elif [ X"$kernel" == X"openeuler" ]; then @@ -54,6 +54,8 @@ else dist_version="Platform" fi +os_version=$(cat /etc/os-release | grep -w VERSION_ID | awk -F '"' '{print $2}') + show_package=false gcc_version="10.3.1" @@ -290,12 +292,11 @@ fi ####################################################################### ## declare all package name ####################################################################### -declare version_string="${mppdb_name_for_package}-${version_number}" -declare package_pre_name="${version_string}-${dist_version}-${PLATFORM_ARCH}" -declare server_package_name="${package_pre_name}.${install_package_format}.gz" +declare package_version_name="${version_number}-${dist_version}${os_version}-${PLATFORM_ARCH}" +declare server_package_name="${mppdb_name_for_package}-${package_version_name}.${install_package_format}.gz" -declare libpq_package_name="${package_pre_name}-Libpq.${install_package_format}.gz" -declare symbol_package_name="${package_pre_name}-symbol.${install_package_format}.gz" +declare libpq_package_name="${mppdb_name_for_package}-Libpq-${package_version_name}.${install_package_format}.gz" +declare symbol_package_name="${mppdb_name_for_package}-symbol-${package_version_name}.${install_package_format}.gz" echo "[makemppdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}" ROOT_DIR=$(dirname "$SCRIPT_DIR") @@ -591,7 +592,7 @@ function target_file_copy_for_non_server() done } -declare bin_name="${package_pre_name}.bin" +declare 
bin_name="${mppdb_name_for_package}-${package_version_name}.bin" declare sha256_name='' declare script_dir="${ROOT_DIR}/script" @@ -643,7 +644,7 @@ function target_file_copy() echo "End generate ${bin_name} bin file" >> "$LOG_FILE" 2>&1 #generate sha256 file - sha256_name="${package_pre_name}.sha256" + sha256_name="${mppdb_name_for_package}-${package_version_name}.sha256" echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1 sha256sum "${bin_name}" | awk -F" " '{print $1}' > "$sha256_name" if [ $? -ne 0 ]; then diff --git a/build/script/utils/cmake_compile.sh b/build/script/utils/cmake_compile.sh index 0caa1fa316..92c2ba2e36 100644 --- a/build/script/utils/cmake_compile.sh +++ b/build/script/utils/cmake_compile.sh @@ -21,7 +21,7 @@ function gaussdb_pkg_pre_clean() function read_gaussdb_version() { cd ${SCRIPT_DIR} - echo "${product_name}-${version_number}" > version.cfg + echo "${product_name}-Server-${version_number}" > version.cfg #auto read the number from kernal globals.cpp, no need to change it here } diff --git a/build/script/utils/common.sh b/build/script/utils/common.sh index 8b79e10433..af3d3dcb7b 100644 --- a/build/script/utils/common.sh +++ b/build/script/utils/common.sh @@ -59,14 +59,13 @@ select_package_command ####################################################################### ##get os dist version ####################################################################### +os_name=$(cat /etc/os-release | grep -w NAME | awk -F '"' '{print $2}') if [[ -f "/etc/openEuler-release" ]]; then os_name="openEuler" elif [[ -f "/etc/euleros-release" ]]; then os_name="EulerOS" elif [[ -f "/etc/centos-release" ]]; then os_name="CentOS" -elif [[ -f "/etc/openEuler-release" ]]; then - os_name="openEuler" elif [[ -f "/etc/FusionOS-release" ]]; then os_name="FusionOS" elif [[ -f "/etc/kylin-release" ]]; then @@ -78,13 +77,15 @@ elif [[ -f "/etc/CSIOS-release" ]]; then else os_name=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z | sed 
's/.*/\L&/; s/[a-z]*/\u&/g') fi +os_version=$(cat /etc/os-release | grep -w VERSION_ID | awk -F '"' '{print $2}') + +if [ "$os_name"X == ""X ]; then + echo "os name is empty" + exit 1 +fi ##add platform architecture information if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then - if [ "$os_name" != "openEuler" ] && [ "$os_name" != "EulerOS" ] && [ "$os_name" != "FusionOS" ] && [ "$os_name" != "Kylin" ] && [ "$dist_version" != "Asianux" ] && [ "$os_name" != "CSIOS" ]; then - echo "We only support NUMA on openEuler(aarch64), EulerOS(aarch64), FusionOS(aarch64), Kylin(aarch64), Asianux, CSIOS(aarch64) platform." - exit 1 - fi GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA" fi @@ -144,10 +145,9 @@ declare release_file_list="${PLATFORM_ARCH}_${product_mode}_list" ####################################################################### ## declare all package name ####################################################################### -declare version_string="${product_name}-${version_number}" -declare package_pre_name="${version_string}-${os_name}-${PLATFORM}bit" -declare libpq_package_name="${package_pre_name}-Libpq.tar.gz" -declare tools_package_name="${package_pre_name}-tools.tar.gz" -declare kernel_package_name="${package_pre_name}.tar.bz2" -declare symbol_package_name="${package_pre_name}-symbol.tar.gz" -declare sha256_name="${package_pre_name}.sha256" +declare package_version_name="${version_number}-${os_name}${os_version}-${PLATFORM_ARCH}" +declare libpq_package_name="${product_name}-Libpq-${package_version_name}.tar.gz" +declare tools_package_name="${product_name}-Tools-${package_version_name}.tar.gz" +declare kernel_package_name="${product_name}-Server-${package_version_name}.tar.gz" +declare symbol_package_name="${product_name}-Symbol-${package_version_name}.tar.gz" +declare sha256_name="${product_name}-Server-${package_version_name}.sha256" diff --git a/build/script/utils/internal_packages.sh b/build/script/utils/internal_packages.sh index a920d15cba..f797b09fb2 100644 --- 
a/build/script/utils/internal_packages.sh +++ b/build/script/utils/internal_packages.sh @@ -71,7 +71,6 @@ function target_file_copy() echo "End generate ${kernel_package_name} tar file" >> "$LOG_FILE" 2>&1 #generate sha256 file - sha256_name="${package_pre_name}.sha256" echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1 sha256sum "${kernel_package_name}" | awk -F" " '{print $1}' > "$sha256_name" if [ $? -ne 0 ]; then diff --git a/build/script/utils/make_compile.sh b/build/script/utils/make_compile.sh index d5d7f83d4f..9dc4a7b970 100644 --- a/build/script/utils/make_compile.sh +++ b/build/script/utils/make_compile.sh @@ -21,7 +21,7 @@ function gaussdb_pkg_pre_clean() function read_gaussdb_version() { cd ${SCRIPT_DIR} - echo "${product_name}-${version_number}" > version.cfg + echo "${product_name}-Server-${version_number}" > version.cfg #auto read the number from kernal globals.cpp, no need to change it here } diff --git a/docker/upgrade/upgrade_common.sh b/docker/upgrade/upgrade_common.sh index fb63888add..87dff17d60 100644 --- a/docker/upgrade/upgrade_common.sh +++ b/docker/upgrade/upgrade_common.sh @@ -988,6 +988,27 @@ function upgrade_pre() { log "no need do upgrade_pre step" fi } + +function cp_dolphin_upgrade_script_step1() { + if ls "$GAUSSHOME"/share/postgresql/extension/ | grep -qE "dolphin--(.*)--(.*)sql" ; then + if cp -f "$GAUSSHOME"/share/postgresql/extension/dolphin--*--*sql "$GAUSS_TMP_PATH"/ ; then + log "cp dolphin upgrade script step1[upgrade_pre] successfully" + else + die "cp dolphin upgrade script step1[upgrade_pre] failed" ${err_upgrade_pre} + fi + fi +} + +function cp_dolphin_upgrade_script_step2() { + if ls "$GAUSS_TMP_PATH/" | grep -qE "dolphin--(.*)--(.*)sql" ; then + if cp -f "$GAUSS_TMP_PATH"/dolphin--*--*sql "$GAUSSHOME"/share/postgresql/extension/ ; then + log "cp dolphin upgrade script step1[upgrade_bin] successfully" + else + die "cp dolphin upgrade script step1[upgrade_bin] failed" ${err_upgrade_pre} + fi + fi +} 
+ function upgrade_pre_step1() { check_disk check_version @@ -996,6 +1017,7 @@ function upgrade_pre_step1() { prepare_sql_all fi bak_gauss + cp_dolphin_upgrade_script_step1 record_step 1 } @@ -1024,6 +1046,7 @@ function upgrade_pre_step2() { function upgrade_bin() { parses_step + cp_dolphin_upgrade_script_step2 if [[ "$current_step" -lt 0 ]]; then die "Step file may be changed invalid" ${err_upgrade_bin} elif [[ "$current_step" -lt 2 ]]; then @@ -1146,6 +1169,13 @@ function upgrade_post_step56() { die "Guassdb is not running" ${err_upgrade_post} fi record_step 5 + + if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback-post_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback-post_otherdb.sql otherdb; then + debug "upgrade-rollback post sql successfully" + else + die "upgrade-rollback post sql failed" ${err_rollback_post} + fi + if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade-post_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade-post_otherdb.sql otherdb; then debug "upgrade post sql successfully" else diff --git a/liteom/install.sh b/liteom/install.sh index cd592873e4..b40d69e89b 100644 --- a/liteom/install.sh +++ b/liteom/install.sh @@ -570,19 +570,10 @@ function decompress() fi log "kernel: ${kernel}" - # detect platform information. 
- platform=32 - bit=$(getconf LONG_BIT) - if [ "$bit" -eq 64 ] - then - platform=64 - fi - platform_arch=$(uname -p) - bin_name="openGauss-Lite.*-${kernel}-${platform_arch}" - bin_res=$(ls -a | grep -E "${bin_name}.bin") + bin_res=$(ls | grep openGauss-Lite*.bin) if [ "${bin_res}" = "" ] then - die "can not find suitable bin file, expected bin file is ${bin_name}.bin" + die "can not find suitable bin file, expected bin file is ${bin_res}" fi log "bin file: ${bin_res}" diff --git a/liteom/upgrade_common.sh b/liteom/upgrade_common.sh index e0fe9057bd..21cc46bfe8 100644 --- a/liteom/upgrade_common.sh +++ b/liteom/upgrade_common.sh @@ -399,8 +399,7 @@ function check_pkg() { #detect platform information. platform_arch=$(uname -p) - bin_name="openGauss-Lite.*-${kernel}-${platform_arch}" - binfile=$(ls -a | grep -E "${binname}.bin") + binfile=$(ls | grep openGauss-Lite*.bin) shafile=${binfile%.*}.sha256 if [[ ! -f "${binfile}" ]] || [[ ! -f "${shafile}" ]]; then die "bin or sha256 file not exit for the platform ${kernel}-${platform_arch}!" 
${err_upgrade_pre} -- Gitee From 53b349d84947ee624800b23db06115def2d10280 Mon Sep 17 00:00:00 2001 From: Hemny Date: Mon, 5 Aug 2024 15:49:59 +0800 Subject: [PATCH 162/347] =?UTF-8?q?=E9=80=BB=E8=BE=91=E5=A4=8D=E5=88=B6?= =?UTF-8?q?=E6=94=AF=E6=8C=81Alter=20table=E7=9A=84DDL=E8=AF=AD=E5=8F=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/mppdb_decoding/mppdb_decoding.cpp | 14 +- contrib/sql_decoding/sql_decoding.cpp | 126 + contrib/test_decoding/test_decoding.cpp | 65 +- src/bin/pg_dump/pg_dump.cpp | 32 +- src/bin/pg_dump/pg_dump.h | 1 + src/common/backend/catalog/builtin_funcs.ini | 4 + src/common/backend/catalog/heap.cpp | 25 +- src/common/backend/catalog/pg_publication.cpp | 1 + src/common/backend/nodes/copyfuncs.cpp | 13 + src/common/backend/nodes/equalfuncs.cpp | 12 + src/common/backend/nodes/outfuncs.cpp | 3 + src/common/backend/nodes/readfuncs.cpp | 3 + src/common/backend/parser/gram.y | 1 + src/common/backend/parser/parse_utilcmd.cpp | 1 + src/common/backend/utils/adt/regproc.cpp | 2 +- src/common/backend/utils/adt/ruleutils.cpp | 111 +- src/common/backend/utils/cache/relcache.cpp | 12 +- src/common/backend/utils/init/globals.cpp | 3 +- .../optimizer/commands/cluster.cpp | 14 +- .../optimizer/commands/ddldeparse.cpp | 4043 +++++++++++++++-- .../optimizer/commands/event_trigger.cpp | 189 +- .../optimizer/commands/eventcmds.cpp | 60 +- .../optimizer/commands/publicationcmds.cpp | 85 +- .../optimizer/commands/subscriptioncmds.cpp | 160 + .../optimizer/commands/tablecmds.cpp | 736 ++- src/gausskernel/process/tcop/utility.cpp | 24 +- .../storage/access/common/reloptions.cpp | 8 +- .../storage/access/heap/heapam.cpp | 7 + .../storage/access/redo/redo_heapam.cpp | 4 + .../storage/access/rmgrdesc/heapdesc.cpp | 15 + .../access/transam/extreme_rto/dispatcher.cpp | 2 +- .../ondemand_extreme_rto/dispatcher.cpp | 2 +- .../transam/parallel_recovery/dispatcher.cpp | 2 +- .../replication/logical/ddlmessage.cpp | 2 
+- .../replication/logical/ddltrigger.cpp | 284 +- .../storage/replication/logical/decode.cpp | 42 + .../storage/replication/logical/logical.cpp | 49 + .../replication/logical/logical_parse.cpp | 2 + .../storage/replication/logical/proto.cpp | 53 + .../replication/logical/reorderbuffer.cpp | 42 +- .../storage/replication/logical/worker.cpp | 74 +- .../storage/replication/pgoutput/pgoutput.cpp | 164 +- src/include/access/htup.h | 22 + src/include/catalog/heap.h | 47 +- src/include/catalog/pg_publication.h | 8 +- .../rollback-post_catalog_maindb_92_949.sql | 1 + .../rollback-post_catalog_otherdb_92_949.sql | 1 + .../upgrade-post_catalog_maindb_92_949.sql | 6 + .../upgrade-post_catalog_otherdb_92_949.sql | 6 + src/include/commands/defrem.h | 8 +- src/include/commands/event_trigger.h | 7 +- src/include/commands/tablecmds.h | 5 + src/include/knl/knl_session.h | 1 + src/include/miscadmin.h | 1 + src/include/nodes/parsenodes.h | 1 + src/include/nodes/parsenodes_common.h | 3 + src/include/replication/ddlmessage.h | 9 +- src/include/replication/logicalproto.h | 4 + src/include/replication/output_plugin.h | 12 + src/include/replication/reorderbuffer.h | 26 +- src/include/tcop/ddldeparse.h | 65 + src/include/tcop/deparse_utility.h | 2 + src/include/utils/rel.h | 3 + src/test/regress/expected/object_address.out | 4 +- .../regress/expected/on_update_session2.out | 2 +- src/test/regress/output/publication.source | 4 +- src/test/regress/pg_regress.cpp | 2 +- src/test/subscription/schedule | 2 +- .../A/acceptable_diff/create_table.diff | 25 - .../ddl_replication_sql/A/create_table.sql | 21 + .../A/ddl_alter_function.setup | 9 + .../A/ddl_alter_function.sql | 55 + .../A/ddl_alter_function.teardown | 8 + .../ddl_replication_sql/A/ddl_alter_table.sql | 2418 ++++++++++ .../A/ddl_alter_table_001.sql | 785 ++++ .../A/ddl_alter_table_002.sql | 2011 ++++++++ .../A/ddl_alter_table_fastcheck.setup | 11 + .../A/ddl_alter_table_fastcheck.sql | 692 +++ .../A/ddl_alter_table_fastcheck.teardown 
| 19 + .../A/ddl_alter_table_rewrite.setup | 9 + .../A/ddl_alter_table_rewrite.sql | 26 + .../A/ddl_alter_table_rewrite.teardown | 8 + .../A/ddl_alter_table_subpartition.setup | 24 + .../A/ddl_alter_table_subpartition.sql | 2156 +++++++++ .../A/ddl_create_trigger.sql | 100 + .../ddl_replication_sql/A/ddl_create_type.sql | 5 + .../ddl_replication_sql/A/ddl_drop_type.sql | 13 + .../A/ddl_subpartition_tablespace.setup | 20 + .../A/ddl_subpartition_tablespace.sql | 1013 +++++ .../ddl_replication_sql/A/ddl_view_def.sql | 36 + .../acceptable_diff/ddl_alter_table_002.diff | 4 + .../ddl_alter_table_rewrite.diff | 4 + .../ddl_replication_sql/B/create_table.sql | 8 +- .../B/ddl_alter_function.setup | 9 + .../B/ddl_alter_function.sql | 67 + .../B/ddl_alter_function.teardown | 8 + .../B/ddl_alter_schema.setup | 9 + .../B/ddl_alter_schema.sql | 15 + .../B/ddl_alter_schema.teardown | 8 + .../ddl_replication_sql/B/ddl_alter_table.sql | 2358 ++++++++++ .../B/ddl_alter_table_001.sql | 789 ++++ .../B/ddl_alter_table_002.sql | 2011 ++++++++ .../B/ddl_alter_table_fastcheck.setup | 15 + .../B/ddl_alter_table_fastcheck.sql | 930 ++++ .../B/ddl_alter_table_fastcheck.teardown | 19 + .../B/ddl_alter_table_rewrite.setup | 9 + .../B/ddl_alter_table_rewrite.sql | 26 + .../B/ddl_alter_table_rewrite.teardown | 8 + .../B/ddl_alter_table_subpartition.setup | 24 + .../B/ddl_alter_table_subpartition.sql | 2242 +++++++++ .../B/ddl_create_trigger.sql | 109 + .../ddl_replication_sql/B/ddl_create_type.sql | 2 + .../ddl_replication_sql/B/ddl_drop_type.sql | 13 + .../B/ddl_subpartition_tablespace.setup | 20 + .../B/ddl_subpartition_tablespace.sql | 1052 +++++ .../ddl_replication_sql/B/ddl_view_def.sql | 36 + .../testcase/dump_expected/dump_db_puball.pub | 3 +- .../testcase/dump_expected/dump_db_puball.sub | 1 - .../dump_expected/dump_db_pubtable.pub | 1 - .../dump_expected/dump_db_pubtable.sub | 1 - 120 files changed, 25361 insertions(+), 648 deletions(-) create mode 100644 
src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_949.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_949.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_949.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_949.sql delete mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/acceptable_diff/create_table.diff create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.teardown create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_001.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_002.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.teardown create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.teardown create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.sql create mode 100644 
src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_trigger.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_type.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_drop_type.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_view_def.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_002.diff create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_rewrite.diff create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.teardown create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.teardown create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_001.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_002.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.teardown create mode 100644 
src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.teardown create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_trigger.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_type.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_drop_type.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_view_def.sql diff --git a/contrib/mppdb_decoding/mppdb_decoding.cpp b/contrib/mppdb_decoding/mppdb_decoding.cpp index 2734c3e1c3..4953f24e5c 100644 --- a/contrib/mppdb_decoding/mppdb_decoding.cpp +++ b/contrib/mppdb_decoding/mppdb_decoding.cpp @@ -438,9 +438,19 @@ static char *mppdb_deparse_command_type(DeparsedCommandType cmdtype) case DCT_SimpleCmd: return "Simple"; case DCT_TableDropStart: - return "Drop table"; + return "Drop Table"; case DCT_TableDropEnd: return "Drop Table End"; + case DCT_TableAlter: + return "Alter Table"; + case DCT_ObjectCreate: + return "Create Object"; + case DCT_ObjectDrop: + return "Drop Object"; + case DCT_TypeDropStart: + return "Drop Type"; + case DCT_TypeDropEnd: + return "Drop Type End"; default: Assert(false); } @@ -476,7 +486,7 @@ static void pg_decode_ddl(LogicalDecodingContext *ctx, sz, message); - if (cmdtype != DCT_TableDropStart) { + if (cmdtype != DCT_TableDropStart && cmdtype != 
DCT_TypeDropStart) { char *tmp = pstrdup(message); char *owner = NULL; char *decodestring = deparse_ddl_json_to_string(tmp, &owner); diff --git a/contrib/sql_decoding/sql_decoding.cpp b/contrib/sql_decoding/sql_decoding.cpp index 065d8466f5..f545ec6a77 100644 --- a/contrib/sql_decoding/sql_decoding.cpp +++ b/contrib/sql_decoding/sql_decoding.cpp @@ -44,6 +44,7 @@ #include "utils/typcache.h" #include "replication/output_plugin.h" #include "replication/logical.h" +#include "tcop/ddldeparse.h" PG_MODULE_MAGIC; @@ -59,6 +60,15 @@ static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* t static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change); static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id); +static void pg_decode_truncate(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + int nrelations, Relation relations[], + ReorderBufferChange *change); +static void pg_decode_ddl(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, XLogRecPtr message_lsn, + const char *prefix, Oid relid, + DeparsedCommandType cmdtype, + Size sz, const char *message); typedef struct { MemoryContext context; @@ -78,6 +88,8 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) cb->begin_cb = pg_decode_begin_txn; cb->change_cb = pg_decode_change; cb->commit_cb = pg_decode_commit_txn; + cb->truncate_cb = pg_decode_truncate; + cb->ddl_cb = pg_decode_ddl; cb->abort_cb = pg_decode_abort_txn; cb->filter_by_origin_cb = pg_decode_filter; cb->shutdown_cb = pg_decode_shutdown; @@ -507,3 +519,117 @@ static void pg_decode_change( OutputPluginWrite(ctx, true); } + + +static void pg_decode_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + TestDecodingData *data; + MemoryContext old; + int i; + + data = (TestDecodingData*)ctx->output_plugin_private; + + /* output BEGIN if we haven't yet */ + if 
(data->skip_empty_xacts && !data->xact_wrote_changes) { + pg_output_begin(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + + /* Avoid leaking memory by using and resetting our own context */ + old = MemoryContextSwitchTo(data->context); + + OutputPluginPrepareWrite(ctx, true); + + appendStringInfoString(ctx->out, "table "); + + for (i = 0; i < nrelations; i++) { + if (i > 0) + appendStringInfoString(ctx->out, ", "); + + appendStringInfoString(ctx->out, + quote_qualified_identifier(get_namespace_name(relations[i]->rd_rel->relnamespace), + NameStr(relations[i]->rd_rel->relname))); + } + + appendStringInfoString(ctx->out, ": TRUNCATE:"); + + if (change->data.truncate.restart_seqs + || change->data.truncate.cascade) { + if (change->data.truncate.restart_seqs) + appendStringInfo(ctx->out, " restart_seqs"); + if (change->data.truncate.cascade) + appendStringInfo(ctx->out, " cascade"); + } else + appendStringInfoString(ctx->out, " (no-flags)"); + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + + OutputPluginWrite(ctx, true); +} + +static char* deparse_command_type(DeparsedCommandType cmdtype) +{ + switch (cmdtype) { + case DCT_SimpleCmd: + return "Simple"; + case DCT_TableDropStart: + return "Drop Table"; + case DCT_TableDropEnd: + return "Drop Table End"; + case DCT_TableAlter: + return "Alter Table"; + case DCT_ObjectCreate: + return "Create Object"; + case DCT_ObjectDrop: + return "Drop Object"; + case DCT_TypeDropStart: + return "Drop Type"; + case DCT_TypeDropEnd: + return "Drop Type End"; + default: + Assert(false); + } + return NULL; +} + +static void pg_decode_ddl(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, XLogRecPtr message_lsn, + const char *prefix, Oid relid, + DeparsedCommandType cmdtype, + Size sz, const char *message) +{ + TestDecodingData *data; + MemoryContext old; + + data = (TestDecodingData*)ctx->output_plugin_private; + + if (data->skip_empty_xacts && !data->xact_wrote_changes) { + pg_output_begin(ctx, 
data, txn, false); + } + data->xact_wrote_changes = true; + + /* Avoid leaking memory by using and resetting our own context */ + old = MemoryContextSwitchTo(data->context); + OutputPluginPrepareWrite(ctx, true); + + appendStringInfo(ctx->out, "message: prefix %s, relid %u, cmdtype: %s, sz: %lu content: %s", + prefix, + relid, + deparse_command_type(cmdtype), + sz, + message); + if (cmdtype != DCT_TableDropStart) { + char *tmp = pstrdup(message); + char* owner = NULL; + char* decodestring = deparse_ddl_json_to_string(tmp, &owner); + appendStringInfo(ctx->out, "\ndecode to : %s, [owner %s]", decodestring, owner ? owner : "none"); + pfree(tmp); + } + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + + OutputPluginWrite(ctx, true); +} \ No newline at end of file diff --git a/contrib/test_decoding/test_decoding.cpp b/contrib/test_decoding/test_decoding.cpp index 7f5dacfb98..2b82504d5b 100644 --- a/contrib/test_decoding/test_decoding.cpp +++ b/contrib/test_decoding/test_decoding.cpp @@ -49,6 +49,10 @@ static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change); static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id); +static void pg_decode_truncate(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + int nrelations, Relation relations[], + ReorderBufferChange *change); static void pg_decode_ddl(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr message_lsn, const char *prefix, Oid relid, DeparsedCommandType cmdtype, Size sz, const char *message); @@ -65,6 +69,7 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) cb->startup_cb = pg_decode_startup; cb->begin_cb = pg_decode_begin_txn; cb->change_cb = pg_decode_change; + cb->truncate_cb = pg_decode_truncate; cb->commit_cb = pg_decode_commit_txn; cb->abort_cb = pg_decode_abort_txn; cb->prepare_cb = 
pg_decode_prepare_txn; @@ -312,15 +317,73 @@ static void pg_decode_change( OutputPluginWrite(ctx, true); } +static void pg_decode_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + PluginTestDecodingData *data; + MemoryContext old; + int i; + + data = (PluginTestDecodingData*)ctx->output_plugin_private; + + /* output BEGIN if we haven't yet */ + if (data->skip_empty_xacts && !data->xact_wrote_changes) { + pg_output_begin(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + + /* Avoid leaking memory by using and resetting our own context */ + old = MemoryContextSwitchTo(data->context); + + OutputPluginPrepareWrite(ctx, true); + + appendStringInfoString(ctx->out, "table "); + + for (i = 0; i < nrelations; i++) { + if (i > 0) + appendStringInfoString(ctx->out, ", "); + + appendStringInfoString(ctx->out, + quote_qualified_identifier(get_namespace_name(relations[i]->rd_rel->relnamespace), + NameStr(relations[i]->rd_rel->relname))); + } + + appendStringInfoString(ctx->out, ": TRUNCATE:"); + + if (change->data.truncate.restart_seqs + || change->data.truncate.cascade) { + if (change->data.truncate.restart_seqs) + appendStringInfo(ctx->out, " restart_seqs"); + if (change->data.truncate.cascade) + appendStringInfo(ctx->out, " cascade"); + } else + appendStringInfoString(ctx->out, " (no-flags)"); + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + + OutputPluginWrite(ctx, true); +} + static char *deparse_command_type(DeparsedCommandType cmdtype) { switch (cmdtype) { case DCT_SimpleCmd: return "Simple"; case DCT_TableDropStart: - return "Drop table"; + return "Drop Table"; case DCT_TableDropEnd: return "Drop Table End"; + case DCT_TableAlter: + return "Alter Table"; + case DCT_ObjectCreate: + return "Create Object"; + case DCT_ObjectDrop: + return "Drop Object"; + case DCT_TypeDropStart: + return "Drop Type"; + case DCT_TypeDropEnd: + return "Drop Type End"; 
default: Assert(false); } diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 9d44a2739f..c82db46370 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -302,6 +302,7 @@ const uint32 EVENT_VERSION = 92844; const uint32 EVENT_TRIGGER_VERSION_NUM = 92845; const uint32 RB_OBJECT_VERSION_NUM = 92831; const uint32 PUBLICATION_DDL_VERSION_NUM = 92921; +const uint32 PUBLICATION_DDL_AT_VERSION_NUM = 92949; #ifdef DUMPSYSLOG char* syslogpath = NULL; @@ -4411,6 +4412,7 @@ void getPublications(Archive *fout) int i_pubinsert; int i_pubupdate; int i_pubdelete; + int i_pubtruncate = 0; int i_pubddl = 0; int i, ntups; @@ -4423,7 +4425,14 @@ void getPublications(Archive *fout) resetPQExpBuffer(query); /* Get the publications. */ - if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { + if (GetVersionNum(fout) >= PUBLICATION_DDL_AT_VERSION_NUM) { + appendPQExpBuffer(query, + "SELECT p.tableoid, p.oid, p.pubname, " + "(%s p.pubowner) AS rolname, " + "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubddl " + "FROM pg_catalog.pg_publication p", + username_subquery); + } else if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, p.pubname, " "(%s p.pubowner) AS rolname, " @@ -4456,7 +4465,10 @@ void getPublications(Archive *fout) i_pubinsert = PQfnumber(res, "pubinsert"); i_pubupdate = PQfnumber(res, "pubupdate"); i_pubdelete = PQfnumber(res, "pubdelete"); - if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { + if (GetVersionNum(fout) >= PUBLICATION_DDL_AT_VERSION_NUM) { + i_pubtruncate = PQfnumber(res, "pubtruncate"); + i_pubddl = PQfnumber(res, "pubddl"); + } else if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { i_pubddl = PQfnumber(res, "pubddl"); } @@ -4473,7 +4485,10 @@ void getPublications(Archive *fout) pubinfo[i].pubinsert = (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0); pubinfo[i].pubupdate = (strcmp(PQgetvalue(res, i, 
i_pubupdate), "t") == 0); pubinfo[i].pubdelete = (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0); - if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { + if (GetVersionNum(fout) >= PUBLICATION_DDL_AT_VERSION_NUM) { + pubinfo[i].pubtruncate = (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0); + pubinfo[i].pubddl = atoxid(PQgetvalue(res, i, i_pubddl)); + } else if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { pubinfo[i].pubddl = atol(PQgetvalue(res, i, i_pubddl)); } @@ -4539,6 +4554,14 @@ static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo) first = false; } + if (pubinfo->pubtruncate) { + if (!first) { + appendPQExpBufferStr(query, ", "); + } + appendPQExpBufferStr(query, "truncate"); + first = false; + } + if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM && pubinfo->pubddl != 0) { if (!first) { appendPQExpBufferStr(query, "',"); @@ -23698,7 +23721,8 @@ getEventTriggers(Archive *fout, int *numEventTriggers) static bool eventtrigger_filter(EventTriggerInfo *evtinfo) { static char *reserved_trigger_prefix[] = {PUB_EVENT_TRIG_PREFIX PUB_TRIG_DDL_CMD_END, - PUB_EVENT_TRIG_PREFIX PUB_TRIG_DDL_CMD_START}; + PUB_EVENT_TRIG_PREFIX PUB_TRIG_DDL_CMD_START, + PUB_EVENT_TRIG_PREFIX PUB_TRIG_TBL_REWRITE}; static const size_t triggerPrefixLength = sizeof(reserved_trigger_prefix) / sizeof(reserved_trigger_prefix[0]); for (size_t i = 0; i < triggerPrefixLength; ++i) { diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 0aca8de733..d32cf51f46 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -517,6 +517,7 @@ typedef struct _PublicationInfo { bool pubinsert; bool pubupdate; bool pubdelete; + bool pubtruncate; int64 pubddl; } PublicationInfo; diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 9c535f4316..f8c0ec6aab 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -9842,6 
+9842,10 @@ AddFuncGroup( "publication_deparse_ddl_command_start", 1, AddBuiltinFunc(_0(4643), _1("publication_deparse_ddl_command_start"), _2(2), _3(true), _4(false), _5(publication_deparse_ddl_command_start), _6(3838), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("publication_deparse_ddl_command_start"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "publication_deparse_table_rewrite", 1, + AddBuiltinFunc(_0(4644), _1("publication_deparse_table_rewrite"), _2(2), _3(true), _4(false), _5(publication_deparse_table_rewrite), _6(3838), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("publication_deparse_table_rewrite"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "pv_builtin_functions", 1, AddBuiltinFunc(_0(5345), _1("pv_builtin_functions"), _2(0), _3(false), _4(true), _5(pv_builtin_functions), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(3100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(32, 19, 26, 26, 26, 700, 700, 26, 24, 16, 16, 16, 16, 16, 16, 18, 21, 21, 26, 30, 1007, 1002, 1009, 194, 25, 25, 1009, 1034, 22, 16, 16, 16, 26), _22(32, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(32, "proname", "pronamespace", "proowner", 
"prolang", "procost", "prorows", "provariadic", "protransform", "proisagg", "proiswindow", "prosecdef", "proleakproof", "proisstrict", "proretset", "provolatile", "pronargs", "pronargdefaults", "prorettype", "proargtypes", "proallargtypes", "proargmodes", "proargnames", "proargdefaults", "prosrc", "probin", "proconfig", "proacl", "prodefaultargpos", "fencedmode", "proshippable", "propackage", "oid"), _24(NULL), _25("pv_builtin_functions"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/src/common/backend/catalog/heap.cpp b/src/common/backend/catalog/heap.cpp index 59c61da5d5..927fee18ef 100644 --- a/src/common/backend/catalog/heap.cpp +++ b/src/common/backend/catalog/heap.cpp @@ -1284,6 +1284,10 @@ void InsertPgClassTuple( else values[Anum_pg_class_relreplident - 1] = REPLICA_IDENTITY_NOTHING; + if (new_rel_desc->relreplident && new_rel_desc->relreplident != REPLICA_IDENTITY_NOTHING) { + values[Anum_pg_class_relreplident - 1] = new_rel_desc->relreplident; + } + if (OidIsValid(new_rel_desc->rd_bucketoid)) { Assert(new_rel_desc->storage_type == SEGMENT_PAGE); values[Anum_pg_class_relbucket - 1] = ObjectIdGetDatum(new_rel_desc->rd_bucketoid); @@ -2590,6 +2594,15 @@ static Datum AddSegmentOption(Datum relOptions) return transformRelOptions((Datum)0, optsList, NULL, NULL, false, false); } +static Datum AddRelrewriteOption(Datum relOptions, Oid relrewrite) +{ + DefElem *def = makeDefElem(pstrdup("relrewrite"), (Node *)makeInteger(relrewrite)); + List* optsList = untransformRelOptions(relOptions); + optsList = lappend(optsList, def); + + return transformRelOptions((Datum)0, optsList, NULL, NULL, false, false); +} + Node* GetColumnRef(Node* key, bool* isExpr, bool* isFunc) { Node* result = NULL; @@ -2680,7 +2693,7 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable int oidinhcount, OnCommitAction oncommit, Datum 
reloptions, bool use_user_acl, bool allow_system_table_mods, PartitionState *partTableState, int8 row_compress, HashBucketInfo *bucketinfo, bool record_dependce, List *ceLst, StorageType storage_type, - LOCKMODE partLockMode, ObjectAddress *typaddress, List* depend_extend) + LOCKMODE partLockMode, ObjectAddress *typaddress, List* depend_extend, Oid relrewrite) { Relation pg_class_desc; Relation new_rel_desc; @@ -2879,6 +2892,10 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable relhasbucket = true; } + if (OidIsValid(relrewrite)) { + reloptions = AddRelrewriteOption(reloptions, relrewrite); + } + /* Get tableAmType from reloptions and relkind */ bytea* hreloptions = heap_reloptions(relkind, reloptions, false); TableAmType tam = get_tableam_from_reloptions(hreloptions, relkind, InvalidOid); @@ -2924,6 +2941,12 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable storage_type ); + if (OidIsValid(relrewrite)) { + Relation OldHeap = heap_open(relrewrite, AccessExclusiveLock); + new_rel_desc->relreplident = OldHeap->relreplident; + heap_close(OldHeap, AccessExclusiveLock); + } + /* Recode the table or other object in pg_class create time. 
*/ PgObjectType objectType = GetPgObjectTypePgClass(relkind); if (objectType != OBJECT_TYPE_INVALID) { diff --git a/src/common/backend/catalog/pg_publication.cpp b/src/common/backend/catalog/pg_publication.cpp index c13abc28f8..55a31a453c 100644 --- a/src/common/backend/catalog/pg_publication.cpp +++ b/src/common/backend/catalog/pg_publication.cpp @@ -144,6 +144,7 @@ static Publication *GetPublication(Oid pubid) pub->pubactions.pubinsert = pubform->pubinsert; pub->pubactions.pubupdate = pubform->pubupdate; pub->pubactions.pubdelete = pubform->pubdelete; + pub->pubactions.pubtruncate = pubform->pubtruncate; pub->pubactions.pubddl = pubform->pubddl; ReleaseSysCache(tup); diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 1bf48d267e..fdb4eda87e 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -4456,6 +4456,10 @@ static ColumnDef* _copyColumnDef(const ColumnDef* from) COPY_NODE_FIELD(clientLogicColumnRef); COPY_NODE_FIELD(position); COPY_NODE_FIELD(update_default); + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COPY_STRING_FIELD(initdefval); + } + return newnode; } @@ -5214,6 +5218,9 @@ static AlterTableCmd* _copyAlterTableCmd(const AlterTableCmd* from) COPY_SCALAR_FIELD(alterGPI); COPY_SCALAR_FIELD(is_first); COPY_STRING_FIELD(after_name); + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COPY_SCALAR_FIELD(recursing); + } return newnode; } @@ -5683,6 +5690,9 @@ static AlterFunctionStmt* _copyAlterFunctionStmt(const AlterFunctionStmt* from) COPY_NODE_FIELD(func); COPY_NODE_FIELD(actions); + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COPY_SCALAR_FIELD(isProcedure); + } return newnode; } @@ -5733,6 +5743,9 @@ static RenameStmt* _copyRenameStmt(const RenameStmt* from) COPY_SCALAR_FIELD(missing_ok); COPY_NODE_FIELD(renameTargetList); COPY_SCALAR_FIELD(renameTableflag); + if 
(t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COPY_SCALAR_FIELD(is_modifycolumn); + } return newnode; } diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index 9bce5df971..81c433b459 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -1167,6 +1167,9 @@ static bool _equalAlterTableCmd(const AlterTableCmd* a, const AlterTableCmd* b) COMPARE_SCALAR_FIELD(alterGPI); COMPARE_SCALAR_FIELD(is_first); COMPARE_STRING_FIELD(after_name); + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COMPARE_SCALAR_FIELD(recursing); + } return true; } @@ -1617,6 +1620,9 @@ static bool _equalAlterFunctionStmt(const AlterFunctionStmt* a, const AlterFunct { COMPARE_NODE_FIELD(func); COMPARE_NODE_FIELD(actions); + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COMPARE_SCALAR_FIELD(isProcedure); + } return true; } @@ -1659,6 +1665,9 @@ static bool _equalRenameStmt(const RenameStmt* a, const RenameStmt* b) COMPARE_SCALAR_FIELD(missing_ok); COMPARE_NODE_FIELD(renameTargetList); COMPARE_SCALAR_FIELD(renameTableflag); + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COMPARE_SCALAR_FIELD(is_modifycolumn); + } return true; } @@ -2913,6 +2922,9 @@ static bool _equalColumnDef(const ColumnDef* a, const ColumnDef* b) COMPARE_NODE_FIELD(fdwoptions); COMPARE_NODE_FIELD(columnOptions); COMPARE_NODE_FIELD(update_default); + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + COMPARE_STRING_FIELD(initdefval); + } return true; } diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 0f3be412ec..e70984e914 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -4358,6 +4358,9 @@ static void _outColumnDef(StringInfo str, ColumnDef* node) if (t_thrd.proc->workingVersionNum >= ON_UPDATE_TIMESTAMP_VERSION_NUM) { 
WRITE_NODE_FIELD(update_default); } + if (t_thrd.proc->workingVersionNum >= PUBLICATION_DDL_AT_VERSION_NUM) { + WRITE_STRING_FIELD(initdefval); + } } static void _outTypeName(StringInfo str, TypeName* node) diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index f4973a3a3f..bb52f82a63 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -5889,6 +5889,9 @@ static ColumnDef* _readColumnDef() IF_EXIST(update_default) { READ_NODE_FIELD(update_default); } + IF_EXIST(initdefval) { + READ_STRING_FIELD(initdefval); + } READ_DONE(); } diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index f31301087f..d63e9923cd 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -17457,6 +17457,7 @@ AlterProcedureStmt: ALTER PROCEDURE function_with_argtypes alterfunc_opt_list opt_restrict { AlterFunctionStmt *n = makeNode(AlterFunctionStmt); + n->isProcedure = true; n->func = $3; n->actions = $4; $$ = (Node *) n; diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp index 060f0731c5..773f66ec62 100644 --- a/src/common/backend/parser/parse_utilcmd.cpp +++ b/src/common/backend/parser/parse_utilcmd.cpp @@ -8569,6 +8569,7 @@ static void TransformModifyColumndef(CreateStmtContext* cxt, AlterTableCmd* cmd) rename->subname = cmd->name; rename->newname = def->colname; rename->missing_ok = false; + rename->is_modifycolumn = true; cxt->blist = lappend(cxt->blist, rename); } } diff --git a/src/common/backend/utils/adt/regproc.cpp b/src/common/backend/utils/adt/regproc.cpp index bf0d636994..ed86ec942e 100644 --- a/src/common/backend/utils/adt/regproc.cpp +++ b/src/common/backend/utils/adt/regproc.cpp @@ -426,7 +426,7 @@ format_procedure_internal(Oid procedure_oid, bool force_qualify) * Would this proc be found (given the right args) by regprocedurein? * If not, we need to qualify it. 
*/ - if (FunctionIsVisible(procedure_oid)) + if (FunctionIsVisible(procedure_oid) && !force_qualify) nspname = NULL; else nspname = get_namespace_name(procform->pronamespace); diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index 74732fedb6..663c59c201 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -121,6 +121,11 @@ #define PRETTYFLAG_PAREN 1 #define PRETTYFLAG_INDENT 2 +/* Standard conversion of a "bool pretty" option to detailed flags */ +#define GET_PRETTY_FLAGS(pretty) \ + ((pretty) ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT) \ + : PRETTYFLAG_INDENT) + /* Default line length for pretty-print wrapping */ #define WRAP_COLUMN_DEFAULT 79 @@ -244,10 +249,11 @@ static char* deparse_expression_pretty( bool no_alias = false); extern char* pg_get_viewdef_worker(Oid viewoid, int prettyFlags, int wrapColumn); extern char* pg_get_functiondef_worker(Oid funcid, int* headerlines); +extern char* pg_get_trigger_whenclause(Form_pg_trigger trigrec,Node* whenClause, bool pretty); static char* pg_get_triggerdef_worker(Oid trigid, bool pretty); static void decompile_column_index_array(Datum column_index_array, Oid relId, StringInfo buf); static char* pg_get_ruledef_worker(Oid ruleoid, int prettyFlags); -static char *pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid *excludeOps, bool attrsOnly, bool showTblSpc, +static char* pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid* excludeOps, bool attrsOnly, bool showTblSpc, int prettyFlags, bool dumpSchemaOnly = false, bool showPartitionLocal = true, bool showSubpartitionLocal = true); void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubpartitionLocal); @@ -623,7 +629,27 @@ char* pg_get_viewdef_worker(Oid viewoid, int prettyFlags, int wrapColumn) char* pg_get_viewdef_string(Oid viewid) { - return 
pg_get_viewdef_worker(viewid, 0, -1); + StringInfoData buf; + Relation pg_rewrite; + HeapTuple ruletup; + TupleDesc rulettc; + + initStringInfo(&buf); + pg_rewrite = relation_open(RewriteRelationId, AccessShareLock); + + ruletup = SearchSysCache2(RULERELNAME, + ObjectIdGetDatum(viewid), + PointerGetDatum(ViewSelectRuleName)); + if (!HeapTupleIsValid(ruletup)) { + elog(ERROR, "cache lookup failed for rewrite rule for view with OID %u", viewid); + } + + rulettc = pg_rewrite->rd_att; + make_viewdef(&buf, ruletup, rulettc, 0, WRAP_COLUMN_DEFAULT); + ReleaseSysCache(ruletup); + relation_close(pg_rewrite, AccessShareLock); + + return buf.data; } /* @@ -3318,6 +3344,76 @@ static char* pg_get_triggerdef_worker(Oid trigid, bool pretty) return buf.data; } +/* + * Pass back the TriggerWhen clause of a trigger given the pg_trigger record and + * the expression tree (in nodeToString() representation) from pg_trigger.tgqual + * for the trigger's WHEN condition. + */ +char* pg_get_trigger_whenclause(Form_pg_trigger trigrec, Node* whenClause, bool pretty) +{ + StringInfoData buf; + char relkind; + deparse_context context; + deparse_namespace dpns; + RangeTblEntry *oldrte; + RangeTblEntry *newrte; + + initStringInfo(&buf); + + relkind = get_rel_relkind(trigrec->tgrelid); + + /* Build minimal OLD and NEW RTEs for the rel */ + oldrte = makeNode(RangeTblEntry); + oldrte->rtekind = RTE_RELATION; + oldrte->relid = trigrec->tgrelid; + oldrte->relkind = relkind; + oldrte->alias = makeAlias("old", NIL); + oldrte->eref = oldrte->alias; + oldrte->lateral = false; + oldrte->inh = false; + oldrte->inFromCl = true; + + newrte = makeNode(RangeTblEntry); + newrte->rtekind = RTE_RELATION; + newrte->relid = trigrec->tgrelid; + newrte->relkind = relkind; + newrte->alias = makeAlias("new", NIL); + newrte->eref = newrte->alias; + newrte->lateral = false; + newrte->inh = false; + newrte->inFromCl = true; + + /* Build two-element rtable */ + errno_t rc = memset_s(&dpns, sizeof(dpns), 0, sizeof(dpns)); + 
securec_check(rc, "\0", "\0"); + dpns.rtable = list_make2(oldrte, newrte); + dpns.ctes = NIL; + + + /* Set up context with one-deep namespace stack */ + context.buf = &buf; + context.namespaces = list_make1(&dpns); + context.windowClause = NIL; + context.windowTList = NIL; + context.varprefix = true; + context.prettyFlags = GET_PRETTY_FLAGS(pretty); +#ifdef PGXC + context.finalise_aggs = false; + context.sortgroup_colno = false; + context.parser_arg = NULL; +#endif /* PGXC */ + context.viewdef = false; + context.is_fqs = false; + context.wrapColumn = WRAP_COLUMN_DEFAULT; + context.indentLevel = PRETTYINDENT_STD; + context.qrw_phase = false; + context.is_upsert_clause = false; + + get_rule_expr(whenClause, &context, false); + + return buf.data; +} + /* ---------- * get_indexdef - Get the definition of an index * @@ -4560,6 +4656,17 @@ Datum pg_get_functiondef(PG_FUNCTION_ARGS) PG_RETURN_DATUM(HeapTupleGetDatum(tuple)); } + +char* pg_get_functiondef_string(Oid funcid) +{ + char* funcdef = NULL; + int headerlines = 0; + + funcdef = pg_get_functiondef_worker(funcid, &headerlines); + + return funcdef; +} + /* * pg_get_function_arguments * Get a nicely-formatted list of arguments for a function. 
diff --git a/src/common/backend/utils/cache/relcache.cpp b/src/common/backend/utils/cache/relcache.cpp index f40a3db4fc..0909332a2f 100755 --- a/src/common/backend/utils/cache/relcache.cpp +++ b/src/common/backend/utils/cache/relcache.cpp @@ -2312,6 +2312,7 @@ static Relation RelationBuildDescExtended(Oid targetRelId, bool insertIt, bool b } else { relation->relreplident = CharGetDatum(datum); } + /* * initialize the relation's relation id (relation->rd_id) */ @@ -5428,6 +5429,8 @@ void RelationCacheInvalidOid(Relation relation) HeapTuple htup; Form_pg_class relp; int natts = 0; + Datum datum; + bool isnull = false; htup = SearchSysCache1(RELOID, ObjectIdGetDatum(RelationGetRelid(relation))); if (!HeapTupleIsValid(htup)) @@ -7283,6 +7286,10 @@ struct PublicationActions* GetRelationPublicationActions(Relation relation) pubactions->pubinsert |= pubform->pubinsert; pubactions->pubupdate |= pubform->pubupdate; pubactions->pubdelete |= pubform->pubdelete; + pubactions->pubtruncate |= pubform->pubtruncate; + + if (pubactions->pubddl != PUBDDL_ALL) + pubactions->pubddl |= pubform->pubddl; ReleaseSysCache(tup); @@ -7290,7 +7297,9 @@ struct PublicationActions* GetRelationPublicationActions(Relation relation) * If we know everything is replicated, there is no point to check * for other publications. */ - if (pubactions->pubinsert && pubactions->pubupdate && pubactions->pubdelete) + if (pubactions->pubinsert && pubactions->pubupdate && + pubactions->pubdelete && pubactions->pubtruncate && + pubactions->pubddl == PUBDDL_ALL) break; } @@ -8763,6 +8772,7 @@ Relation tuple_get_rel(HeapTuple pg_class_tuple, LOCKMODE lockmode, TupleDesc tu } else { relation->relreplident = CharGetDatum(datum); } + /* * If it's an index, initialize index-related information. * We modify RelationInitIndexAccessInfo interface to input index tuple which cached by ourself. 
diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 42231d92f8..71b73d9e47 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,12 +76,13 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92947; +const uint32 GRAND_VERSION_NUM = 92949; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 PUBLICATION_DDL_AT_VERSION_NUM = 92949; const uint32 MINMAXEXPR_CMPTYPE_VERSION_NUM = 92948; const uint32 PARTITION_NAME_VERSION_NUM = 92947; const uint32 AUDIT_SHA_VERSION_NUM = 92946; diff --git a/src/gausskernel/optimizer/commands/cluster.cpp b/src/gausskernel/optimizer/commands/cluster.cpp index ecac42c7c5..94f7d41e28 100755 --- a/src/gausskernel/optimizer/commands/cluster.cpp +++ b/src/gausskernel/optimizer/commands/cluster.cpp @@ -1474,7 +1474,11 @@ Oid make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, int lockMode) RELATION_CREATE_BUCKET(OldHeap) ? &bucketinfo : NULL, true, NULL, - RelationGetStorageType(OldHeap)); + RelationGetStorageType(OldHeap), + AccessExclusiveLock, + NULL, + NIL, + OIDOldHeap); Assert(OIDNewHeap != InvalidOid); ReleaseSysCache(tuple); @@ -3545,6 +3549,14 @@ void finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, bool is_system_catalog, bo rc = snprintf_s(NewToastName, NAMEDATALEN, NAMEDATALEN - 1, "pg_toast_%u_index", OIDOldHeap); securec_check_ss(rc, "\0", "\0"); RenameRelationInternal(toastidx, NewToastName); + + /* + * Reset the relrewrite for the toast. The command-counter + * increment is required here as we are about to update the tuple + * that is updated as part of RenameRelationInternal. 
+ */ + CommandCounterIncrement(); + ResetRelRewrite(newrel->rd_rel->reltoastrelid); } relation_close(newrel, NoLock); } diff --git a/src/gausskernel/optimizer/commands/ddldeparse.cpp b/src/gausskernel/optimizer/commands/ddldeparse.cpp index 6ad8717d7d..548f7f6f9d 100644 --- a/src/gausskernel/optimizer/commands/ddldeparse.cpp +++ b/src/gausskernel/optimizer/commands/ddldeparse.cpp @@ -58,6 +58,7 @@ #include "catalog/pg_range.h" #include "catalog/pg_rewrite.h" #include "catalog/pg_statistic_ext.h" +#include "catalog/pg_trigger.h" #include "catalog/pg_ts_config.h" #include "catalog/pg_ts_dict.h" #include "catalog/pg_ts_parser.h" @@ -79,61 +80,72 @@ #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/guc_tables.h" #include "utils/guc.h" #include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/rel.h" +#include "utils/partitionkey.h" #include "utils/syscache.h" +#include "optimizer/clauses.h" /* Estimated length of the generated jsonb string */ static const int JSONB_ESTIMATED_LEN = 128; +/* copy from ruleutils.cpp */ +#define BEGIN_P_STR " BEGIN_B_PROC " /* used in dolphin type proc body */ +#define BEGIN_P_LEN 14 +#define BEGIN_N_STR " BEGIN " /* BEGIN_P_STR to same length */ + /* - * Before they are turned into JSONB representation, each command is - * represented as an object tree, using the structs below. + * Mark the max_volatility flag for an expression in the command. */ -typedef enum +static void mark_function_volatile(ddl_deparse_context* context, Node* expr) { - ObjTypeNull, - ObjTypeBool, - ObjTypeString, - ObjTypeArray, - ObjTypeInteger, - ObjTypeFloat, - ObjTypeObject -} ObjType; + if (context->max_volatility == PROVOLATILE_VOLATILE) { + return; + } -/* - * Represent the command as an object tree. 
- */ -typedef struct ObjTree + if (contain_volatile_functions(expr)) { + context->max_volatility = PROVOLATILE_VOLATILE; + return; + } + + if (context->max_volatility == PROVOLATILE_IMMUTABLE && + contain_mutable_functions(expr)) { + context->max_volatility = PROVOLATILE_STABLE; + } +} + +static void check_alter_table_rewrite_replident_change(Relation r, int attno, const char *cmd) { - slist_head params; /* Object tree parameters */ - int numParams; /* Number of parameters in the object tree */ - StringInfo fmtinfo; /* Format string of the ObjTree */ - bool present; /* Indicates if boolean value should be stored */ -} ObjTree; + Oid replidindex = RelationGetReplicaIndex(r); + if (!OidIsValid(replidindex)) { + ereport(ERROR, + (errmsg("cannot use %s command without replident index because it cannot be replicated in DDL replication", + cmd))); + } -/* - * An element of an object tree (ObjTree). - */ -typedef struct ObjElem + if (IsRelationReplidentKey(r, attno)) { + ereport(ERROR, + (errmsg("cannot use %s command to replica index attr because it cannot be replicated in DDL replication", + cmd))); + } +} + +static void check_alter_table_replident(Relation rel) { - char *name; /* Name of object element */ - ObjType objtype; /* Object type */ + if (rel->relreplident != REPLICA_IDENTITY_FULL && + !OidIsValid(RelationGetReplicaIndex(rel))) { + elog(ERROR, "this ALTER TABLE command will cause a table rewritting, " + "but the table does not have a replica identity, it cannot be replicated in DDL replication"); + } +} - union { - bool boolean; - char *string; - int64 integer; - float8 flt; - ObjTree *object; - List *array; - } value; /* Store the object value based on the object - * type */ - slist_node node; /* Used in converting back to ObjElem - * structure */ -} ObjElem; +void table_close(Relation relation, LOCKMODE lockmode) +{ + relation_close(relation, lockmode); +} /* * Reduce some unnecessary strings from the output json when verbose @@ -141,25 +153,22 @@ typedef 
struct ObjElem * the last DDL command. */ -static void append_format_string(ObjTree *tree, char *sub_fmt); -static void append_array_object(ObjTree *tree, char *sub_fmt, List *array); -static void append_object_object(ObjTree *tree, char *sub_fmt, ObjTree *value); -static char *append_object_to_format_string(ObjTree *tree, const char *sub_fmt); +static char* append_object_to_format_string(ObjTree *tree, const char *sub_fmt); static void append_premade_object(ObjTree *tree, ObjElem *elem); -static void append_string_object(ObjTree *tree, char *sub_fmt, char *name, - const char *value); static void append_int_object(ObjTree *tree, char *sub_fmt, int32 value); +static void append_float_object(ObjTree *tree, char *sub_fmt, float8 value); static void format_type_detailed(Oid type_oid, int32 typemod, Oid *nspid, char **typname, char **typemodstr, bool *typarray); -static ObjElem *new_object(ObjType type, char *name); -static ObjTree *new_objtree_for_qualname_id(Oid classId, Oid objectId); -static ObjTree *new_objtree(const char *fmt); -static ObjElem *new_object_object(ObjTree *value); +static ObjElem* new_object(ObjType type, char *name); +static ObjTree* new_objtree_for_qualname_id(Oid classId, Oid objectId); +static ObjTree* new_objtree(const char *fmt); +static ObjElem* new_object_object(ObjTree *value); -static ObjTree *new_objtree_VA(const char *fmt, int numobjs,...); +ObjTree* new_objtree_VA(const char *fmt, int numobjs, ...); +ObjElem* new_string_object(char *value); -static JsonbValue *objtree_to_jsonb_rec(ObjTree *tree, JsonbParseState *state, char *owner); +static JsonbValue* objtree_to_jsonb_rec(ObjTree *tree, JsonbParseState *state, char *owner); static void pg_get_indexdef_detailed(Oid indexrelid, bool global, char **index_am, char **definition, @@ -167,32 +176,76 @@ static void pg_get_indexdef_detailed(Oid indexrelid, bool global, char **tablespace, char **whereClause, bool *invisible); -static char *RelationGetColumnDefault(Relation rel, AttrNumber attno, 
+static char* RelationGetColumnDefault(Relation rel, AttrNumber attno, List *dpcontext, List **exprs); -static ObjTree *deparse_ColumnDef(Relation relation, List *dpcontext, - ColumnDef *coldef, List **exprs); +static ObjTree* deparse_ColumnDef(Relation relation, List *dpcontext, bool composite, + ColumnDef *coldef, bool is_alter, List **exprs); +static ObjTree* deparse_ColumnSetOptions(AlterTableCmd *subcmd); + +static ObjTree* deparse_DefElem(DefElem *elem, bool is_reset); +static ObjTree* deparse_OnCommitClause(OnCommitAction option); +static ObjTree* deparse_add_subpartition(ObjTree* ret, Oid partoid, + List *subPartitionDefState, int parkeynum, Oid* partkey_types); +static List* deparse_partition_boudaries(Oid parentoid, char reltype, char strategy, const char* partition_name, + Oid* partoid, int parkeynum, Oid* partkey_types); +static int get_partition_key_types(Oid reloid, char parttype, Oid **partkey_types); +static List* get_range_partition_maxvalues(List *boundary); +static List* get_list_partition_maxvalues(List *boundary); +static ObjTree* deparse_RelSetOptions(AlterTableCmd *subcmd); + +static inline ObjElem* deparse_Seq_Cache(sequence_values *seqdata, bool alter_table); +static inline ObjElem* deparse_Seq_Cycle(sequence_values *seqdata, bool alter_table); +static inline ObjElem* deparse_Seq_IncrementBy(sequence_values *seqdata, bool alter_table); +static inline ObjElem* deparse_Seq_Minvalue(sequence_values *seqdata, bool alter_table); +static inline ObjElem* deparse_Seq_Maxvalue(sequence_values *seqdata, bool alter_table); +static inline ObjElem* deparse_Seq_Restart(char *last_value); +static inline ObjElem* deparse_Seq_Startwith(sequence_values *seqdata, bool alter_table); +static ObjElem* deparse_Seq_OwnedBy(Oid sequenceId); +static inline ObjElem* deparse_Seq_Order(DefElem *elem); +static inline ObjElem* deparse_Seq_As(DefElem *elem); +static ObjTree* deparse_CreateFunction(Oid objectId, Node *parsetree); +static ObjTree* 
deparse_FunctionSet(VariableSetKind kind, char *name, char *value); +static ObjTree* deparse_CreateTrigStmt(Oid objectId, Node *parsetree); +static ObjTree* deparse_AlterTrigStmt(Oid objectId, Node *parsetree); +static ObjTree* deparse_AlterFunction(Oid objectId, Node *parsetree); + +static List* deparse_TableElements(Relation relation, List *tableElements, List *dpcontext, bool composite); +extern char* pg_get_trigger_whenclause(Form_pg_trigger trigrec, Node* whenClause, bool pretty); +extern char* pg_get_functiondef_string(Oid funcid); + +/* + * Append a boolean parameter to a tree. + */ +static void append_bool_object(ObjTree *tree, char *sub_fmt, bool value) +{ + ObjElem *param; + char *object_name = sub_fmt; + bool is_present_flag = false; + + Assert(sub_fmt); -static ObjTree *deparse_DefElem(DefElem *elem, bool is_reset); + /* + * Check if the format string is 'present' and if yes, store the boolean + * value + */ + if (strcmp(sub_fmt, "present") == 0) { + is_present_flag = true; + tree->present = value; + } -static inline ObjElem *deparse_Seq_Cache(sequence_values *seqdata, bool alter_table); -static inline ObjElem *deparse_Seq_Cycle(sequence_values * seqdata, bool alter_table); -static inline ObjElem *deparse_Seq_IncrementBy(sequence_values * seqdata, bool alter_table); -static inline ObjElem *deparse_Seq_Minvalue(sequence_values * seqdata, bool alter_table); -static inline ObjElem *deparse_Seq_Maxvalue(sequence_values * seqdata, bool alter_table); -static inline ObjElem *deparse_Seq_Restart(char *last_value); -static inline ObjElem *deparse_Seq_Startwith(sequence_values * seqdata, bool alter_table); -static ObjElem *deparse_Seq_OwnedBy(Oid sequenceId); -static inline ObjElem *deparse_Seq_Order(DefElem *elem); -static inline ObjElem *deparse_Seq_As(DefElem *elem); + if (!is_present_flag) + object_name = append_object_to_format_string(tree, sub_fmt); -static List *deparse_TableElements(Relation relation, List *tableElements, List *dpcontext); + param = 
new_object(ObjTypeBool, object_name); + param->value.boolean = value; + append_premade_object(tree, param); +} /* * Append an int32 parameter to a tree. */ -static void -append_int_object(ObjTree *tree, char *sub_fmt, int32 value) +static void append_int_object(ObjTree *tree, char *sub_fmt, int32 value) { ObjElem *param; char *object_name; @@ -206,29 +259,47 @@ append_int_object(ObjTree *tree, char *sub_fmt, int32 value) append_premade_object(tree, param); } +/* + * Append a float8 parameter to a tree. + */ +static void append_float_object(ObjTree *tree, char *sub_fmt, float8 value) +{ + ObjElem *param; + char *object_name; + + Assert(sub_fmt); + + object_name = append_object_to_format_string(tree, sub_fmt); + + param = new_object(ObjTypeFloat, object_name); + param->value.flt = value; + append_premade_object(tree, param); +} + /* * Append a NULL-or-quoted-literal clause. Userful for COMMENT and SECURITY * LABEL. - * + * * Verbose syntax * %{null}s %{literal}s */ -static void -append_literal_or_null(ObjTree *parent, char *elemname, char *value) +static void append_literal_or_null(ObjTree *parent, char *elemname, char *value) { ObjTree *top; ObjTree *part; top = new_objtree(""); part = new_objtree_VA("NULL", 1, - "present", ObjTypeBool, !value); + "present", ObjTypeBool, !value); append_object_object(top, "%{null}s", part); part = new_objtree_VA("", 1, - "present", ObjTypeBool, value != NULL); + "present", ObjTypeBool, value != NULL); - if (value) + if (value) { append_string_object(part, "%{value}L", "value", value); + } + append_object_object(top, "%{literal}s", part); append_object_object(parent, elemname, top); @@ -237,16 +308,16 @@ append_literal_or_null(ObjTree *parent, char *elemname, char *value) /* * Append an array parameter to a tree. 
*/ -static void -append_array_object(ObjTree *tree, char *sub_fmt, List *array) +void append_array_object(ObjTree *tree, char *sub_fmt, List *array) { ObjElem *param; char *object_name; Assert(sub_fmt); - if (!array || list_length(array) == 0) + if (!array || list_length(array) == 0) { return; + } ListCell *lc; @@ -270,30 +341,49 @@ append_array_object(ObjTree *tree, char *sub_fmt, List *array) /* * Append the input format string to the ObjTree. */ -static void -append_format_string(ObjTree *tree, char *sub_fmt) +void append_format_string(ObjTree *tree, char *sub_fmt) { int len; char *fmt; - if (tree->fmtinfo == NULL) + if (tree->fmtinfo == NULL) { return; + } fmt = tree->fmtinfo->data; len = tree->fmtinfo->len; /* Add a separator if necessary */ - if (len > 0 && fmt[len - 1] != ' ') + if (len > 0 && fmt[len - 1] != ' ') { appendStringInfoSpaces(tree->fmtinfo, 1); - + } appendStringInfoString(tree->fmtinfo, sub_fmt); } +/* + * Append present as false to a tree. + * If sub_fmt is passed and verbose mode is ON, + * append sub_fmt as well to tree. + * + * Example: + * in non-verbose mode, element will be like: + * "collation": {"fmt": "COLLATE", "present": false} + * in verbose mode: + * "collation": {"fmt": "COLLATE %{name}D", "present": false} + */ +static void append_not_present(ObjTree *tree, char *sub_fmt) +{ + if (sub_fmt) { + append_format_string(tree, sub_fmt); + } + + append_bool_object(tree, "present", false); +} + /* * Append an object parameter to a tree. */ -static void -append_object_object(ObjTree *tree, char *sub_fmt, ObjTree *value) +void append_object_object(ObjTree *tree, char *sub_fmt, ObjTree *value) { ObjElem *param; char *object_name; @@ -314,8 +404,7 @@ append_object_object(ObjTree *tree, char *sub_fmt, ObjTree *value) * Return the object name which is extracted from the input "*%{name[:.]}*" * style string. And append the input format string to the ObjTree. 
*/ -static char * -append_object_to_format_string(ObjTree *tree, const char *sub_fmt) +static char* append_object_to_format_string(ObjTree *tree, const char *sub_fmt) { StringInfoData object_name; const char *end_ptr, *start_ptr; @@ -354,8 +443,7 @@ append_object_to_format_string(ObjTree *tree, const char *sub_fmt) /* * Append a preallocated parameter to a tree. */ -static inline void -append_premade_object(ObjTree *tree, ObjElem *elem) +static inline void append_premade_object(ObjTree *tree, ObjElem *elem) { slist_push_head(&tree->params, &elem->node); tree->numParams++; @@ -364,9 +452,8 @@ append_premade_object(ObjTree *tree, ObjElem *elem) /* * Append a string parameter to a tree. */ -static void -append_string_object(ObjTree *tree, char *sub_fmt, char *name, - const char *value) +void append_string_object(ObjTree *tree, char *sub_fmt, char *name, + const char *value) { ObjElem *param; @@ -398,10 +485,9 @@ append_string_object(ObjTree *tree, char *sub_fmt, char *name, * We don't try to decode type names to their standard-mandated names, except * in the cases of types with unusual typmod rules. */ -static void -format_type_detailed(Oid type_oid, int32 typemod, - Oid *nspid, char **typname, char **typemodstr, - bool *typearray) +static void format_type_detailed(Oid type_oid, int32 typemod, + Oid *nspid, char **typname, char **typemodstr, + bool *typearray) { HeapTuple tuple; Form_pg_type typeform; @@ -484,8 +570,7 @@ format_type_detailed(Oid type_oid, int32 typemod, /* * Return the string representation of the given RELPERSISTENCE value. */ -static inline char * -get_persistence_str(char persistence) +static inline char* get_persistence_str(char persistence) { switch (persistence) { case RELPERSISTENCE_TEMP: @@ -505,8 +590,7 @@ get_persistence_str(char persistence) /* * Return the string representation of the given storagetype value. 
*/ -static inline char * -get_type_storage(char storagetype) +static inline char* get_type_storage(char storagetype) { switch (storagetype) { case 'p': @@ -525,8 +609,7 @@ get_type_storage(char storagetype) /* * Allocate a new parameter. */ -static ObjElem * -new_object(ObjType type, char *name) +static ObjElem* new_object(ObjType type, char *name) { ObjElem *param; @@ -540,8 +623,7 @@ new_object(ObjType type, char *name) /* * Allocate a new object parameter. */ -static ObjElem * -new_object_object(ObjTree *value) +static ObjElem* new_object_object(ObjTree *value) { ObjElem *param; @@ -554,8 +636,7 @@ new_object_object(ObjTree *value) /* * Allocate a new object tree to store parameter values. */ -static ObjTree * -new_objtree(const char *fmt) +static ObjTree* new_objtree(const char *fmt) { ObjTree *params; @@ -581,8 +662,7 @@ new_objtree(const char *fmt) * be quoted as an identifier or not, which is not something that this routine * concerns itself with; that will be up to the expand function. */ -static ObjTree * -new_objtree_for_qualname(Oid nspid, char *name) +static ObjTree* new_objtree_for_qualname(Oid nspid, char *name) { ObjTree *qualified; char *namespc; @@ -603,8 +683,7 @@ new_objtree_for_qualname(Oid nspid, char *name) * A helper routine to set up %{}D and %{}O elements, with the object specified * by classId/objId. 
*/ -static ObjTree * -new_objtree_for_qualname_id(Oid classId, Oid objectId) +static ObjTree* new_objtree_for_qualname_id(Oid classId, Oid objectId) { ObjTree *qualified; Relation catalog; @@ -625,12 +704,12 @@ new_objtree_for_qualname_id(Oid classId, Oid objectId) Anum_namespace = get_object_attnum_namespace(classId); obj_nsp = heap_getattr(catobj, Anum_namespace, RelationGetDescr(catalog), - &isnull); + &isnull); if (isnull) elog(ERROR, "null namespace for object %u", objectId); obj_name = heap_getattr(catobj, Anum_name, RelationGetDescr(catalog), - &isnull); + &isnull); if (isnull) elog(ERROR, "null attribute name for object %u", objectId); @@ -641,11 +720,25 @@ new_objtree_for_qualname_id(Oid classId, Oid objectId) return qualified; } +static ObjTree* new_objtree_for_qualname_rangevar(RangeVar* rv) +{ + ObjTree *qualified = NULL; + if (rv->schemaname) { + qualified = new_objtree_VA(NULL, 2, + "schemaname", ObjTypeString, rv->schemaname, + "objname", ObjTypeString, pstrdup(rv->relname)); + } else { + /* serachpath has no schema set in deparse_utility_command */ + Oid reloid = RangeVarGetRelid(rv, AccessExclusiveLock, false); + qualified = new_objtree_for_qualname_id(RelationRelationId, reloid); + } + return qualified; +} + /* * A helper routine to setup %{}T elements. */ -static ObjTree * -new_objtree_for_type(Oid typeId, int32 typmod) +static ObjTree* new_objtree_for_type(Oid typeId, int32 typmod) { Oid typnspid; char *type_nsp; @@ -683,8 +776,7 @@ new_objtree_for_type(Oid typeId, int32 typmod) * Note we don't have the luxury of sprintf-like compiler warnings for * malformed argument lists. */ -static ObjTree * -new_objtree_VA(const char *fmt, int numobjs,...) +ObjTree* new_objtree_VA(const char *fmt, int numobjs, ...) { ObjTree *tree; va_list args; @@ -743,30 +835,125 @@ new_objtree_VA(const char *fmt, int numobjs,...) return tree; } +/* + * Allocate a new string object. 
+ */ +ObjElem* new_string_object(char *value) +{ + ObjElem *param; + + Assert(value); + + param = new_object(ObjTypeString, NULL); + param->value.string = value; + + return param; +} + +static ObjTree* deparse_AlterSchemaStmt(Oid objectId, Node *parsetree) +{ + ObjTree *ret; + AlterSchemaStmt *stmt = (AlterSchemaStmt *) parsetree; + + bool setblockchain = false; + + if (stmt->charset == PG_INVALID_ENCODING && !stmt->collate) { + setblockchain = true; + } + + ret = new_objtree_VA("ALTER SCHEMA %{schemaname}I", 1, + "schemaname", ObjTypeString, stmt->schemaname); + + if (setblockchain) { + append_string_object(ret, "%{with}s BLOCKCHAIN", "with", stmt->hasBlockChain ? + "WITH" : "WITHOUT"); + } else { + if (stmt->charset != PG_INVALID_ENCODING) { + append_string_object(ret, "CHARACTER SET = %{charset}s", "charset", + pg_encoding_to_char(stmt->charset)); + } + + if (stmt->collate) { + append_string_object(ret, "COLLATE = %{collate}s", "collate", stmt->collate); + } + } + + return ret; +} + + +/* + * Deparse a CreateSchemaStmt. + * + * Given a schema OID and the parse tree that created it, return an ObjTree + * representing the creation command. + * + * Verbose syntax + * CREATE SCHEMA %{if_not_exists}s %{name}I %{authorization}s +*/ +static ObjTree* deparse_CreateSchemaStmt(Oid objectId, Node *parsetree) +{ + CreateSchemaStmt *node = (CreateSchemaStmt *) parsetree; + ObjTree *ret; + ObjTree *auth; + ObjTree *blockchain; + ret = new_objtree_VA("CREATE SCHEMA %{if_not_exists}s %{name}I", 2, + "if_not_exists", ObjTypeString, + node->missing_ok ? "IF NOT EXISTS" : "", + "name", ObjTypeString, + node->schemaname ? 
node->schemaname : ""); + + auth = new_objtree("AUTHORIZATION"); + if (node->authid) + append_string_object(auth, "%{authorization_role}I", + "authorization_role", + node->authid); + else + append_not_present(auth, "%{authorization_role}I"); + + append_object_object(ret, "%{authorization}s", auth); + + blockchain = new_objtree("WITH BLOCKCHAIN"); + if (!node->hasBlockChain) + append_not_present(blockchain, "%{blockchain}s"); + append_object_object(ret, "%{blockchain}s", blockchain); + + return ret; +} + /* * Return the given object type as a string. * * If isgrant is true, then this function is called while deparsing GRANT * statement and some object names are replaced. */ -static const char * -string_objtype(ObjectType objtype, bool isgrant) +static const char* string_objtype(ObjectType objtype, bool isgrant) { switch (objtype) { case OBJECT_COLUMN: return isgrant ? "TABLE" : "COLUMN"; case OBJECT_DOMAIN: return "DOMAIN"; + case OBJECT_FUNCTION: + return "FUNCTION"; case OBJECT_INDEX: return "INDEX"; + case OBJECT_SCHEMA: + return "SCHEMA"; case OBJECT_SEQUENCE: return "SEQUENCE"; case OBJECT_LARGE_SEQUENCE: return "LARGE SEQUENCE"; case OBJECT_TABLE: return "TABLE"; + case OBJECT_TABLESPACE: + return "TABLESPACE"; + case OBJECT_TRIGGER: + return "TRIGGER"; case OBJECT_TYPE: return "TYPE"; + case OBJECT_VIEW: + return "VIEW"; default: elog(WARNING, "unsupported object type %d for string", objtype); } @@ -778,8 +965,7 @@ string_objtype(ObjectType objtype, bool isgrant) * Process the pre-built format string from the ObjTree into the output parse * state. */ -static void -objtree_fmt_to_jsonb_element(JsonbParseState *state, ObjTree *tree) +static void objtree_fmt_to_jsonb_element(JsonbParseState *state, ObjTree *tree) { JsonbValue key; JsonbValue val; @@ -805,8 +991,7 @@ objtree_fmt_to_jsonb_element(JsonbParseState *state, ObjTree *tree) /* * Process the role string into the output parse state. 
*/ -static void -role_to_jsonb_element(JsonbParseState *state, char *owner) +static void role_to_jsonb_element(JsonbParseState *state, char *owner) { JsonbValue key; JsonbValue val; @@ -832,8 +1017,7 @@ role_to_jsonb_element(JsonbParseState *state, char *owner) /* * Create a JSONB representation from an ObjTree and its owner (if given). */ -static Jsonb * -objtree_to_jsonb(ObjTree *tree, char *owner) +static Jsonb* objtree_to_jsonb(ObjTree *tree, char *owner) { JsonbValue *value; @@ -845,9 +1029,8 @@ objtree_to_jsonb(ObjTree *tree, char *owner) * Helper for objtree_to_jsonb: process an individual element from an object or * an array into the output parse state. */ -static void -objtree_to_jsonb_element(JsonbParseState *state, ObjElem *object, - int elem_token) +static void objtree_to_jsonb_element(JsonbParseState *state, ObjElem *object, + int elem_token) { JsonbValue val; @@ -926,8 +1109,7 @@ objtree_to_jsonb_element(JsonbParseState *state, ObjElem *object, /* * Recursive helper for objtree_to_jsonb. */ -static JsonbValue * -objtree_to_jsonb_rec(ObjTree *tree, JsonbParseState *state, char *owner) +static JsonbValue* objtree_to_jsonb_rec(ObjTree *tree, JsonbParseState *state, char *owner) { slist_iter iter; @@ -964,8 +1146,7 @@ objtree_to_jsonb_rec(ObjTree *tree, JsonbParseState *state, char *owner) * * Note that CONSTRAINT_FOREIGN constraints are always ignored. */ -static List * -obtainConstraints(List *elements, Oid relationId) +static List* obtainConstraints(List *elements, Oid relationId) { Relation conRel; ScanKeyData skey[1]; @@ -981,8 +1162,8 @@ obtainConstraints(List *elements, Oid relationId) * relation. 
*/ conRel = relation_open(ConstraintRelationId, AccessShareLock); - ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(relationId)); + ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(relationId)); scan = systable_beginscan(conRel, ConstraintRelidIndexId, true, NULL, 1, skey); /* @@ -1067,14 +1248,13 @@ obtainConstraints(List *elements, Oid relationId) * control flow is different enough that it doesn't seem worth keeping them * together. */ -static void -pg_get_indexdef_detailed(Oid indexrelid, bool global, - char **index_am, - char **definition, - char **reloptions, - char **tablespace, - char **whereClause, - bool *invisible) +static void pg_get_indexdef_detailed(Oid indexrelid, bool global, + char **index_am, + char **definition, + char **reloptions, + char **tablespace, + char **whereClause, + bool *invisible) { HeapTuple ht_idx; HeapTuple ht_idxrel; @@ -1221,6 +1401,8 @@ pg_get_indexdef_detailed(Oid indexrelid, bool global, if (indexkey && IsA(indexkey, FuncExpr) && ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL) { appendStringInfoString(&definitionBuf, str); + } else if (indexkey && IsA(indexkey, PrefixKey)) { + appendStringInfoString(&definitionBuf, str); } else { appendStringInfo(&definitionBuf, "(%s)", str); } @@ -1234,7 +1416,7 @@ pg_get_indexdef_detailed(Oid indexrelid, bool global, Oid indcoll = indcollation->values[keyno]; if (OidIsValid(indcoll)) appendStringInfo(&definitionBuf, " COLLATE %s", - generate_collation_name((indcoll))); + generate_collation_name((indcoll))); /* Add the operator class name, even if default */ get_opclass_name(indclass->values[keyno], InvalidOid, &definitionBuf); @@ -1316,9 +1498,8 @@ pg_get_indexdef_detailed(Oid indexrelid, bool global, * * Caller must have set a correct deparse context. 
*/ -static char * -RelationGetColumnDefault(Relation rel, AttrNumber attno, List *dpcontext, - List **exprs) +static char* RelationGetColumnDefault(Relation rel, AttrNumber attno, List *dpcontext, + List **exprs) { Node *defval; char *defstr; @@ -1390,6 +1571,90 @@ static char *RelationGetColumnOnUpdate(Node *update_expr, List *dpcontext, List return buf.data; } +/* used by AT_ModifyColumn */ +static ObjTree* deparse_ColumnDef_constraints(ObjTree *ret, Relation relation, + ColumnDef *coldef, List *dpcontext, List **exprs) +{ + ObjTree *tmp_obj; + Oid relid = RelationGetRelid(relation); + ListCell *cell; + HeapTuple attrTup; + Form_pg_attribute attrForm; + + bool saw_notnull = false; + bool saw_autoincrement = false; + char* onupdate = NULL; + attrTup = SearchSysCacheAttName(relid, coldef->colname); + if (!HeapTupleIsValid(attrTup)) + elog(ERROR, "could not find cache entry for column \"%s\" of relation %u", + coldef->colname, relid); + attrForm = (Form_pg_attribute) GETSTRUCT(attrTup); + + foreach(cell, coldef->constraints) { + Constraint *constr = (Constraint *) lfirst(cell); + + if (constr->contype == CONSTR_NOTNULL) { + saw_notnull = true; + } else if (constr->contype == CONSTR_AUTO_INCREMENT) { + saw_autoincrement = true; + check_alter_table_rewrite_replident_change(relation, attrForm->attnum, "MODIFY COLUMN AUTO_INCREMENT"); + } else if (constr->contype == CONSTR_DEFAULT && constr->update_expr) { + onupdate = RelationGetColumnOnUpdate(constr->update_expr, dpcontext, exprs); + } + } + + if (coldef->is_not_null) + saw_notnull = true; + + if (saw_autoincrement) { + ReleaseSysCache(attrTup); + return ret; + } + + append_string_object(ret, "%{auto_increment}s", "auto_increment", + saw_autoincrement ? "AUTO_INCREMENT" : ""); + + /* ON UPDATE */ + append_string_object(ret, "ON UPDATE %{on_update}s", "on_update", onupdate ? onupdate : ""); + + append_string_object(ret, "%{not_null}s", "not_null", + saw_notnull ? "NOT NULL" : saw_autoincrement ? 
"NULL" : ""); + + /* GENERATED COLUMN EXPRESSION */ + tmp_obj = new_objtree("GENERATED ALWAYS AS"); + if (coldef->generatedCol == ATTRIBUTE_GENERATED_STORED) { + char *defstr; + + defstr = RelationGetColumnDefault(relation, attrForm->attnum, dpcontext, exprs); + append_string_object(tmp_obj, "(%{generation_expr}s) STORED", "generation_expr", defstr); + } else { + append_not_present(tmp_obj, "(%{generation_expr}s) STORED"); + } + + append_object_object(ret, "%{generated_column}s", tmp_obj); + + tmp_obj = new_objtree("DEFAULT"); + + if (attrForm->atthasdef && + coldef->generatedCol != ATTRIBUTE_GENERATED_STORED && + !saw_autoincrement && + !onupdate) { + char *defstr; + + defstr = RelationGetColumnDefault(relation, attrForm->attnum, + dpcontext, exprs); + + append_string_object(tmp_obj, "%{default}s", "default", defstr); + } else { + append_not_present(tmp_obj, "%{default}s"); + } + append_object_object(ret, "%{default}s", tmp_obj); + + ReleaseSysCache(attrTup); + return ret; +} + +static bool istypestring(Oid typid); /* * Deparse a ColumnDef node within a regular (non-typed) table creation. * @@ -1398,23 +1663,23 @@ static char *RelationGetColumnOnUpdate(Node *update_expr, List *dpcontext, List * elsewhere (the info in the parse node is incomplete anyway). 
* * Verbose syntax - * %{name}I %{coltype}T %{compression}s %{default}s %{not_null}s %{collation}s + * %{name}I %{coltype}T %{auto_increment} %{default}s %{not_null}s %{collation}s */ -static ObjTree * -deparse_ColumnDef(Relation relation, List *dpcontext, - ColumnDef *coldef, List **exprs) +static ObjTree* deparse_ColumnDef(Relation relation, List *dpcontext, bool composite, + ColumnDef *coldef, bool is_alter, List **exprs) { - ObjTree *ret; - Oid relid = RelationGetRelid(relation); - HeapTuple attrTup; + ObjTree *ret; + ObjTree *tmp_obj; + Oid relid = RelationGetRelid(relation); + HeapTuple attrTup; Form_pg_attribute attrForm; - Oid typid; - int32 typmod; - Oid typcollation; - bool saw_notnull; - bool saw_autoincrement; - char *onupdate = NULL; - ListCell *cell; + Oid typid; + int32 typmod; + Oid typcollation; + bool saw_notnull; + bool saw_autoincrement; + char* onupdate = NULL; + ListCell *cell; /* * Inherited columns without local definitions must not be emitted. @@ -1440,69 +1705,125 @@ deparse_ColumnDef(Relation relation, List *dpcontext, "coltype", ObjTypeObject, new_objtree_for_type(typid, typmod)); - - + tmp_obj = new_objtree("COLLATE"); if (OidIsValid(typcollation)) { - append_object_object(ret, "COLLATE %{collate}D", + append_object_object(tmp_obj, "%{name}D", new_objtree_for_qualname_id(CollationRelationId, typcollation)); + } else { + append_not_present(tmp_obj, "%{name}D"); } - - /* - * Emit a NOT NULL declaration if necessary. Note that we cannot - * trust pg_attribute.attnotnull here, because that bit is also set - * when primary keys are specified; we must not emit a NOT NULL - * constraint in that case, unless explicitly specified. Therefore, - * we scan the list of constraints attached to this column to - * determine whether we need to emit anything. (Fortunately, NOT NULL - * constraints cannot be table constraints.) - * - * In the ALTER TABLE cases, we also add a NOT NULL if the colDef is - * marked is_not_null. 
- */ - saw_notnull = false; - saw_autoincrement = false; + append_object_object(ret, "%{collation}s", tmp_obj); - foreach(cell, coldef->constraints) { - Constraint *constr = (Constraint *) lfirst(cell); + if (!composite) { + /* + * Emit a NOT NULL declaration if necessary. Note that we cannot + * trust pg_attribute.attnotnull here, because that bit is also set + * when primary keys are specified; we must not emit a NOT NULL + * constraint in that case, unless explicitly specified. Therefore, + * we scan the list of constraints attached to this column to + * determine whether we need to emit anything. (Fortunately, NOT NULL + * constraints cannot be table constraints.) + * + * In the ALTER TABLE cases, we also add a NOT NULL if the colDef is + * marked is_not_null. + */ + saw_notnull = false; + saw_autoincrement = false; + foreach(cell, coldef->constraints) { + Constraint *constr = (Constraint *) lfirst(cell); + + if (constr->contype == CONSTR_NOTNULL) { + saw_notnull = true; + } else if (constr->contype == CONSTR_AUTO_INCREMENT) { + saw_autoincrement = true; + } else if (constr->contype == CONSTR_DEFAULT && constr->update_expr) { + onupdate = RelationGetColumnOnUpdate(constr->update_expr, dpcontext, exprs); + } + } - if (constr->contype == CONSTR_NOTNULL) { + if (is_alter && coldef->is_not_null) saw_notnull = true; - } else if (constr->contype == CONSTR_AUTO_INCREMENT) { + + if (is_alter && !saw_autoincrement && coldef->raw_default && + IsA(coldef->raw_default, AutoIncrement)) { saw_autoincrement = true; - } else if (constr->contype == CONSTR_DEFAULT && constr->update_expr) { - onupdate = RelationGetColumnOnUpdate(constr->update_expr, dpcontext, exprs); } - } - append_string_object(ret, "%{auto_increment}s", "auto_increment", - saw_autoincrement ? 
"AUTO_INCREMENT" : ""); + if (is_alter && saw_autoincrement) { + check_alter_table_rewrite_replident_change(relation, attrForm->attnum, "ADD COLUMN AUTO_INCREMENT"); + /* auto_increment will be set with constraint when rewrite finish */ + ReleaseSysCache(attrTup); + return ret; + } - + append_string_object(ret, "%{auto_increment}s", "auto_increment", + saw_autoincrement ? "AUTO_INCREMENT" : ""); - append_string_object(ret, "%{not_null}s", "not_null", - saw_notnull ? "NOT NULL" : ""); + tmp_obj = new_objtree("DEFAULT"); - /* GENERATED COLUMN EXPRESSION */ - if (coldef->generatedCol == ATTRIBUTE_GENERATED_STORED) { - char *defstr = RelationGetColumnDefault(relation, attrForm->attnum, dpcontext, exprs); - append_string_object(ret, "GENERATED ALWAYS AS (%{generation_expr}s) STORED", - "generation_expr", defstr); - } + if (attrForm->atthasdef && + coldef->generatedCol != ATTRIBUTE_GENERATED_STORED && + !saw_autoincrement) { + char *defstr = NULL; - if (attrForm->atthasdef && - coldef->generatedCol != ATTRIBUTE_GENERATED_STORED && - !saw_autoincrement) { - char *defstr; + /* initdefval intend that default value is a constant expr, + * if the default can not get from initdefval, then need output dml change + * and set default after rewrite + */ + if (is_alter) { + if (coldef->initdefval) { + StringInfoData defvalbuf; + initStringInfo(&defvalbuf); + if (istypestring(typid)) + appendStringInfo(&defvalbuf, "\'%s\'", coldef->initdefval); + else + appendStringInfo(&defvalbuf, "%s", coldef->initdefval); + defstr = pstrdup(defvalbuf.data); + } else { + /* if coldef->initdefval not exist, then default is not a constant + * handle it after rewrite finish + */ + append_not_present(tmp_obj, "%{default}s"); + } + append_string_object(tmp_obj, "%{default}s", "default", defstr); + } else { + defstr = RelationGetColumnDefault(relation, attrForm->attnum, dpcontext, exprs); + if (defstr == NULL || defstr[0] == '\0') { + append_not_present(tmp_obj, "%{default}s"); + } else { + 
append_string_object(tmp_obj, "%{default}s", "default", defstr); + } + } + } else { + append_not_present(tmp_obj, "%{default}s"); + } + append_object_object(ret, "%{default}s", tmp_obj); - defstr = RelationGetColumnDefault(relation, attrForm->attnum, - dpcontext, exprs); + /* ON UPDATE */ + if ((!onupdate || !strlen(onupdate)) && coldef->update_default) { + onupdate = RelationGetColumnOnUpdate(coldef->update_default, dpcontext, exprs); + } + append_string_object(ret, "ON UPDATE %{on_update}s", "on_update", onupdate ? onupdate : ""); - append_string_object(ret, "DEFAULT %{default}s", "default", defstr); + if (!is_alter || saw_autoincrement) + append_string_object(ret, "%{not_null}s", "not_null", saw_notnull ? "NOT NULL" : ""); + + /* GENERATED COLUMN EXPRESSION */ + tmp_obj = new_objtree("GENERATED ALWAYS AS"); + if (coldef->generatedCol == ATTRIBUTE_GENERATED_STORED) { + char *defstr; + + defstr = RelationGetColumnDefault(relation, attrForm->attnum, + dpcontext, exprs); + append_string_object(tmp_obj, "(%{generation_expr}s) STORED", + "generation_expr", defstr); + } else { + append_not_present(tmp_obj, "(%{generation_expr}s) STORED"); + } + append_object_object(ret, "%{generated_column}s", tmp_obj); } - append_string_object(ret, "ON UPDATE %{on_update}s", "on_update", onupdate ? 
onupdate : ""); - ReleaseSysCache(attrTup); return ret; @@ -1515,8 +1836,7 @@ deparse_ColumnDef(Relation relation, List *dpcontext, * Verbose syntax * %{label}s = %{value}L */ -static ObjTree * -deparse_DefElem(DefElem *elem, bool is_reset) +static ObjTree* deparse_DefElem(DefElem *elem, bool is_reset) { ObjTree *ret; ObjTree *optname = new_objtree(""); @@ -1550,8 +1870,7 @@ deparse_DefElem(DefElem *elem, bool is_reset) * Verbose syntax * ON COMMIT %{on_commit_value}s */ -static ObjTree * -deparse_OnCommitClause(OnCommitAction option) +static ObjTree* deparse_OnCommitClause(OnCommitAction option) { ObjTree *ret = new_objtree("ON COMMIT"); switch (option) { @@ -1586,14 +1905,13 @@ deparse_OnCommitClause(OnCommitAction option) * OR * CACHE %{value} */ -static inline ObjElem * -deparse_Seq_Cache(sequence_values * seqdata, bool alter_table) +static inline ObjElem* deparse_Seq_Cache(sequence_values *seqdata, bool alter_table) { ObjTree *ret; const char *fmt; fmt = alter_table ? "SET CACHE %{value}s" : "CACHE %{value}s"; - + ret = new_objtree_VA(fmt, 2, "clause", ObjTypeString, "cache", "value", ObjTypeString, seqdata->cache_value); @@ -1609,14 +1927,13 @@ deparse_Seq_Cache(sequence_values * seqdata, bool alter_table) * OR * %{no}s CYCLE */ -static inline ObjElem * -deparse_Seq_Cycle(sequence_values * seqdata, bool alter_table) +static inline ObjElem* deparse_Seq_Cycle(sequence_values *seqdata, bool alter_table) { ObjTree *ret; const char *fmt; fmt = alter_table ? "SET %{no}s CYCLE" : "%{no}s CYCLE"; - + ret = new_objtree_VA(fmt, 2, "clause", ObjTypeString, "cycle", "no", ObjTypeString, @@ -1633,13 +1950,12 @@ deparse_Seq_Cycle(sequence_values * seqdata, bool alter_table) * OR * INCREMENT BY %{value}s */ -static inline ObjElem * -deparse_Seq_IncrementBy(sequence_values * seqdata, bool alter_table) +static inline ObjElem* deparse_Seq_IncrementBy(sequence_values *seqdata, bool alter_table) { ObjTree *ret; const char *fmt; - fmt = alter_table ? 
"SET INCREMENT BY %{value}s" : "INCREMENT BY %{value}s"; + fmt = alter_table ? "SET INCREMENT BY %{value}s" : "INCREMENT BY %{value}s"; ret = new_objtree_VA(fmt, 2, "clause", ObjTypeString, "seqincrement", @@ -1656,8 +1972,7 @@ deparse_Seq_IncrementBy(sequence_values * seqdata, bool alter_table) * OR * MAXVALUE %{value}s */ -static inline ObjElem * -deparse_Seq_Maxvalue(sequence_values * seqdata, bool alter_table) +static inline ObjElem* deparse_Seq_Maxvalue(sequence_values *seqdata, bool alter_table) { ObjTree *ret; const char *fmt; @@ -1679,13 +1994,12 @@ deparse_Seq_Maxvalue(sequence_values * seqdata, bool alter_table) * OR * MINVALUE %{value}s */ -static inline ObjElem * -deparse_Seq_Minvalue(sequence_values * seqdata, bool alter_table) +static inline ObjElem* deparse_Seq_Minvalue(sequence_values *seqdata, bool alter_table) { ObjTree *ret; const char *fmt; - fmt = alter_table ? "SET MINVALUE %{value}s" : "MINVALUE %{value}s"; + fmt = alter_table ? "SET MINVALUE %{value}s" : "MINVALUE %{value}s"; ret = new_objtree_VA(fmt, 2, "clause", ObjTypeString, "minvalue", @@ -1696,12 +2010,11 @@ deparse_Seq_Minvalue(sequence_values * seqdata, bool alter_table) /* * Deparse the sequence OWNED BY command. 
- * + * * Verbose syntax * OWNED BY %{owner}D */ -static ObjElem * -deparse_Seq_OwnedBy(Oid sequenceId) +static ObjElem* deparse_Seq_OwnedBy(Oid sequenceId) { ObjTree *ret = NULL; Relation depRel; @@ -1723,7 +2036,7 @@ deparse_Seq_OwnedBy(Oid sequenceId) BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(0)); - scan = systable_beginscan(depRel, DependDependerIndexId, true, + scan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 3, keys); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { @@ -1731,7 +2044,7 @@ deparse_Seq_OwnedBy(Oid sequenceId) Form_pg_depend depform; ObjTree *tmp_obj; char *colname; - + depform = (Form_pg_depend) GETSTRUCT(tuple); /* Only consider AUTO dependencies on pg_class */ @@ -1749,11 +2062,11 @@ deparse_Seq_OwnedBy(Oid sequenceId) tmp_obj = new_objtree_for_qualname_id(RelationRelationId, ownerId); append_string_object(tmp_obj, "attrname", "attrname", colname); - ret = new_objtree_VA("OWNED BY %{owner}D", 2, - "clause", ObjTypeString, "owned", - "owner", ObjTypeObject, tmp_obj); + ret = new_objtree_VA("OWNED BY %{owner}D", 2, + "clause", ObjTypeString, "owned", + "owner", ObjTypeObject, tmp_obj); } - + systable_endscan(scan); relation_close(depRel, AccessShareLock); @@ -1764,21 +2077,20 @@ deparse_Seq_OwnedBy(Oid sequenceId) if (!ret) /* XXX this shouldn't happen */ ret = new_objtree_VA("OWNED BY %{owner}D", 3, - "clause", ObjTypeString, "owned", - "owner", ObjTypeNull, - "present", ObjTypeBool, false); + "clause", ObjTypeString, "owned", + "owner", ObjTypeNull, + "present", ObjTypeBool, false); - return new_object_object(ret); + return new_object_object(ret); } /* * Deparse the sequence ORDER option. */ -static inline ObjElem * -deparse_Seq_Order(DefElem *elem) +static inline ObjElem* deparse_Seq_Order(DefElem *elem) { ObjTree *ret; - + ret = new_objtree_VA("%{order}s", 2, "clause", ObjTypeString, "order", "order", ObjTypeString, defGetBoolean(elem) ? 
"ORDER" : "NOORDER"); @@ -1793,8 +2105,7 @@ deparse_Seq_Order(DefElem *elem) * Verbose syntax * RESTART %{value}s */ -static inline ObjElem * -deparse_Seq_Restart(char *last_value) +static inline ObjElem* deparse_Seq_Restart(char *last_value) { ObjTree *ret; ret = new_objtree_VA("RESTART %{value}s", 2, @@ -1810,8 +2121,7 @@ deparse_Seq_Restart(char *last_value) * Verbose syntax * AS %{identity}D */ -static inline ObjElem * -deparse_Seq_As(DefElem *elem) +static inline ObjElem* deparse_Seq_As(DefElem *elem) { ObjTree *ret; Type likeType; @@ -1837,13 +2147,12 @@ deparse_Seq_As(DefElem *elem) * OR * START WITH %{value}s */ -static inline ObjElem * -deparse_Seq_Startwith(sequence_values *seqdata, bool alter_table) +static inline ObjElem* deparse_Seq_Startwith(sequence_values *seqdata, bool alter_table) { ObjTree *ret; const char *fmt; - fmt = alter_table ? "SET START WITH %{value}s" : "START WITH %{value}s"; + fmt = alter_table ? "SET START WITH %{value}s" : "START WITH %{value}s"; ret = new_objtree_VA(fmt, 2, "clause", ObjTypeString, "start", @@ -1852,6 +2161,62 @@ deparse_Seq_Startwith(sequence_values *seqdata, bool alter_table) return new_object_object(ret); } +static bool istypestring(Oid typid) +{ + switch (typid) { + case INT2OID: + case INT4OID: + case INT8OID: + case FLOAT4OID: + case FLOAT8OID: + case NUMERICOID: + /* Here we ignore infinity and NaN */ + return false; + default: + /* All other types are regarded as string. */ + return true; + } +} + +/* + * Deparse the INHERITS relations. + * + * Given a table OID, return a schema-qualified table list representing + * the parent tables. 
+ */ +static List* deparse_InhRelations(Oid objectId) +{ + List *parents = NIL; + Relation inhRel; + SysScanDesc scan; + ScanKeyData key; + HeapTuple tuple; + + inhRel = table_open(InheritsRelationId, RowExclusiveLock); + + ScanKeyInit(&key, + Anum_pg_inherits_inhrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(objectId)); + + scan = systable_beginscan(inhRel, InheritsRelidSeqnoIndexId, + true, NULL, 1, &key); + + while (HeapTupleIsValid(tuple = systable_getnext(scan))) { + ObjTree *parent; + Form_pg_inherits formInh = (Form_pg_inherits) GETSTRUCT(tuple); + + parent = new_objtree_for_qualname_id(RelationRelationId, + formInh->inhparent); + parents = lappend(parents, new_object_object(parent)); + } + + systable_endscan(scan); + table_close(inhRel, RowExclusiveLock); + + return parents; +} + /* * Subroutine for CREATE TABLE deparsing. * @@ -1860,8 +2225,7 @@ deparse_Seq_Startwith(sequence_values *seqdata, bool alter_table) * Note we ignore constraints in the parse node here; they are extracted from * system catalogs instead. 
*/ -static List * -deparse_TableElements(Relation relation, List *tableElements, List *dpcontext) +static List* deparse_TableElements(Relation relation, List *tableElements, List *dpcontext, bool composite) { List *elements = NIL; ListCell *lc; @@ -1872,7 +2236,9 @@ deparse_TableElements(Relation relation, List *tableElements, List *dpcontext) switch (nodeTag(elt)) { case T_ColumnDef: { ObjTree *tree; - tree = deparse_ColumnDef(relation, dpcontext, (ColumnDef *) elt, NULL); + tree = deparse_ColumnDef(relation, dpcontext, + composite, (ColumnDef *) elt, + false, NULL); if (tree != NULL) elements = lappend(elements, new_object_object(tree)); } @@ -1896,8 +2262,7 @@ deparse_TableElements(Relation relation, List *tableElements, List *dpcontext) * Verbose syntax * CREATE %{persistence}s SEQUENCE %{identity}D */ -static ObjTree * -deparse_CreateSeqStmt(Oid objectId, Node *parsetree) +static ObjTree* deparse_CreateSeqStmt(Oid objectId, Node *parsetree) { ObjTree *ret; Relation relation; @@ -1927,7 +2292,7 @@ deparse_CreateSeqStmt(Oid objectId, Node *parsetree) "persistence", ObjTypeString, get_persistence_str(relation->rd_rel->relpersistence), "large", ObjTypeString, seqvalues->large ? "LARGE" : "", "identity", ObjTypeObject, new_objtree_for_qualname(relation->rd_rel->relnamespace, - RelationGetRelationName(relation)), + RelationGetRelationName(relation)), "definition", ObjTypeArray, elems); relation_close(relation, AccessShareLock); @@ -1943,8 +2308,7 @@ deparse_CreateSeqStmt(Oid objectId, Node *parsetree) * Verbose syntax * ALTER SEQUENCE %{identity}D %{definition: }s */ -static ObjTree * -deparse_AlterSeqStmt(Oid objectId, Node *parsetree) +static ObjTree* deparse_AlterSeqStmt(Oid objectId, Node *parsetree) { ObjTree *ret; Relation relation; @@ -1999,35 +2363,256 @@ deparse_AlterSeqStmt(Oid objectId, Node *parsetree) "large", ObjTypeString, seqvalues->large ? 
"LARGE" : "", "identity", ObjTypeObject, new_objtree_for_qualname(relation->rd_rel->relnamespace, - RelationGetRelationName(relation)), + RelationGetRelationName(relation)), "definition", ObjTypeArray, elems); relation_close(relation, AccessShareLock); - return ret; + return ret; } /* - * Deparse a CommentStmt when it pertains to a constraint. + * deparse_ViewStmt + * deparse a ViewStmt + * + * Given a view OID and the parse tree that created it, return an ObjTree + * representing the creation command. * * Verbose syntax - * COMMENT ON CONSTRAINT %{identity}s ON [DOMAIN] %{parentobj}s IS %{comment}s + * CREATE %{or_replace}s %{persistence}s VIEW %{identity}D AS %{query}s */ -static ObjTree * -deparse_CommentOnConstraintSmt(Oid objectId, Node *parsetree) +static ObjTree* deparse_ViewStmt(Oid objectId, Node *parsetree) { - CommentStmt *node = (CommentStmt *) parsetree; - ObjTree *ret; - HeapTuple constrTup; - Form_pg_constraint constrForm; - ObjectAddress addr; + ViewStmt *node = (ViewStmt *) parsetree; + ObjTree *ret; + Relation relation; - Assert(node->objtype == OBJECT_TABCONSTRAINT || node->objtype == OBJECT_DOMCONSTRAINT); + relation = relation_open(objectId, AccessShareLock); - constrTup = SearchSysCache1(CONSTROID, objectId); - if (!HeapTupleIsValid(constrTup)) - elog(ERROR, "cache lookup failed for constraint with OID %u", objectId); - constrForm = (Form_pg_constraint) GETSTRUCT(constrTup); + ret = new_objtree_VA("CREATE %{or_replace}s %{persistence}s VIEW %{identity}D AS %{query}s", 4, + "or_replace", ObjTypeString, + node->replace ? "OR REPLACE" : "", + "persistence", ObjTypeString, + get_persistence_str(relation->rd_rel->relpersistence), + "identity", ObjTypeObject, + new_objtree_for_qualname(relation->rd_rel->relnamespace, + RelationGetRelationName(relation)), + "query", ObjTypeString, + pg_get_viewdef_string(objectId)); + + relation_close(relation, AccessShareLock); + return ret; +} + +/* + * Deparse a RenameStmt. 
+ */ +static ObjTree* deparse_RenameStmt(ObjectAddress address, Node *parsetree) +{ + RenameStmt *node = (RenameStmt *) parsetree; + ObjTree *ret; + Relation relation; + Oid schemaId; + + if (node->is_modifycolumn) { + /* modify column in dbcompatibility B */ + return NULL; + } + + if (node->renameTableflag) { + /* rename table syntax in dbcompatibility B */ + ListCell *cell = NULL; + List *renamelist = NIL; + foreach (cell, node->renameTargetList) { + RenameCell* renameInfo = (RenameCell*)lfirst(cell); + RangeVar *cur = NULL; + RangeVar *ori = NULL; + Oid nspoid = InvalidOid; + Oid tbloid = InvalidOid; + ObjTree *tmp_obj = NULL; + + ori = renameInfo->original_name; + cur = renameInfo->modify_name; + + if (!cur->schemaname || !ori->schemaname) { + continue; + } + + nspoid = get_namespace_oid(cur->schemaname, false); + tbloid = get_relname_relid(cur->relname, nspoid); + if (!OidIsValid(tbloid)) { + elog(ERROR, "can not find the table %s.%s for deparse rename table", + cur->schemaname, cur->relname); + } + if (!relation_support_ddl_replication(tbloid, false)) { + continue; + } + + tmp_obj = new_objtree_VA("%{ori}D TO %{modify}D", 2, + "ori", ObjTypeObject, new_objtree_for_qualname_rangevar(ori), + "modify", ObjTypeObject, new_objtree_for_qualname_rangevar(cur)); + + renamelist = lappend(renamelist, new_object_object(tmp_obj)); + } + + if (renamelist) { + ret = new_objtree_VA("RENAME TABLE %{renamelist:, }s", 1, + "renamelist", ObjTypeArray, renamelist); + return ret; + } else { + return NULL; + } + } + + /* + * In an ALTER .. RENAME command, we don't have the original name of the + * object in system catalogs: since we inspect them after the command has + * executed, the old name is already gone. Therefore, we extract it from + * the parse node. Note we still extract the schema name from the catalog + * (it might not be present in the parse node); it cannot possibly have + * changed anyway. 
+ */ + switch (node->renameType) { + case OBJECT_TABLE: + case OBJECT_INDEX: + case OBJECT_SEQUENCE: + case OBJECT_LARGE_SEQUENCE: + case OBJECT_VIEW: + case OBJECT_MATVIEW: + relation = relation_open(address.objectId, AccessShareLock); + schemaId = RelationGetNamespace(relation); + ret = new_objtree_VA("ALTER %{objtype}s %{if_exists}s %{identity}D RENAME TO %{newname}I", 4, + "objtype", ObjTypeString, + string_objtype(node->renameType, false), + "if_exists", ObjTypeString, + node->missing_ok ? "IF EXISTS" : "", + "identity", ObjTypeObject, + new_objtree_for_qualname(schemaId, + node->relation->relname), + "newname", ObjTypeString, + node->newname); + relation_close(relation, AccessShareLock); + break; + + case OBJECT_ATTRIBUTE: + case OBJECT_COLUMN: + relation = relation_open(address.objectId, AccessShareLock); + schemaId = RelationGetNamespace(relation); + + if (node->renameType == OBJECT_ATTRIBUTE) { + ret = new_objtree_VA("ALTER TYPE %{identity}D RENAME ATTRIBUTE %{colname}I", 2, + "identity", ObjTypeObject, + new_objtree_for_qualname(schemaId, + node->relation->relname), + "colname", ObjTypeString, node->subname); + } else { + ret = new_objtree_VA("ALTER %{objtype}s", 1, + "objtype", ObjTypeString, + string_objtype(node->relationType, false)); + + /* Composite types do not support IF EXISTS */ + if (node->renameType == OBJECT_COLUMN) + append_string_object(ret, "%{if_exists}s", + "if_exists", + node->missing_ok ? 
"IF EXISTS" : ""); + + append_object_object(ret, "%{identity}D", + new_objtree_for_qualname(schemaId, + node->relation->relname)); + append_string_object(ret, "RENAME COLUMN %{colname}I", + "colname", node->subname); + } + + append_string_object(ret, "TO %{newname}I", "newname", node->newname); + + if (node->renameType == OBJECT_ATTRIBUTE) + append_object_object(ret, "%{cascade}s", + new_objtree_VA("CASCADE", 1, + "present", ObjTypeBool, + node->behavior == DROP_CASCADE)); + + relation_close(relation, AccessShareLock); + break; + + case OBJECT_SCHEMA: + ret = new_objtree_VA("ALTER SCHEMA %{identity}I RENAME TO %{newname}I", 2, + "identity", ObjTypeString, node->subname, + "newname", ObjTypeString, node->newname); + break; + case OBJECT_TABCONSTRAINT: { + HeapTuple constrtup; + Form_pg_constraint constform; + + constrtup = SearchSysCache1(CONSTROID, + ObjectIdGetDatum(address.objectId)); + if (!HeapTupleIsValid(constrtup)) + elog(ERROR, "cache lookup failed for constraint with OID %u", + address.objectId); + constform = (Form_pg_constraint) GETSTRUCT(constrtup); + + ret = new_objtree_VA("ALTER TABLE %{identity}D RENAME CONSTRAINT %{oldname}I TO %{newname}I", 3, + "identity", ObjTypeObject, + new_objtree_for_qualname_id(RelationRelationId, + constform->conrelid), + "oldname", ObjTypeString, node->subname, + "newname", ObjTypeString, node->newname); + ReleaseSysCache(constrtup); + } + break; + case OBJECT_TYPE: { + HeapTuple typtup; + Form_pg_type typform; + Oid nspid; + + List* names = node->object; + char *typeName; + char *schemaname; + char *pkgName; + DeconstructQualifiedName(names, &schemaname, &typeName, &pkgName); + + typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(address.objectId)); + if (!HeapTupleIsValid(typtup)) + elog(ERROR, "cache lookup failed for type with OID %u", address.objectId); + typform = (Form_pg_type) GETSTRUCT(typtup); + nspid = typform->typnamespace; + + ret = new_objtree_VA("ALTER TYPE %{identity}D RENAME TO %{newname}I", 2, + 
"identity", ObjTypeObject, + new_objtree_for_qualname(nspid, + typeName), + "newname", ObjTypeString, node->newname); + + ReleaseSysCache(typtup); + } + break; + default: + elog(WARNING, "unsupported RenameStmt object type %d", node->renameType); + return NULL; + } + + return ret; +} + +/* + * Deparse a CommentStmt when it pertains to a constraint. + * + * Verbose syntax + * COMMENT ON CONSTRAINT %{identity}s ON [DOMAIN] %{parentobj}s IS %{comment}s + */ +static ObjTree* deparse_CommentOnConstraintSmt(Oid objectId, Node *parsetree) +{ + CommentStmt *node = (CommentStmt *) parsetree; + ObjTree *ret; + HeapTuple constrTup; + Form_pg_constraint constrForm; + ObjectAddress addr; + + Assert(node->objtype == OBJECT_TABCONSTRAINT || node->objtype == OBJECT_DOMCONSTRAINT); + + constrTup = SearchSysCache1(CONSTROID, objectId); + if (!HeapTupleIsValid(constrTup)) + elog(ERROR, "cache lookup failed for constraint with OID %u", objectId); + constrForm = (Form_pg_constraint) GETSTRUCT(constrTup); if (OidIsValid(constrForm->conrelid)) ObjectAddressSet(addr, RelationRelationId, constrForm->conrelid); @@ -2035,11 +2620,11 @@ deparse_CommentOnConstraintSmt(Oid objectId, Node *parsetree) ObjectAddressSet(addr, TypeRelationId, constrForm->contypid); ret = new_objtree_VA("COMMENT ON CONSTRAINT %{identity}s ON %{domain}s %{parentobj}s", 3, - "identity", ObjTypeString, pstrdup(NameStr(constrForm->conname)), - "domain", ObjTypeString, - (node->objtype == OBJECT_DOMCONSTRAINT) ? "DOMAIN" : "", - "parentobj", ObjTypeString, - getObjectIdentity(&addr)); + "identity", ObjTypeString, pstrdup(NameStr(constrForm->conname)), + "domain", ObjTypeString, + (node->objtype == OBJECT_DOMCONSTRAINT) ? 
"DOMAIN" : "", + "parentobj", ObjTypeString, + getObjectIdentity(&addr)); /* Add the comment clause */ append_literal_or_null(ret, "IS %{comment}s", node->comment); @@ -2057,8 +2642,7 @@ deparse_CommentOnConstraintSmt(Oid objectId, Node *parsetree) * Verbose syntax * COMMENT ON %{objtype}s %{identity}s IS %{comment}s */ -static ObjTree * -deparse_CommentStmt(ObjectAddress address, Node *parsetree) +static ObjTree* deparse_CommentStmt(ObjectAddress address, Node *parsetree) { CommentStmt *node = (CommentStmt *) parsetree; ObjTree *ret; @@ -2079,8 +2663,8 @@ deparse_CommentStmt(ObjectAddress address, Node *parsetree) } ret = new_objtree_VA("COMMENT ON %{objtype}s", 1, - "objtype", ObjTypeString, - (char *) string_objtype(node->objtype, false)); + "objtype", ObjTypeString, + (char *) string_objtype(node->objtype, false)); /* * Add the object identity clause. For zero argument aggregates we need @@ -2097,12 +2681,12 @@ deparse_CommentStmt(ObjectAddress address, Node *parsetree) procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(address.objectId)); if (!HeapTupleIsValid(procTup)) elog(ERROR, "cache lookup failed for procedure with OID %u", - address.objectId); + address.objectId); procForm = (Form_pg_proc) GETSTRUCT(procTup); if (procForm->pronargs == 0) identity = psprintf("%s(*)", quote_qualified_identifier(get_namespace_name(procForm->pronamespace), - NameStr(procForm->proname))); + NameStr(procForm->proname))); else identity = getObjectIdentity(&address); ReleaseSysCache(procTup); @@ -2118,6 +2702,47 @@ deparse_CommentStmt(ObjectAddress address, Node *parsetree) return ret; } +/* + * Deparse a CompositeTypeStmt (CREATE TYPE AS) + * + * Given a Composite type OID and the parse tree that created it, return an + * ObjTree representing the creation command. 
+ * + * Verbose syntax + * CREATE TYPE %{identity}D AS (%{columns:, }s) + */ +static ObjTree* deparse_CompositeTypeStmt(Oid objectId, Node *parsetree) +{ + CompositeTypeStmt *node = (CompositeTypeStmt *) parsetree; + HeapTuple typtup; + Form_pg_type typform; + Relation typerel; + List *dpcontext; + List *tableelts = NIL; + + /* Find the pg_type entry and open the corresponding relation */ + typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(objectId)); + if (!HeapTupleIsValid(typtup)) + elog(ERROR, "cache lookup failed for type with OID %u", objectId); + + typform = (Form_pg_type) GETSTRUCT(typtup); + typerel = relation_open(typform->typrelid, AccessShareLock); + + dpcontext = deparse_context_for(RelationGetRelationName(typerel), + RelationGetRelid(typerel)); + + tableelts = deparse_TableElements(typerel, node->coldeflist, dpcontext, + true); /* composite type */ + + table_close(typerel, AccessShareLock); + ReleaseSysCache(typtup); + + return new_objtree_VA("CREATE TYPE %{identity}D AS (%{columns:, }s)", 2, + "identity", ObjTypeObject, + new_objtree_for_qualname_id(TypeRelationId, objectId), + "columns", ObjTypeArray, tableelts); +} + /* * Deparse an IndexStmt. * @@ -2131,8 +2756,7 @@ deparse_CommentStmt(ObjectAddress address, Node *parsetree) * %{table}D USING %{index_am}s %{definition}s %{with}s %{tablespace}s * %{where_clause}s */ -static ObjTree * -deparse_IndexStmt(Oid objectId, Node *parsetree) +static ObjTree* deparse_IndexStmt(Oid objectId, Node *parsetree) { IndexStmt *node = (IndexStmt *) parsetree; ObjTree *ret; @@ -2161,14 +2785,15 @@ deparse_IndexStmt(Oid objectId, Node *parsetree) &index_am, &definition, &reloptions, &tablespace, &whereClause, &invisible); - ret = new_objtree_VA("CREATE %{unique}s INDEX %{concurrently}s %{name}I ON %{table}D USING %{index_am}s %{definition}s", 6, - "unique", ObjTypeString, node->unique ? "UNIQUE" : "", - "concurrently", ObjTypeString, node->concurrent ? 
"CONCURRENTLY" : "", - "name", ObjTypeString, RelationGetRelationName(idxrel), - "table", ObjTypeObject, - new_objtree_for_qualname(heaprel->rd_rel->relnamespace, RelationGetRelationName(heaprel)), - "index_am", ObjTypeString, index_am, - "definition", ObjTypeString, definition); + ret = new_objtree_VA( + "CREATE %{unique}s INDEX %{concurrently}s %{name}I ON %{table}D USING %{index_am}s %{definition}s", 6, + "unique", ObjTypeString, node->unique ? "UNIQUE" : "", + "concurrently", ObjTypeString, node->concurrent ? "CONCURRENTLY" : "", + "name", ObjTypeString, RelationGetRelationName(idxrel), + "table", ObjTypeObject, + new_objtree_for_qualname(heaprel->rd_rel->relnamespace, RelationGetRelationName(heaprel)), + "index_am", ObjTypeString, index_am, + "definition", ObjTypeString, definition); /* reloptions */ if (reloptions) @@ -2204,8 +2829,7 @@ deparse_IndexStmt(Oid objectId, Node *parsetree) * %{inherits}s %{partition_by}s %{access_method}s %{with_clause}s * %{on_commit}s %{tablespace}s */ -static ObjTree * -deparse_CreateStmt(Oid objectId, Node *parsetree) +static ObjTree* deparse_CreateStmt(Oid objectId, Node *parsetree) { CreateStmt *node = (CreateStmt *) parsetree; Relation relation = relation_open(objectId, AccessShareLock); @@ -2219,7 +2843,7 @@ deparse_CreateStmt(Oid objectId, Node *parsetree) "persistence", ObjTypeString, get_persistence_str(relation->rd_rel->relpersistence), "if_not_exists", ObjTypeString, node->if_not_exists ? "IF NOT EXISTS" : "", "identity", ObjTypeObject, new_objtree_for_qualname(relation->rd_rel->relnamespace, - RelationGetRelationName(relation))); + RelationGetRelationName(relation))); dpcontext = deparse_context_for(RelationGetRelationName(relation), objectId); @@ -2241,13 +2865,32 @@ deparse_CreateStmt(Oid objectId, Node *parsetree) * get constraints we rely on pg_constraint, because the parse node * might be missing some things such as the name of the constraints. 
*/ - tableelts = deparse_TableElements(relation, node->tableElts, dpcontext); + tableelts = deparse_TableElements(relation, node->tableElts, dpcontext, + false); /* not composite */ tableelts = obtainConstraints(tableelts, objectId); + tmp_obj = new_objtree(""); if (tableelts) - append_array_object(ret, "(%{table_elements:, }s)", tableelts); + append_array_object(tmp_obj, "(%{elements:, }s)", tableelts); else - append_format_string(ret, "()"); + append_format_string(tmp_obj, "()"); + + append_object_object(ret, "%{table_elements}s", tmp_obj); + + /* + * Add inheritance specification. We cannot simply scan the list of + * parents from the parser node, because that may lack the actual + * qualified names of the parent relations. Rather than trying to + * re-resolve them from the information in the parse node, it seems + * more accurate and convenient to grab it from pg_inherits. + */ + tmp_obj = new_objtree("INHERITS"); + if (node->inhRelations != NIL) { + append_array_object(tmp_obj, "(%{parents:, }D)", deparse_InhRelations(objectId)); + } else { + append_not_present(tmp_obj, "(%{parents:, }D)"); + } + append_object_object(ret, "%{inherits}s", tmp_obj); } /* AUTO_INCREMENT */ @@ -2279,7 +2922,7 @@ deparse_CreateStmt(Oid objectId, Node *parsetree) if (node->oncommit != ONCOMMIT_NOOP) append_object_object(ret, "%{on_commit}s", - deparse_OnCommitClause(node->oncommit)); + deparse_OnCommitClause(node->oncommit)); if (node->tablespacename) append_string_object(ret, "TABLESPACE %{tablespace}I", "tablespace", @@ -2298,7 +2941,7 @@ deparse_CreateStmt(Oid objectId, Node *parsetree) } } } - if (table_options) + if (table_options) append_array_object(ret, "%{options:, }s", table_options); /* opt_table_partitioning_clause */ @@ -2311,15 +2954,407 @@ deparse_CreateStmt(Oid objectId, Node *parsetree) return ret; } +/* + * +declare +null_cnt int; +start_num int; +sql text; +begin +select count(*) from %{identity}D where %{colname}I is null into null_cnt; +start_num := intmax - 
null_cnt; +sql := 'create large sequence %{seqname}D start with ' || start_num; +execute sql; +update %{identity}D set %{colname}I=pg_catalog.nextval('%{seqname}D') WEHRE %{colname}I IS NULL; +DROP large SEQUENCE %{seqname}D; +end; +/ + */ + +static char ADAPT_SUBSCTIPTION_AUTOINCREMENT_FMT[] = +"DECLARE\n" +"null_cnt pg_catalog.%{typname}s;\n" +"start_num pg_catalog.%{typname}s;\n" +"sql pg_catalog.text;\n" +"BEGIN\n" +"SELECT COUNT(*) FROM %{identity}D WHERE %{colname}I IS NULL INTO null_cnt;\n" +"SELECT pg_catalog.min(%{colname}I) - 1 - null_cnt FROM %{identity}D INTO start_num;\n" +"sql := 'CREATE LARGE SEQUENCE %{seqname}D START WITH ' || start_num || ' MINVALUE ' || start_num;\n" +"EXECUTE sql;\n" +"UPDATE %{identity}D SET %{colname}I = pg_catalog.nextval('%{seqname}D') WHERE %{colname}I IS NULL;\n" +"DROP LARGE SEQUENCE %{seqname}D;\n" +"END;\n"; + +static ObjTree* adapt_subscription_autoincrement_null_value(Relation rel, ColumnDef *coldef, Oid typid) +{ + ObjTree *tmp_obj = NULL; + char *maxvalue = NULL; + char *seqname = NULL; + char *typname = NULL; + sequence_values *seqvalues = get_sequence_values(RelAutoIncSeqOid(rel)); + StringInfoData string_buf; + + initStringInfo(&string_buf); + appendStringInfo(&string_buf, "ddl_replication_%s", seqvalues->sequence_name); + seqname = pstrdup(string_buf.data); + + resetStringInfo(&string_buf); + + switch (typid) { + case BOOLOID: + maxvalue = pstrdup("1"); + typname = pstrdup("bool"); + break; + case INT1OID: + appendStringInfo(&string_buf, "%u", UCHAR_MAX); + maxvalue = pstrdup(string_buf.data); + typname = pstrdup("int1"); + break; + case INT2OID: + appendStringInfo(&string_buf, "%d", SHRT_MAX); + maxvalue = pstrdup(string_buf.data); + typname = pstrdup("int2"); + break; + case INT4OID: + appendStringInfo(&string_buf, "%d", INT_MAX); + maxvalue = pstrdup(string_buf.data); + typname = pstrdup("int4"); + break; + case INT8OID : + case FLOAT4OID : + case FLOAT8OID : { + char buf[MAXINT8LEN + 1]; + 
pg_lltoa(PG_INT64_MAX, buf); + maxvalue = pstrdup(buf); + /* just use int8 for create sequence */ + typname = pstrdup("int8"); + } + break; + case INT16OID: { + const int MAXINT16LEN = 45; + char buf[MAXINT16LEN + 1]; + pg_i128toa(PG_INT128_MAX, buf, MAXINT16LEN + 1); + maxvalue = pstrdup(buf); + typname = pstrdup("int16"); + } + break; + default : { + appendStringInfo(&string_buf, "%d", INT_MAX); + maxvalue = pstrdup(string_buf.data); + typname = pstrdup("int4"); + break; + } + } + + tmp_obj = new_objtree_VA(ADAPT_SUBSCTIPTION_AUTOINCREMENT_FMT, 4, + "seqname", ObjTypeObject, new_objtree_for_qualname(rel->rd_rel->relnamespace, seqname), + "identity", ObjTypeObject, new_objtree_for_qualname(rel->rd_rel->relnamespace, RelationGetRelationName(rel)), + "colname", ObjTypeString, coldef->colname, + "typname", ObjTypeString, typname); + + FreeStringInfo(&string_buf); + return tmp_obj; +} + +static List* deparse_AlterRelation_add_column_default(CollectedCommand *cmd) +{ + ObjTree *ret = NULL; + ObjTree *tmp_obj = NULL; + List *dpcontext; + Relation rel; + List *subcmds = NIL; + ListCell *cell; + List *exprs = NIL; + Oid relId = cmd->d.alterTable.objectId; + AlterTableStmt *stmt = NULL; + bool isonly = false; + bool isrewrite = false; + + List *tree_list = NIL; + List *not_null_list = NIL; + List *constraint_list = NIL; + + isrewrite = cmd->d.alterTable.rewrite; + + rel = relation_open(relId, AccessShareLock); + dpcontext = deparse_context_for(RelationGetRelationName(rel), + relId); + stmt = (AlterTableStmt *) cmd->parsetree; + + if (rel->rd_rel->relkind != RELKIND_RELATION || + rel->rd_rel->relpersistence != RELPERSISTENCE_PERMANENT) { + relation_close(rel, AccessShareLock); + return NIL; + } + + if (stmt->relation && stmt->relation->inhOpt == INH_NO) { + isonly = true; + } + + foreach(cell, cmd->d.alterTable.subcmds) { + CollectedATSubcmd *sub = (CollectedATSubcmd *) lfirst(cell); + AlterTableCmd *subcmd = (AlterTableCmd *) sub->parsetree; + + if (subcmd->recursing) + 
continue; + + switch (subcmd->subtype) { + case AT_AddColumn: + case AT_AddColumnRecurse: { + ColumnDef *coldef = (ColumnDef*)subcmd->def; + HeapTuple attrTup; + Form_pg_attribute attrForm; + Oid typid; + int32 typmod; + Oid typcollation; + + /* do nothing */ + if (coldef->generatedCol == ATTRIBUTE_GENERATED_STORED) { + break; + } + + attrTup = SearchSysCacheAttName(relId, coldef->colname); + if (!HeapTupleIsValid(attrTup)) + elog(ERROR, "could not find cache entry for column \"%s\" of relation %u", + coldef->colname, relId); + attrForm = (Form_pg_attribute) GETSTRUCT(attrTup); + if (!attrForm->atthasdef) { + ReleaseSysCache(attrTup); + break; + } + + get_atttypetypmodcoll(relId, attrForm->attnum, &typid, &typmod, &typcollation); + + /* for auto_increment, construct the modify column clause after rewrite */ + if (coldef->raw_default && IsA(coldef->raw_default, AutoIncrement)) { + if (attrForm->attnotnull) { + tmp_obj = adapt_subscription_autoincrement_null_value(rel, coldef, typid); + if (tmp_obj) { + tree_list = lappend(tree_list, tmp_obj); + } + } + + tmp_obj = new_objtree_VA("MODIFY COLUMN %{colname}I %{coltype}T AUTO_INCREMENT", 2, + "colname", ObjTypeString, coldef->colname, + "coltype", ObjTypeObject, new_objtree_for_type(typid, typmod)); + if (!coldef->is_not_null) { + append_format_string(tmp_obj, "NULL"); + } + constraint_list = lappend(constraint_list, new_object_object(tmp_obj)); + ReleaseSysCache(attrTup); + + break; + } + + if (coldef->is_not_null) { + tmp_obj = new_objtree_VA( + "ALTER TABLE %{only}s %{identity}D ALTER COLUMN %{name}I SET NOT NULL", 3, + "only", ObjTypeString, isonly ? 
"ONLY" : "", + "identity", ObjTypeObject, + new_objtree_for_qualname(rel->rd_rel->relnamespace, + RelationGetRelationName(rel)), + "name", ObjTypeString, coldef->colname); + not_null_list = lappend(not_null_list, tmp_obj); + } + + char *defstr = NULL; + char *initdefval = NULL; + + if (coldef->initdefval) { + StringInfoData defvalbuf; + initStringInfo(&defvalbuf); + appendStringInfo(&defvalbuf, "\'%s\'", coldef->initdefval); + initdefval = pstrdup(defvalbuf.data); + } + defstr = RelationGetColumnDefault(rel, attrForm->attnum, dpcontext, &exprs); + if (!coldef->initdefval) { + check_alter_table_replident(rel); + ObjTree *update_ret = new_objtree_VA( + "UPDATE %{identity}D SET %{name}I = %{default}s WHERE %{name}I IS NULL", 3, + "identity", ObjTypeObject, new_objtree_for_qualname(rel->rd_rel->relnamespace, + RelationGetRelationName(rel)), + "name", ObjTypeString, coldef->colname, + "default", ObjTypeString, initdefval ? initdefval : defstr); + tree_list = lappend(tree_list, update_ret); + } + + ret = new_objtree_VA( + "ALTER TABLE %{only}s %{identity}D", 2, "only", ObjTypeString, isonly ? 
"ONLY" : "", "identity", + ObjTypeObject, + new_objtree_for_qualname(rel->rd_rel->relnamespace, RelationGetRelationName(rel))); + tmp_obj = new_objtree_VA("ALTER COLUMN %{name}I SET DEFAULT %{default}s", 2, "name", ObjTypeString, + coldef->colname, "default", ObjTypeString, defstr); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + append_array_object(ret, "%{subcmds:, }s", subcmds); + tree_list = lappend(tree_list, ret); + + ReleaseSysCache(attrTup); + } + break; + case AT_ModifyColumn: { + /* handle auto_increment attribute */ + AttrNumber attnum; + Oid typid; + int32 typmod; + Oid typcollation; + ColumnDef *coldef = (ColumnDef *) subcmd->def; + ListCell *lc2 = NULL; + + HeapTuple attrTup; + Form_pg_attribute attrForm; + + bool saw_autoincrement = false; + bool saw_null = false; + attrTup = SearchSysCacheAttName(relId, coldef->colname); + if (!HeapTupleIsValid(attrTup)) + elog(ERROR, "could not find cache entry for column \"%s\" of relation %u", + coldef->colname, relId); + attrForm = (Form_pg_attribute) GETSTRUCT(attrTup); + if (!attrForm->atthasdef) { + ReleaseSysCache(attrTup); + break; + } + attnum = attrForm->attnum; + get_atttypetypmodcoll(RelationGetRelid(rel), attnum, &typid, &typmod, &typcollation); + + foreach(lc2, coldef->constraints) { + Constraint *constr = (Constraint *) lfirst(lc2); + + if (constr->contype == CONSTR_AUTO_INCREMENT) { + saw_autoincrement = true; + } else if (constr->contype == CONSTR_NULL) { + saw_null = true; + } + } + + if (!saw_autoincrement) { + break; + } + + if (attrForm->attnotnull) { + tmp_obj = adapt_subscription_autoincrement_null_value(rel, coldef, typid); + if (tmp_obj) + tree_list = lappend(tree_list, tmp_obj); + } + + tmp_obj = new_objtree_VA("MODIFY COLUMN %{colname}I %{coltype}T AUTO_INCREMENT", 2, + "colname", ObjTypeString, coldef->colname, + "coltype", ObjTypeObject, new_objtree_for_type(typid, typmod)); + if (saw_null) { + append_format_string(tmp_obj, "NULL"); + } + constraint_list = 
lappend(constraint_list, new_object_object(tmp_obj)); + ReleaseSysCache(attrTup); + } + break; + case AT_AddIndex: { + Oid idxOid = sub->address.objectId; + IndexStmt *istmt; + Relation idx; + const char *idxname; + Oid constrOid; + istmt = (IndexStmt *) subcmd->def; + if (!istmt->isconstraint || !isrewrite) + break; + + idx = relation_open(idxOid, AccessShareLock); + idxname = RelationGetRelationName(idx); + + constrOid = get_relation_constraint_oid(cmd->d.alterTable.objectId, idxname, false); + + tmp_obj = new_objtree_VA("ADD CONSTRAINT %{name}I %{definition}s", 3, + "type", ObjTypeString, "add constraint", + "name", ObjTypeString, idxname, + "definition", ObjTypeString, + pg_get_constraintdef_part_string(constrOid)); + constraint_list = lappend(constraint_list, new_object_object(tmp_obj)); + + relation_close(idx, AccessShareLock); + } + break; + default: + break; + } + } + + if (not_null_list) { + tree_list = list_concat(tree_list, not_null_list); + } + + if (constraint_list) { + tmp_obj = new_objtree_VA("ALTER TABLE %{only}s %{identity}D", 2, + "only", ObjTypeString, isonly ? 
"ONLY" : "", + "identity", ObjTypeObject, + new_objtree_for_qualname(rel->rd_rel->relnamespace, + RelationGetRelationName(rel))); + append_array_object(tmp_obj, "%{subcmds:, }s", constraint_list); + tree_list = lappend(tree_list, tmp_obj); + } + + relation_close(rel, AccessShareLock); + return tree_list; +} + +List* deparse_altertable_end(CollectedCommand *cmd) +{ + OverrideSearchPath *overridePath; + MemoryContext oldcxt; + MemoryContext tmpcxt; + ObjTree *tree; + + List *command_list = NIL; + List *tree_list = NIL; + List *res = NIL; + StringInfoData str; + + if (cmd->type != SCT_AlterTable || !IsA(cmd->parsetree, AlterTableStmt)) { + return NIL; + } + + initStringInfo(&str); + tmpcxt = AllocSetContextCreate(CurrentMemoryContext, + "deparse ctx", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + oldcxt = MemoryContextSwitchTo(tmpcxt); + overridePath = GetOverrideSearchPath(CurrentMemoryContext); + overridePath->schemas = NIL; + overridePath->addCatalog = false; + overridePath->addTemp = true; + PushOverrideSearchPath(overridePath); + + tree_list = deparse_AlterRelation_add_column_default(cmd); + ListCell* lc = NULL; + foreach(lc, tree_list) { + tree = (ObjTree*)lfirst(lc); + Jsonb *jsonb; + char *command = NULL; + jsonb = objtree_to_jsonb(tree, NULL); + command = JsonbToCString(&str, VARDATA(jsonb), JSONB_ESTIMATED_LEN); + + command_list = lappend(command_list, MemoryContextStrdup(oldcxt, command)); + resetStringInfo(&str); + } + + PopOverrideSearchPath(); + + MemoryContextSwitchTo(oldcxt); + res = list_copy(command_list); + MemoryContextDelete(tmpcxt); + + return res; +} + + /* * Handle deparsing of DROP commands. 
* * Verbose syntax * DROP %s IF EXISTS %%{objidentity}s %{cascade}s */ -char * -deparse_drop_command(const char *objidentity, const char *objecttype, - Node *parsetree) +char* deparse_drop_command(const char *objidentity, const char *objecttype, + Node *parsetree) { DropStmt *node = (DropStmt*)parsetree; StringInfoData str; @@ -2347,13 +3382,239 @@ deparse_drop_command(const char *objidentity, const char *objecttype, return command; } +/* + * Deparse a CreateEnumStmt (CREATE TYPE AS ENUM) + * + * Given a Enum type OID and the parse tree that created it, return an ObjTree + * representing the creation command. + * + * Verbose syntax + * CREATE TYPE %{identity}D AS ENUM (%{values:, }L) + */ +static ObjTree* deparse_CreateEnumStmt(Oid objectId, Node *parsetree) +{ + CreateEnumStmt *node = (CreateEnumStmt *) parsetree; + List *values = NIL; + ListCell *cell; + + foreach(cell, node->vals) { + Value *val = (Value*)lfirst(cell); + values = lappend(values, new_string_object(strVal(val))); + } + + return new_objtree_VA("CREATE TYPE %{identity}D AS ENUM (%{values:, }L)", 2, + "identity", ObjTypeObject, + new_objtree_for_qualname_id(TypeRelationId, objectId), + "values", ObjTypeArray, values); +} + +/* + * Deparse an AlterObjectSchemaStmt (ALTER ... SET SCHEMA command) + * + * Given the object address and the parse tree that created it, return an + * ObjTree representing the alter command. + * + * Verbose syntax + * ALTER %s %{identity}s SET SCHEMA %{newschema}I + */ +static ObjTree* deparse_AlterObjectSchemaStmt(ObjectAddress address, Node *parsetree, + ObjectAddress old_schema) +{ + AlterObjectSchemaStmt *node = (AlterObjectSchemaStmt *) parsetree; + char *identity; + char *new_schema = node->newschema; + char *old_schname; + char *ident; + + /* + * Since the command has already taken place from the point of view of + * catalogs, getObjectIdentity returns the object name with the already + * changed schema. 
The output of our deparsing must return the original + * schema name, however, so we chop the schema name off the identity + * string and then prepend the quoted schema name. + * + * XXX This is pretty clunky. Can we do better? + */ + identity = getObjectIdentity(&address); + old_schname = get_namespace_name(old_schema.objectId); + if (!old_schname) + elog(ERROR, "cache lookup failed for schema with OID %u", + old_schema.objectId); + + ident = psprintf("%s%s", quote_identifier(old_schname), + identity + strlen(quote_identifier(new_schema))); + + return new_objtree_VA("ALTER %{objtype}s %{identity}s SET SCHEMA %{newschema}I", 3, + "objtype", ObjTypeString, + string_objtype(node->objectType, false), + "identity", ObjTypeString, ident, + "newschema", ObjTypeString, new_schema); +} + + +static ObjTree* deparse_CreateEventStmt(Oid objectId, Node *parsetree) +{ + ObjTree *ret; + ObjTree *tmp_obj; + CreateEventStmt *stmt = (CreateEventStmt *) parsetree; + StringInfoData string_buf; + initStringInfo(&string_buf); + + char *ev_status = NULL; + if (stmt->def_name) { + appendStringInfo(&string_buf, "DEFINER = %s", stmt->def_name); + } + + if (stmt->event_status == EVENT_DISABLE) { + ev_status = pstrdup("DISABLE"); + } else if (stmt->event_status == EVENT_DISABLE_ON_SLAVE) { + ev_status = pstrdup("DISABLE ON SLAVE"); + } + + char *event_name_str = stmt->event_name->relname; + char *schema_name_str = stmt->event_name->schemaname; + + if (stmt->interval_time) { + ret = new_objtree_VA( + "CREATE %{definer_opt}s EVENT %{if_not_exits}s %{schema}s.%{eventname}s " + "ON SCHEDULE EVERY %{every_interval}s STARTS '%{start_expr}s'", 6, + "definer_opt", ObjTypeString, stmt->def_name ? pstrdup(string_buf.data) : "", + "if_not_exits", ObjTypeString, stmt->if_not_exists ? 
"IF NOT EXISTS" : "", + "schema", ObjTypeString, schema_name_str, + "eventname", ObjTypeString, event_name_str, + "every_interval", ObjTypeString, parseIntervalExprString(stmt->interval_time), + "start_expr", ObjTypeString, parseTimeExprString(stmt->start_time_expr)); + } else { + ret = new_objtree_VA( + "CREATE %{definer_opt}s EVENT %{if_not_exits}s %{schema}s.%{eventname}s " + "ON SCHEDULE STARTS '%{start_expr}s'", 5, + "definer_opt", ObjTypeString, stmt->def_name ? pstrdup(string_buf.data) : "", + "if_not_exits", ObjTypeString, stmt->if_not_exists ? "IF NOT EXISTS" : "", + "schema", ObjTypeString, schema_name_str, + "eventname", ObjTypeString, event_name_str, + "start_expr", ObjTypeString, parseTimeExprString(stmt->start_time_expr)); + } + if (stmt->end_time_expr) { + append_string_object(ret, "ENDS '%{end_expr}s'", + "end_expr", parseTimeExprString(stmt->end_time_expr)); + } + + tmp_obj = new_objtree_VA("%{opt_ev_on_completion}s %{opt_ev_status}s COMMENT '%{comment_opt}s' DO %{ev_body}s", 4, + "opt_ev_on_completion", ObjTypeString, + stmt->complete_preserve ? "ON COMPLETION NOT PRESERVE" : "ON COMPLETION PRESERVE", + "opt_ev_status", ObjTypeString, ev_status ? ev_status : "", + "comment_opt", ObjTypeString, stmt->event_comment_str ? 
stmt->event_comment_str : "", + "ev_body", ObjTypeString, stmt->event_query_str); + + append_object_object(ret, "%{event_body}s", tmp_obj); + + FreeStringInfo(&string_buf); + return ret; +} + +static ObjTree* deparse_AlterEventStmt(Oid objectId, Node *parsetree) +{ + ObjTree *ret; + AlterEventStmt *stmt = (AlterEventStmt *) parsetree; + StringInfoData string_buf; + initStringInfo(&string_buf); + + char *event_name_str = stmt->event_name->relname; + char *schema_name_str = stmt->event_name->schemaname; + + if (stmt->def_name) { + Value *definerVal = (Value *)stmt->def_name->arg; + appendStringInfo(&string_buf, "DEFINER = %s", strVal(definerVal)); + } + + ret = new_objtree_VA("ALTER %{definer_opt}s EVENT %{schema}s.%{eventname}s ", 3, + "definer_opt", ObjTypeString, stmt->def_name ? pstrdup(string_buf.data) : "", + "schema", ObjTypeString, schema_name_str, + "eventname", ObjTypeString, event_name_str); + + if (stmt->interval_time || stmt->start_time_expr || stmt->end_time_expr) { + append_format_string(ret, "ON SCHEDULE "); + if (stmt->interval_time && stmt->interval_time->arg) { + append_string_object(ret, "EVERY %{every_interval}s ", + "every_interval", parseIntervalExprString(stmt->interval_time->arg)); + } + if (stmt->start_time_expr && stmt->start_time_expr->arg) { + if (stmt->interval_time && stmt->interval_time->arg) { + append_string_object(ret, "STARTS '%{start_expr}s' ", + "start_expr", parseTimeExprString(stmt->start_time_expr->arg)); + } else { + append_string_object(ret, "AT '%{start_expr}s' ", + "start_expr", parseTimeExprString(stmt->start_time_expr->arg)); + } + } + if (stmt->end_time_expr && stmt->end_time_expr->arg) { + append_string_object(ret, "ENDS '%{end_expr}s' ", + "end_expr", parseTimeExprString(stmt->end_time_expr->arg)); + } + } + + /* preserve_opt rename_opt status_opt comments_opt action_opt */ + if (stmt->complete_preserve && stmt->complete_preserve->arg) { + Value *arg = (Value *)stmt->complete_preserve->arg; + if (!intVal(arg)) { + 
append_format_string(ret, "ON COMPLETION PRESERVE "); + } else { + append_format_string(ret, "ON COMPLETION NOT PRESERVE "); + } + } + + if (stmt->new_name && stmt->new_name->arg) { + Value *arg = (Value *)stmt->new_name->arg; + append_string_object(ret, "RENAME TO %{new_name}s ", + "new_name", strVal(arg)); + } + + if (stmt->event_status && stmt->event_status->arg) { + Value *arg = (Value *)stmt->event_status->arg; + EventStatus ev_status = (EventStatus)intVal(arg); + if (ev_status == EVENT_ENABLE) { + append_format_string(ret, "ENABLE "); + } else if (ev_status == EVENT_DISABLE) { + append_format_string(ret, "DISABLE "); + } else if (ev_status == EVENT_DISABLE_ON_SLAVE) { + append_format_string(ret, "DISABLE ON SLAVE "); + } + } + if (stmt->event_comment_str && stmt->event_comment_str->arg) { + Value *arg = (Value *)stmt->event_comment_str->arg; + append_string_object(ret, "COMMENT '%{comment}s' ", + "comment", strVal(arg)); + } + if (stmt->event_query_str && stmt->event_query_str) { + Value *arg = (Value *)stmt->event_query_str->arg; + append_string_object(ret, "DO %{action}s ", + "action", strVal(arg)); + } + + return ret; +} + +static ObjTree* deparse_DropEventStmt(Oid objectId, Node *parsetree) +{ + ObjTree *ret; + DropEventStmt *stmt = (DropEventStmt *) parsetree; + char *event_name_str = stmt->event_name->relname; + char *schema_name_str = stmt->event_name->schemaname; + + ret = new_objtree_VA("DROP EVENT %{if_exists}s %{schema}s.%{eventname}s", 3, + "if_exists", ObjTypeString, stmt->missing_ok ? "IF EXISTS" : "", + "schema", ObjTypeString, schema_name_str, + "eventname", ObjTypeString, event_name_str); + + return ret; +} + /* * Handle deparsing of simple commands. * * This function should cover all cases handled in ProcessUtilitySlow. 
*/ -static ObjTree * -deparse_simple_command(CollectedCommand *cmd, bool *include_owner) +static ObjTree* deparse_simple_command(CollectedCommand *cmd, bool *include_owner) { Oid objectId; Node *parsetree; @@ -2368,15 +3629,17 @@ deparse_simple_command(CollectedCommand *cmd, bool *include_owner) /* This switch needs to handle everything that ProcessUtilitySlow does */ switch (nodeTag(parsetree)) { - case T_CreateStmt: - return deparse_CreateStmt(objectId, parsetree); - - case T_IndexStmt: - return deparse_IndexStmt(objectId, parsetree); - - case T_CreateSeqStmt: - return deparse_CreateSeqStmt(objectId, parsetree); - + case T_AlterFunctionStmt: + *include_owner = false; + return deparse_AlterFunction(objectId, parsetree); + case T_AlterObjectSchemaStmt: + *include_owner = false; + return deparse_AlterObjectSchemaStmt(cmd->d.simple.address, + parsetree, + cmd->d.simple.secondaryObject); + case T_AlterSchemaStmt: + *include_owner = false; + return deparse_AlterSchemaStmt(objectId, parsetree); case T_AlterSeqStmt: *include_owner = false; return deparse_AlterSeqStmt(objectId, parsetree); @@ -2385,19 +3648,1410 @@ deparse_simple_command(CollectedCommand *cmd, bool *include_owner) *include_owner = false; return deparse_CommentStmt(cmd->d.simple.address, parsetree); - default: - elog(INFO, "unrecognized node type in deparse command: %d", + case T_CompositeTypeStmt: + return deparse_CompositeTypeStmt(objectId, parsetree); + + case T_CreateEnumStmt: /* CREATE TYPE AS ENUM */ + return deparse_CreateEnumStmt(objectId, parsetree); + + case T_CreateFunctionStmt: + return deparse_CreateFunction(objectId, parsetree); + + case T_CreateSchemaStmt: + return deparse_CreateSchemaStmt(objectId, parsetree); + + case T_CreateSeqStmt: + return deparse_CreateSeqStmt(objectId, parsetree); + case T_CreateStmt: + return deparse_CreateStmt(objectId, parsetree); + case T_CreateTrigStmt: + return deparse_CreateTrigStmt(objectId, parsetree); + + case T_IndexStmt: + return 
deparse_IndexStmt(objectId, parsetree); + case T_RenameStmt: + *include_owner = false; + return deparse_RenameStmt(cmd->d.simple.address, parsetree); + case T_ViewStmt: + return deparse_ViewStmt(objectId, parsetree); + case T_CreateEventStmt: + return deparse_CreateEventStmt(objectId, parsetree); + case T_AlterEventStmt: + return deparse_AlterEventStmt(objectId, parsetree); + case T_DropEventStmt: + return deparse_DropEventStmt(objectId, parsetree); + default: + if (u_sess->hook_cxt.deparseCollectedCommandHook != NULL) { + return (ObjTree*)((deparseCollectedCommand)(u_sess->hook_cxt.deparseCollectedCommandHook)) + (DEPARSE_SIMPLE_COMMAND, cmd, NULL, NULL); + } + elog(INFO, "unrecognized node type in deparse command: %d", (int) nodeTag(parsetree)); } return NULL; } + +/* + * ... ALTER COLUMN ... SET/RESET (...) + * + * Verbose syntax + * ALTER COLUMN %{column}I RESET|SET (%{options:, }s) + */ +static ObjTree* deparse_ColumnSetOptions(AlterTableCmd *subcmd) +{ + List *sets = NIL; + ListCell *cell; + ObjTree *ret; + bool is_reset = subcmd->subtype == AT_ResetOptions; + + ret = new_objtree_VA("ALTER COLUMN %{column}I %{option}s", 2, + "column", ObjTypeString, subcmd->name, + "option", ObjTypeString, is_reset ? "RESET" : "SET"); + + foreach(cell, (List *) subcmd->def) { + DefElem *elem; + ObjTree *set; + + elem = (DefElem *) lfirst(cell); + set = deparse_DefElem(elem, is_reset); + sets = lappend(sets, new_object_object(set)); + } + + Assert(sets); + append_array_object(ret, "(%{options:, }s)", sets); + + return ret; +} + +/* + * ... ALTER COLUMN ... SET/RESET (...) 
+ * + * Verbose syntax + * RESET|SET (%{options:, }s) + */ +static ObjTree* deparse_RelSetOptions(AlterTableCmd *subcmd) +{ + List *sets = NIL; + ListCell *cell; + bool is_reset = subcmd->subtype == AT_ResetRelOptions; + + foreach(cell, (List *) subcmd->def) { + DefElem *elem; + ObjTree *set; + + elem = (DefElem *) lfirst(cell); + set = deparse_DefElem(elem, is_reset); + sets = lappend(sets, new_object_object(set)); + } + + Assert(sets); + + return new_objtree_VA("%{set_reset}s (%{options:, }s)", 2, + "set_reset", ObjTypeString, is_reset ? "RESET" : "SET", + "options", ObjTypeArray, sets); +} + +/* + * Deparse all the collected subcommands and return an ObjTree representing the + * alter command. + * + * Verbose syntax + * ALTER reltype %{only}s %{identity}D %{subcmds:, }s + */ +static ObjTree* deparse_AlterRelation(CollectedCommand *cmd, ddl_deparse_context *context) +{ + ObjTree *ret; + ObjTree *tmp_obj = NULL; + ObjTree *tmp_obj2; + List *dpcontext; + Relation rel; + List *subcmds = NIL; + ListCell *cell; + const char *reltype = NULL; + bool istype = false; + bool istable = false; + bool isrewrite = false; + List *exprs = NIL; + Oid relId = cmd->d.alterTable.objectId; + AlterTableStmt *stmt = NULL; + bool isonly = false; + ObjTree *loc_obj; + + Assert(cmd->type == SCT_AlterTable); + + stmt = (AlterTableStmt *) cmd->parsetree; + if (stmt && !IsA(stmt, AlterTableStmt)) { + return NULL; + } + + isrewrite = cmd->d.alterTable.rewrite; + + /* + * ALTER TABLE subcommands generated for TableLikeClause is processed in + * the top level CREATE TABLE command; return empty here. 
+ */ + rel = relation_open(relId, AccessShareLock); + dpcontext = deparse_context_for(RelationGetRelationName(rel), + relId); + + switch (rel->rd_rel->relkind) { + case RELKIND_RELATION: + reltype = "TABLE"; + istable = true; + if (stmt->relation->inhOpt == INH_NO) { + isonly = true; + } + break; + case RELKIND_INDEX: + case RELKIND_GLOBAL_INDEX: + reltype = "INDEX"; + break; + case RELKIND_VIEW: + reltype = "VIEW"; + break; + case RELKIND_COMPOSITE_TYPE: + reltype = "TYPE"; + istype = true; + break; + case RELKIND_FOREIGN_TABLE: + reltype = "FOREIGN TABLE"; + break; + case RELKIND_MATVIEW: + reltype = "MATERIALIZED VIEW"; + break; + + case RELKIND_SEQUENCE: + reltype = "SEQUENCE"; + break; + case RELKIND_LARGE_SEQUENCE: + reltype = "LARGE SEQUENCE"; + break; + default: + elog(ERROR, "unexpected relkind %d", rel->rd_rel->relkind); + } + + ret = new_objtree_VA("ALTER %{objtype}s %{only}s %{identity}D", 3, + "objtype", ObjTypeString, reltype, + "only", ObjTypeString, isonly ? "ONLY" : "", + "identity", ObjTypeObject, + new_objtree_for_qualname(rel->rd_rel->relnamespace, + RelationGetRelationName(rel))); + + foreach(cell, cmd->d.alterTable.subcmds) { + CollectedATSubcmd *sub = (CollectedATSubcmd *) lfirst(cell); + AlterTableCmd *subcmd = (AlterTableCmd *) sub->parsetree; + ObjTree *tree; + + Assert(IsA(subcmd, AlterTableCmd)); + + /* + * Skip deparse of the subcommand if the objectId doesn't match the + * target relation ID. It can happen for inherited tables when + * subcommands for inherited tables and the parent table are both + * collected in the ALTER TABLE command for the parent table. With the + * exception of the internally generated AddConstraint (for + * ALTER TABLE ADD CONSTRAINT FOREIGN KEY REFERENCES) where the + * objectIds could mismatch (forein table id and the referenced table + * id). 
+ */ + if (subcmd->recursing) + continue; + + switch (subcmd->subtype) { + case AT_AddColumn: + case AT_AddColumnRecurse: + /* XXX need to set the "recurse" bit somewhere? */ + Assert(IsA(subcmd->def, ColumnDef)); + + if (istype && subcmd->subtype == AT_AddColumnRecurse) { + /* maybe is recurse subcmd for alter type command */ + break; + } + + tree = deparse_ColumnDef(rel, dpcontext, false, + (ColumnDef *) subcmd->def, true, &exprs); + mark_function_volatile(context, (Node*)exprs); + tmp_obj = new_objtree_VA("ADD %{objtype}s %{if_not_exists}s %{definition}s %{cascade}s", 5, + "objtype", ObjTypeString, + istype ? "ATTRIBUTE" : "COLUMN", + "type", ObjTypeString, "add column", + "if_not_exists", ObjTypeString, + subcmd->missing_ok ? "IF NOT EXISTS" : "", + "definition", ObjTypeObject, tree, + "cascade", ObjTypeString, subcmd->behavior == DROP_CASCADE ? "CASCADE" : ""); + + if (subcmd->is_first) { + loc_obj = new_objtree("FIRST"); + append_object_object(tmp_obj, "%{add_first}s", loc_obj); + } else if (subcmd->after_name) { + loc_obj = new_objtree_VA("AFTER %{name}I", 1, "name", ObjTypeString, subcmd->after_name); + append_object_object(tmp_obj, "%{add_after_name}s", loc_obj); + } + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + + break; + case AT_AddIndex: { + Oid idxOid = sub->address.objectId; + IndexStmt *istmt; + Relation idx; + const char *idxname; + Oid constrOid; + + Assert(IsA(subcmd->def, IndexStmt)); + istmt = (IndexStmt *) subcmd->def; + + if (!istmt->isconstraint) + break; + + if (isrewrite) + break; + + idx = relation_open(idxOid, AccessShareLock); + idxname = RelationGetRelationName(idx); + + constrOid = get_relation_constraint_oid(cmd->d.alterTable.objectId, idxname, false); + + tmp_obj = new_objtree_VA("ADD CONSTRAINT %{name}I %{definition}s", 3, + "type", ObjTypeString, "add constraint", + "name", ObjTypeString, idxname, + "definition", ObjTypeString, + pg_get_constraintdef_part_string(constrOid)); + subcmds = lappend(subcmds, 
new_object_object(tmp_obj)); + + relation_close(idx, AccessShareLock); + } + break; + case AT_AddIndexConstraint: { + IndexStmt *istmt; + Relation idx; + Oid constrOid = sub->address.objectId; + char *indexname = NULL; + ListCell *lcell; + Assert(IsA(subcmd->def, IndexStmt)); + istmt = (IndexStmt *) subcmd->def; + + Assert(istmt->isconstraint && istmt->unique); + + idx = relation_open(istmt->indexOid, AccessShareLock); + + foreach (lcell, istmt->options) { + DefElem* def = (DefElem*)lfirst(lcell); + if (pg_strcasecmp(def->defname, "origin_indexname") == 0) { + indexname = defGetString(def); + break; + } + } + + /* + * Verbose syntax + * + * ADD CONSTRAINT %{name}I %{constraint_type}s USING INDEX + * %index_name}I %{deferrable}s %{init_deferred}s + */ + tmp_obj = new_objtree_VA( + "ADD CONSTRAINT %{name}I %{constraint_type}s " + "USING INDEX %{index_name}I %{deferrable}s %{init_deferred}s", 6, + "type", ObjTypeString, "add constraint using index", + "name", ObjTypeString, get_constraint_name(constrOid), + "constraint_type", ObjTypeString, + istmt->primary ? "PRIMARY KEY" : "UNIQUE", + "index_name", ObjTypeString, + indexname ? indexname : RelationGetRelationName(idx), + "deferrable", ObjTypeString, + istmt->deferrable ? "DEFERRABLE" : "NOT DEFERRABLE", + "init_deferred", ObjTypeString, + istmt->initdeferred ? 
"INITIALLY DEFERRED" : "INITIALLY IMMEDIATE"); + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + + relation_close(idx, AccessShareLock); + } + break; + + case AT_ReAddIndex: + case AT_ReAddConstraint: + case AT_ReplaceRelOptions: + /* Subtypes used for internal operations; nothing to do here */ + break; + + case AT_AddColumnToView: + /* CREATE OR REPLACE VIEW -- nothing to do here */ + break; + + case AT_ColumnDefault: + if (subcmd->def == NULL) { + tmp_obj = new_objtree_VA("ALTER COLUMN %{column}I DROP DEFAULT", 2, + "type", ObjTypeString, "drop default", + "column", ObjTypeString, subcmd->name); + } else { + List *dpcontext_rel; + HeapTuple attrtup; + AttrNumber attno; + + tmp_obj = new_objtree_VA("ALTER COLUMN %{column}I SET DEFAULT", 2, + "type", ObjTypeString, "set default", + "column", ObjTypeString, subcmd->name); + + dpcontext_rel = deparse_context_for(RelationGetRelationName(rel), + RelationGetRelid(rel)); + attrtup = SearchSysCacheAttName(RelationGetRelid(rel), subcmd->name); + attno = ((Form_pg_attribute) GETSTRUCT(attrtup))->attnum; + append_string_object(tmp_obj, "%{definition}s", "definition", + RelationGetColumnDefault(rel, attno, + dpcontext_rel, + NULL)); + ReleaseSysCache(attrtup); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DropNotNull: + tmp_obj = new_objtree_VA("ALTER COLUMN %{column}I DROP NOT NULL", 2, + "type", ObjTypeString, "drop not null", + "column", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_SetNotNull: + tmp_obj = new_objtree_VA("ALTER COLUMN %{column}I SET NOT NULL", 2, + "type", ObjTypeString, "set not null", + "column", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_SetStatistics: { + Assert(IsA(subcmd->def, Integer)); + if (subcmd->additional_property) { + tmp_obj = new_objtree_VA("ALTER COLUMN %{column}I SET STATISTICS %{statistics}n", 3, + "type", 
ObjTypeString, "set statistics", + "column", ObjTypeString, subcmd->name, + "statistics", ObjTypeInteger, + intVal((Value *)subcmd->def)); + } else { + tmp_obj = new_objtree_VA("ALTER COLUMN %{column}n SET STATISTICS PERCENT %{statistics}n", 4, + "type", ObjTypeString, "set statistics", + "column", ObjTypeString, subcmd->name, + "statistics", ObjTypeInteger, + intVal((Value *)subcmd->def)); + } + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_SetOptions: + case AT_ResetOptions: + subcmds = lappend(subcmds, new_object_object(deparse_ColumnSetOptions(subcmd))); + break; + + case AT_SetStorage: + Assert(IsA(subcmd->def, String)); + tmp_obj = new_objtree_VA("ALTER COLUMN %{column}I SET STORAGE %{storage}s", 3, + "type", ObjTypeString, "set storage", + "column", ObjTypeString, subcmd->name, + "storage", ObjTypeString, + strVal((Value *)subcmd->def)); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_SET_COMPRESS: + tmp_obj = new_objtree_VA("SET %{compression}s", 2, + "type", ObjTypeString, "set compression", + "compression", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_EnableRowMoveMent: + tmp_obj = new_objtree_VA("ENABLE ROW MOVEMENT", 1, + "type", ObjTypeString, "enable row movement"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_DisableRowMoveMent: + tmp_obj = new_objtree_VA("DISABLE ROW MOVEMENT", 1, + "type", ObjTypeString, "disable row movement"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_SetAutoIncrement: + tmp_obj = new_objtree_VA("AUTO_INCREMENT", 1, + "type", ObjTypeString, "auto_increment"); + if (subcmd->def && IsA(subcmd->def, Integer)) { + append_int_object(tmp_obj, "%{autoincstart}n", (int32)intVal(subcmd->def)); + } else { + append_string_object(tmp_obj, "%{autoincstart}s", "autoincstart", strVal(subcmd->def)); + } + subcmds = lappend(subcmds, 
new_object_object(tmp_obj)); + break; + case AT_COMMENTS: + tmp_obj = new_objtree_VA("COMMENT=%{comment}L", 2, + "type", ObjTypeString, "comment", + "comment", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_DropColumn: + case AT_DropColumnRecurse: + if (istype && subcmd->subtype == AT_DropColumnRecurse) { + /* maybe is recurse subcmd for alter type command */ + break; + } + tmp_obj = new_objtree_VA("DROP %{objtype}s %{if_exists}s %{column}I", 4, + "objtype", ObjTypeString, + istype ? "ATTRIBUTE" : "COLUMN", + "type", ObjTypeString, "drop column", + "if_exists", ObjTypeString, + subcmd->missing_ok ? "IF EXISTS" : "", + "column", ObjTypeString, subcmd->name); + tmp_obj2 = new_objtree_VA("CASCADE", 1, + "present", ObjTypeBool, subcmd->behavior); + append_object_object(tmp_obj, "%{cascade}s", tmp_obj2); + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_AddConstraint: + case AT_AddConstraintRecurse: { + /* XXX need to set the "recurse" bit somewhere? 
*/ + Oid constrOid = sub->address.objectId; + bool isnull; + HeapTuple tup; + Datum val; + Constraint *constr; + + /* Skip adding constraint for inherits table sub command */ + if (!constrOid) + continue; + + Assert(IsA(subcmd->def, Constraint)); + constr = castNode(Constraint, subcmd->def); + if (!constr->skip_validation) { + tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constrOid)); + if (HeapTupleIsValid(tup)) { + char *conbin; + + /* Fetch constraint expression in parsetree form */ + val = SysCacheGetAttr(CONSTROID, tup, + Anum_pg_constraint_conbin, &isnull); + + if (!isnull) { + conbin = TextDatumGetCString(val); + exprs = lappend(exprs, stringToNode(conbin)); + mark_function_volatile(context, (Node *)exprs); + } + + ReleaseSysCache(tup); + } + } + + tmp_obj = new_objtree_VA("ADD CONSTRAINT %{name}I %{definition}s", 3, + "type", ObjTypeString, "add constraint", + "name", ObjTypeString, get_constraint_name(constrOid), + "definition", ObjTypeString, + pg_get_constraintdef_part_string(constrOid)); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + + case AT_ValidateConstraint: + case AT_ValidateConstraintRecurse: + tmp_obj = new_objtree_VA("VALIDATE CONSTRAINT %{constraint}I", 2, + "type", ObjTypeString, "validate constraint", + "constraint", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DropConstraint: + case AT_DropConstraintRecurse: + if (subcmd->name == NULL && u_sess->attr.attr_sql.dolphin) { + tmp_obj = new_objtree_VA("DROP PRIMARY KEY", 1, + "type", ObjTypeString, "drop primary key"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } else { + tmp_obj = new_objtree_VA("DROP CONSTRAINT %{if_exists}s %{constraint}I %{cascade}s", 4, + "type", ObjTypeString, "drop constraint", + "if_exists", ObjTypeString, + subcmd->missing_ok ? "IF EXISTS" : "", + "constraint", ObjTypeString, subcmd->name, + "cascade", ObjTypeString, + subcmd->behavior == DROP_CASCADE ? 
"CASCADE" : ""); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + + case AT_AlterColumnType: { + Form_pg_attribute att; + ColumnDef *def; + HeapTuple heapTup; + int attnum = 0; + if (istype && (relId != sub->address.objectId)) { + /* recurse to cascade table for alter type */ + break; + } + /* attrnum may be change by modify */ + heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), subcmd->name); + if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */ + ereport(ERROR, + (errmsg("column \"%s\" of relation \"%s\" does not exist", + subcmd->name, RelationGetRelationName(rel)))); + att = (Form_pg_attribute) GETSTRUCT(heapTup); + attnum = att->attnum; + + def = (ColumnDef *) subcmd->def; + Assert(IsA(def, ColumnDef)); + + /* + * Verbose syntax + * + * Composite types: ALTER reltype %{column}I SET DATA TYPE + * %{datatype}T %{collation}s ATTRIBUTE %{cascade}s + * + * Normal types: ALTER reltype %{column}I SET DATA TYPE + * %{datatype}T %{collation}s COLUMN %{using}s + */ + tmp_obj = new_objtree_VA("ALTER %{objtype}s %{column}I SET DATA TYPE %{datatype}T", 4, + "objtype", ObjTypeString, + istype ? "ATTRIBUTE" : "COLUMN", + "type", ObjTypeString, "alter column type", + "column", ObjTypeString, subcmd->name, + "datatype", ObjTypeObject, + new_objtree_for_type(att->atttypid, + att->atttypmod)); + + /* Add a COLLATE clause, if needed */ + tmp_obj2 = new_objtree("COLLATE"); + if (OidIsValid(att->attcollation)) { + ObjTree *collname; + + collname = new_objtree_for_qualname_id(CollationRelationId, + att->attcollation); + append_object_object(tmp_obj2, "%{name}D", collname); + } else { + append_not_present(tmp_obj2, "%{name}D"); + } + append_object_object(tmp_obj, "%{collation}s", tmp_obj2); + + /* If not a composite type, add the USING clause */ + if (!istype) { + /* + * If there's a USING clause, transformAlterTableStmt + * ran it through transformExpr and stored the + * resulting node in cooked_default, which we can use + * here. 
+ */ + tmp_obj2 = new_objtree("USING"); + if (def->raw_default && sub->usingexpr) { + mark_function_volatile(context, def->cooked_default); + + if (contain_mutable_functions(def->cooked_default)) { + /* + * allow using modify the idntity value only if + * the value is stable, or need to use replident identity + * attr dml change to output change. + */ + check_alter_table_rewrite_replident_change(rel, attnum, "ALTER TYPE USING"); + } + append_string_object(tmp_obj2, "%{expression}s", + "expression", + sub->usingexpr); + } else { + append_not_present(tmp_obj2, "%{expression}s"); + } + + append_object_object(tmp_obj, "%{using}s", tmp_obj2); + } + + /* If it's a composite type, add the CASCADE clause */ + if (istype) { + tmp_obj2 = new_objtree("CASCADE"); + if (subcmd->behavior != DROP_CASCADE) + append_not_present(tmp_obj2, NULL); + append_object_object(tmp_obj, "%{cascade}s", tmp_obj2); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + +#ifdef TODOLIST + case AT_AlterColumnGenericOptions: + tmp_obj = deparse_FdwOptions((List *) subcmd->def, + subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; +#endif + case AT_ChangeOwner: + tmp_obj = new_objtree_VA("OWNER TO %{owner}I", 2, + "type", ObjTypeString, "change owner", + "owner", ObjTypeString, + "subcmd->newowner"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_ClusterOn: + tmp_obj = new_objtree_VA("CLUSTER ON %{index}I", 2, + "type", ObjTypeString, "cluster on", + "index", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DropCluster: + tmp_obj = new_objtree_VA("SET WITHOUT CLUSTER", 1, + "type", ObjTypeString, "set without cluster"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DropOids: + tmp_obj = new_objtree_VA("SET WITHOUT OIDS", 1, + "type", ObjTypeString, "set without oids"); + subcmds = lappend(subcmds, 
new_object_object(tmp_obj)); + break; + + case AT_SetTableSpace: + tmp_obj = new_objtree_VA("SET TABLESPACE %{tablespace}I", 2, + "type", ObjTypeString, "set tablespace", + "tablespace", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_SetRelOptions: + case AT_ResetRelOptions: + subcmds = lappend(subcmds, new_object_object(deparse_RelSetOptions(subcmd))); + break; + + case AT_EnableTrig: + tmp_obj = new_objtree_VA("ENABLE TRIGGER %{trigger}I", 2, + "type", ObjTypeString, "enable trigger", + "trigger", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_EnableAlwaysTrig: + tmp_obj = new_objtree_VA("ENABLE ALWAYS TRIGGER %{trigger}I", 2, + "type", ObjTypeString, "enable always trigger", + "trigger", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_EnableReplicaTrig: + tmp_obj = new_objtree_VA("ENABLE REPLICA TRIGGER %{trigger}I", 2, + "type", ObjTypeString, "enable replica trigger", + "trigger", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DisableTrig: + tmp_obj = new_objtree_VA("DISABLE TRIGGER %{trigger}I", 2, + "type", ObjTypeString, "disable trigger", + "trigger", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_EnableTrigAll: + tmp_obj = new_objtree_VA("ENABLE TRIGGER ALL", 1, + "type", ObjTypeString, "enable trigger all"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DisableTrigAll: + tmp_obj = new_objtree_VA("DISABLE TRIGGER ALL", 1, + "type", ObjTypeString, "disable trigger all"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_EnableTrigUser: + tmp_obj = new_objtree_VA("ENABLE TRIGGER USER", 1, + "type", ObjTypeString, "enable trigger user"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); 
+ break; + + case AT_DisableTrigUser: + tmp_obj = new_objtree_VA("DISABLE TRIGGER USER", 1, + "type", ObjTypeString, "disable trigger user"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_EnableRule: + tmp_obj = new_objtree_VA("ENABLE RULE %{rule}I", 2, + "type", ObjTypeString, "enable rule", + "rule", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_EnableAlwaysRule: + tmp_obj = new_objtree_VA("ENABLE ALWAYS RULE %{rule}I", 2, + "type", ObjTypeString, "enable always rule", + "rule", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_EnableReplicaRule: + tmp_obj = new_objtree_VA("ENABLE REPLICA RULE %{rule}I", 2, + "type", ObjTypeString, "enable replica rule", + "rule", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DisableRule: + tmp_obj = new_objtree_VA("DISABLE RULE %{rule}I", 2, + "type", ObjTypeString, "disable rule", + "rule", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_AddInherit: + tmp_obj = new_objtree_VA("INHERIT %{parent}D", 2, + "type", ObjTypeString, "inherit", + "parent", ObjTypeObject, + new_objtree_for_qualname_id(RelationRelationId, + sub->address.objectId)); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DropInherit: + tmp_obj = new_objtree_VA("NO INHERIT %{parent}D", 2, + "type", ObjTypeString, "drop inherit", + "parent", ObjTypeObject, + new_objtree_for_qualname_id(RelationRelationId, + sub->address.objectId)); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_AddOf: + tmp_obj = new_objtree_VA("OF %{type_of}T", 2, + "type", ObjTypeString, "add of", + "type_of", ObjTypeObject, + new_objtree_for_type(sub->address.objectId, -1)); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_DropOf: 
+ tmp_obj = new_objtree_VA("NOT OF", 1, + "type", ObjTypeString, "not of"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_ReplicaIdentity: + tmp_obj = new_objtree_VA("REPLICA IDENTITY", 1, "type", ObjTypeString, "replica identity"); + switch (((ReplicaIdentityStmt *)subcmd->def)->identity_type) { + case REPLICA_IDENTITY_DEFAULT: + append_string_object(tmp_obj, "%{ident}s", "ident", "DEFAULT"); + break; + case REPLICA_IDENTITY_FULL: + append_string_object(tmp_obj, "%{ident}s", "ident", "FULL"); + break; + case REPLICA_IDENTITY_NOTHING: + append_string_object(tmp_obj, "%{ident}s", "ident", "NOTHING"); + break; + case REPLICA_IDENTITY_INDEX: + tmp_obj2 = new_objtree_VA("USING INDEX %{index}I", 1, "index", ObjTypeString, + ((ReplicaIdentityStmt *)subcmd->def)->name); + append_object_object(tmp_obj, "%{ident}s", tmp_obj2); + break; + } + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_EnableRls: + tmp_obj = new_objtree_VA("ENABLE ROW LEVEL SECURITY", 1, + "type", ObjTypeString, "enable row security"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_DisableRls: + tmp_obj = new_objtree_VA("DISABLE ROW LEVEL SECURITY", 1, + "type", ObjTypeString, "disable row security"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_ForceRls: + tmp_obj = new_objtree_VA("FORCE ROW LEVEL SECURITY", 1, + "type", ObjTypeString, "disable row security"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + + case AT_NoForceRls: + tmp_obj = new_objtree_VA("NO FORCE ROW LEVEL SECURITY", 1, + "type", ObjTypeString, "disable row security"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_InvisibleIndex: + tmp_obj = new_objtree_VA("ALTER INDEX %{name}I INVISIBLE", 2, + "type", ObjTypeString, "alter index invisible", + "name", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case 
AT_VisibleIndex: + tmp_obj = new_objtree_VA("ALTER INDEX %{name}I VISIBLE", 2, + "type", ObjTypeString, "alter index visible", + "name", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_SetCharsetCollate: { + CharsetCollateOptions *n = (CharsetCollateOptions *) subcmd->def; + if (n->charset != PG_INVALID_ENCODING) { + tmp_obj = new_objtree_VA("DEFAULT CHARACTER SET = %{charset}s", 2, "type", ObjTypeString, + "default character set", "charset", ObjTypeString, + pg_encoding_to_char(n->charset)); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + if (n->collate) { + tmp_obj = new_objtree_VA("COLLATE = %{collate}s", 2, "type", ObjTypeString, "collate", + "collate", ObjTypeString, n->collate); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + } + break; + case AT_ConvertCharset: { + CharsetCollateOptions *cc = (CharsetCollateOptions *)subcmd->def; + if (cc->charset != PG_INVALID_ENCODING) { + tmp_obj = + new_objtree_VA("CONVERT TO CHARACTER SET %{charset}s", 2, "type", ObjTypeString, + "convert charset", "charset", ObjTypeString, pg_encoding_to_char(cc->charset)); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + if (cc->collate) { + tmp_obj = new_objtree_VA("COLLATE = %{collate}s", 2, + "type", ObjTypeString, "collate", + "collate", ObjTypeString, cc->collate); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + } + break; + case AT_ModifyColumn: { + AttrNumber attnum; + Oid typid; + int32 typmod; + Oid typcollation; + ColumnDef *def = (ColumnDef *) subcmd->def; + + attnum = get_attnum(RelationGetRelid(rel), def->colname); + if (attnum == InvalidAttrNumber) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", + def->colname, RelationGetRelationName(rel)))); + } + + get_atttypetypmodcoll(RelationGetRelid(rel), attnum, &typid, &typmod, &typcollation); + + /* MODIFY_P COLUMN ColId Typename 
opt_charset ColQualList opt_column_options add_column_first_after + */ + if (pg_strcasecmp(subcmd->name, def->colname) == 0) { + tmp_obj = new_objtree_VA("MODIFY COLUMN %{column}I", 2, + "type", ObjTypeString, "modify column", + "column", ObjTypeString, def->colname); + } else { + tmp_obj = new_objtree_VA("CHANGE %{ori_column}I %{column}I", 3, + "type", ObjTypeString, "change coulumn", + "ori_column", ObjTypeString, subcmd->name, + "column", ObjTypeString, def->colname); + } + append_object_object(tmp_obj, "%{datatype}T", + new_objtree_for_type(typid, typmod)); + + if (def->typname->charset != PG_INVALID_ENCODING) + append_string_object(tmp_obj, "CHARACTER SET %{charset}s", "charset", + pg_encoding_to_char(def->typname->charset)); + + deparse_ColumnDef_constraints(tmp_obj, rel, def, dpcontext, &exprs); + + if (def->collClause) { + append_object_object(tmp_obj, "COLLATE %{name}D", + new_objtree_for_qualname_id(CollationRelationId, typcollation)); + } + + /* column_options comment will be set by commentStmt in alist */ + + if (subcmd->is_first) { + append_format_string(tmp_obj, "FIRST"); + } else if (subcmd->after_name) { + append_string_object(tmp_obj, "AFTER %{col}I", "col", subcmd->after_name); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_AddPartition: { + /* ADD PARTITION name */ + AddPartitionState *s = (AddPartitionState*)subcmd->def; + ListCell *lc; + + Oid *partkey_types = NULL; + Oid *subpartkey_types = NULL; + int partkey_num, subpartkey_num = 0; + partkey_num = get_partition_key_types(relId, RELKIND_RELATION, &partkey_types); + + foreach (lc, s->partitionList) { + Node* partition = (Node*)lfirst(lc); + + if (IsA(partition, RangePartitionDefState)) { + /* RangePartitionStartEndDefState will transform the RangePartitionDefState list */ + RangePartitionDefState *p = (RangePartitionDefState *)partition; + + tmp_obj = new_objtree_VA("ADD PARTITION %{name}I VALUES LESS THAN", 2, + "type", ObjTypeString, "add partition", 
+ "name", ObjTypeString, p->partitionName); + + //----------- parse for maxValueList expr + Oid partoid = InvalidOid; + List *boundlist = deparse_partition_boudaries( + relId, PARTTYPE_PARTITIONED_RELATION, PART_STRATEGY_RANGE, + p->partitionName, &partoid, partkey_num, partkey_types); + + append_array_object(tmp_obj, "(%{maxvalues:, }s)", boundlist); + + if (p->tablespacename) { + append_string_object(tmp_obj, "TABLESPACE %{tblspc}s", "tblspc", p->tablespacename); + } + + if (p->subPartitionDefState) { + int subpartkey_num = get_partition_key_types(relId, + PARTTYPE_PARTITIONED_RELATION, &subpartkey_types); + + deparse_add_subpartition(tmp_obj, partoid, p->subPartitionDefState, + subpartkey_num, subpartkey_types); + } + } else if (IsA(partition, ListPartitionDefState)) { + ListPartitionDefState *p = (ListPartitionDefState*)partition; + tmp_obj = new_objtree_VA("ADD PARTITION %{name}I VALUES", 2, + "type", ObjTypeString, "add partition", + "name", ObjTypeString, p->partitionName); + Oid partoid = InvalidOid; + + List *boundlist = + deparse_partition_boudaries(relId, PARTTYPE_PARTITIONED_RELATION, PART_STRATEGY_LIST, + p->partitionName, &partoid, partkey_num, partkey_types); + append_array_object(tmp_obj, "(%{maxvalues:, }s)", boundlist); + + if (p->tablespacename) { + append_string_object(tmp_obj, "TABLESPACE %{tblspc}s", "tblspc", p->tablespacename); + } + if (p->subPartitionDefState) { + subpartkey_num = get_partition_key_types(relId, PARTTYPE_PARTITIONED_RELATION, + &subpartkey_types); + + deparse_add_subpartition(tmp_obj, partoid, p->subPartitionDefState, + subpartkey_num, subpartkey_types); + } + } else if (IsA(partition, HashPartitionDefState)) { + HashPartitionDefState* p = (HashPartitionDefState*)partition; + tmp_obj = new_objtree_VA("ADD PARTITION %{name}I", 2, + "type", ObjTypeString, "add partition", + "name", ObjTypeString, p->partitionName); + if (p->tablespacename) { + append_string_object(tmp_obj, "TABLESPACE %{tblspc}s", "tblspc", 
p->tablespacename); + } + } else { + elog(WARNING, "unsupported AddPartitionState %d for partition table", nodeTag(partition)); + break; + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + } + break; + case AT_AddSubPartition: { + AddSubPartitionState *s = (AddSubPartitionState*)subcmd->def; + ListCell* lc; + + tmp_obj = new_objtree_VA("MODIFY PARTITION %{name}I", 2, + "type", ObjTypeString, "modify partition add subpartition", + "name", ObjTypeString, s->partitionName); + foreach(lc, s->subPartitionList) { + if (IsA(lfirst(lc), RangePartitionDefState)) { + RangePartitionDefState *p = (RangePartitionDefState*)lfirst(lc); + append_string_object(tmp_obj, "ADD SUBPARTITION %{subpart}I", "subpart", p->partitionName); + List *maxvalues = get_range_partition_maxvalues(p->boundary); + append_array_object(tmp_obj, "VALUES LESS THAN (%{maxvalues:, }s)", maxvalues); + if (p->tablespacename) { + append_string_object(tmp_obj, "TABLESPACE %{tblspc}s", "tblspc", p->tablespacename); + } + } else { + ListPartitionDefState *p = (ListPartitionDefState*)lfirst(lc); + append_string_object(tmp_obj, "ADD SUBPARTITION %{subpart}I", "subpart", p->partitionName); + List *maxvalues = get_list_partition_maxvalues(p->boundary); + append_array_object(tmp_obj, "VALUES (%{maxvalues:, }s)", maxvalues); + if (p->tablespacename) { + append_string_object(tmp_obj, "TABLESPACE %{tblspc}s", "tblspc", p->tablespacename); + } + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + } + break; + + case AT_DropPartition: + case AT_DropSubPartition: { + tmp_obj = new_objtree_VA(subcmd->subtype == AT_DropPartition ? 
+ "DROP PARTITION" : "DROP SUBPARTITION", 1, + "type", ObjTypeString, "drop partition"); + if (subcmd->name) { + append_string_object(tmp_obj, "%{name}I", "name", subcmd->name); + } else { + RangePartitionDefState* state = (RangePartitionDefState*)subcmd->def; + /* transformPartitionValue for maxvalues const */ + List *maxvalues = get_range_partition_maxvalues(state->boundary); + + append_array_object(tmp_obj, "FOR (%{maxvalues:, }s)", maxvalues); + } + + if (subcmd->alterGPI) { + append_format_string(tmp_obj, "UPDATE GLOBAL INDEX"); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_SetPartitionTableSpace: { + if (!subcmd->def) { + break; + } + if (IsA(subcmd->def, RangePartitionDefState)) { + RangePartitionDefState *p = (RangePartitionDefState*)subcmd->def; + List *maxvalues = get_range_partition_maxvalues(p->boundary); + tmp_obj = new_objtree_VA("MOVE PARTITION FOR (%{maxvalues:, }s) TABLESPACE %{tblspc}s", 3, + "type", ObjTypeString, "move partition", + "maxvalues", ObjTypeArray, maxvalues, + "tblspc", ObjTypeString, subcmd->name); + } else if (IsA(subcmd->def, RangeVar)) { + tmp_obj = new_objtree_VA("MOVE PARTITION %{partition}I TABLESPACE %{tblspc}s", 3, + "type", ObjTypeString, "move partition", + "partition", ObjTypeString, ((RangeVar*)subcmd->def)->relname, + "tblspc", ObjTypeString, subcmd->name); + } + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_TruncatePartition: { + if (subcmd->def) { + RangePartitionDefState *p = (RangePartitionDefState*)subcmd->def; + List *maxvalues = get_range_partition_maxvalues(p->boundary); + tmp_obj = new_objtree_VA("TRUNCATE PARTITION FOR (%{maxvalues:, }s)", 2, + "type", ObjTypeString, "truncate partition", + "maxvalues", ObjTypeArray, maxvalues); + } else { + tmp_obj = new_objtree_VA("TRUNCATE PARTITION %{partition}s", 2, + "type", ObjTypeString, "truncate partition", + "partition", ObjTypeString, subcmd->name); + } + + if (subcmd->alterGPI) { + 
append_format_string(tmp_obj, "UPDATE GLOBAL INDEX"); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_TruncateSubPartition: { + if (subcmd->def && IsA(subcmd->def, RangePartitionDefState)) { + RangePartitionDefState *p = (RangePartitionDefState*)subcmd->def; + List *maxvalues = get_range_partition_maxvalues(p->boundary); + + tmp_obj = new_objtree_VA("TRUNCATE SUBPARTITION FOR (%{maxvalues:, }s) %{gpi}s", 3, + "type", ObjTypeString, "truncate subpartition", + "maxvalues", ObjTypeArray, maxvalues, + "gpi", ObjTypeString, subcmd->alterGPI ? "UPDATE GLOBAL INDEX" : ""); + } else { + tmp_obj = new_objtree_VA("TRUNCATE SUBPARTITION %{partition}s %{gpi}s", 3, + "type", ObjTypeString, "truncate subpartition", + "partition", ObjTypeString, subcmd->name, + "gpi", ObjTypeString, subcmd->alterGPI ? "UPDATE GLOBAL INDEX" : ""); + } + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_ExchangePartition: { + if (subcmd->def) { + RangePartitionDefState *p = (RangePartitionDefState*)subcmd->def; + List *maxvalues = get_range_partition_maxvalues(p->boundary); + tmp_obj = new_objtree_VA( + "EXCHANGE %{issub}s FOR (%{maxvalues:, }s) WITH TABLE %{exchange_tbl}D", 4, + "type", ObjTypeString, "exchange partition", + "issub", ObjTypeString, + "PARTITION", + "maxvalues", ObjTypeArray, maxvalues, + "exchange_tbl", ObjTypeObject, + new_objtree_for_qualname_rangevar(subcmd->exchange_with_rel)); + } else { + tmp_obj = new_objtree_VA( + "EXCHANGE %{issub}s (%{partition}I) WITH TABLE %{exchange_tbl}D", 4, + "type", ObjTypeString, "exchange partition", + "issub", ObjTypeString, + "PARTITION", + "partition", ObjTypeString, subcmd->name, + "exchange_tbl", ObjTypeObject, + new_objtree_for_qualname_rangevar(subcmd->exchange_with_rel)); + } + + if (!subcmd->check_validation) { + append_format_string(tmp_obj, "WITHOUT VALIDATION"); + } + + if (subcmd->exchange_verbose) { + append_format_string(tmp_obj, "VERBOSE"); + } + if 
(subcmd->alterGPI) { + append_format_string(tmp_obj, "UPDATE GLOBAL INDEX"); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_MergePartition: { + ListCell *lc; + List *name_list = NIL; + foreach(lc, (List*)subcmd->def) { + Value* v = (Value*)lfirst(lc); + char *name = pstrdup(v->val.str); + name_list = lappend(name_list, new_string_object(name)); + } + + tmp_obj = new_objtree_VA("MERGE PARTITIONS %{name_list:, }s INTO PARTITION %{partition}s", 3, + "type", ObjTypeString, "merge partition", + "name_list", ObjTypeArray, name_list, + "partition", ObjTypeString, subcmd->name); + if (subcmd->target_partition_tablespace) { + append_string_object(tmp_obj, "TABLESPACE %{tblspc}s", + "tblspc", subcmd->target_partition_tablespace); + } + if (subcmd->alterGPI) { + append_format_string(tmp_obj, "UPDATE GLOBAL INDEX"); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + + case AT_SplitPartition: { + SplitPartitionState *s = (SplitPartitionState*)subcmd->def; + bool two_partiiton = false; + + tmp_obj = new_objtree_VA("SPLIT PARTITION", 1, + "type", ObjTypeString, "split partition"); + if (s->src_partition_name) { + append_string_object(tmp_obj, "%{partition}s", "partition", + s->src_partition_name); + } else if (s->partition_for_values) { + List *maxvalues = get_range_partition_maxvalues(s->partition_for_values); + append_array_object(tmp_obj, "FOR (%{maxvalues:, }s)", maxvalues); + } + + if (s->split_point) { + List *maxvalues = get_range_partition_maxvalues(s->split_point); + append_array_object(tmp_obj, "AT (%{maxvalues:, }s)", maxvalues); + + if (list_length(s->dest_partition_define_list) == 2) { + RangePartitionDefState *p1 = + (RangePartitionDefState*)linitial(s->dest_partition_define_list); + RangePartitionDefState *p2 = + (RangePartitionDefState*)lsecond(s->dest_partition_define_list); + + ObjTree *split_obj = new_objtree_VA( + "PARTITION %{name1}s %{tblspc1}s, PARTITION %{name2}s %{tblspc2}s", 4, + 
"name1", ObjTypeString, p1->partitionName, + "tblspc1", ObjTypeString, p1->tablespacename ? p1->tablespacename : "", + "name2", ObjTypeString, p2->partitionName, + "tblspc2", ObjTypeString, p2->tablespacename ? p2->tablespacename : ""); + + append_object_object(tmp_obj, "INTO (%{split_list}s)", split_obj); + two_partiiton = true; + } + } + + if (!two_partiiton) { + ListCell *lc; + List *partlist = NIL; + Oid *partkey_types = NULL; + int partkey_num = get_partition_key_types(relId, RELKIND_RELATION, &partkey_types); + + foreach(lc, s->dest_partition_define_list) { + ObjTree *part_obj; + RangePartitionDefState *p = (RangePartitionDefState*)lfirst(lc); + Oid partoid = InvalidOid; + part_obj = new_objtree_VA("PARTITION %{partname}s", 1, + "partname", ObjTypeString, p->partitionName); + + List *boundlist = deparse_partition_boudaries(relId, PARTTYPE_PARTITIONED_RELATION, + PART_STRATEGY_RANGE, p->partitionName, &partoid, partkey_num, partkey_types); + + append_array_object(part_obj, "VALUES LESS THAN (%{maxvalues:, }s)", boundlist); + if (p->tablespacename) { + append_string_object(part_obj, "TABLESPACE %{tblspc}s", "tblspc", p->tablespacename); + } + + partlist = lappend(partlist, new_object_object(part_obj)); + } + + append_array_object(tmp_obj, "INTO (%{partlist:, }s)", partlist); + } + + if (subcmd->alterGPI) { + append_format_string(tmp_obj, "UPDATE GLOBAL INDEX"); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_SplitSubPartition: { + SplitPartitionState *s = (SplitPartitionState*)subcmd->def; + ObjTree *define_list = NULL; + tmp_obj = new_objtree_VA("SPLIT SUBPARTITION %{name}s", 2, + "type", ObjTypeString, "split subpartition", + "name", ObjTypeString, s->src_partition_name); + + if (s->splitType == LISTSUBPARTITIION) { + List *maxvalues = get_list_partition_maxvalues(s->newListSubPartitionBoundry); + + append_array_object(tmp_obj, "VALUES (%{maxvalues:, }s)", maxvalues); + + if (list_length(s->dest_partition_define_list) == 
2) { + ListPartitionDefState *p1 = (ListPartitionDefState*)linitial(s->dest_partition_define_list); + ListPartitionDefState *p2 = (ListPartitionDefState*)lsecond(s->dest_partition_define_list); + + define_list = new_objtree_VA( + "SUBPARTITION %{name1}s %{tblspc1}s, SUBPARTITION %{name2}s %{tblspc2}s", 4, + "name1", ObjTypeString, p1->partitionName, + "tblspc1", ObjTypeString, p1->tablespacename ? p1->tablespacename : "", + "name2", ObjTypeString, p2->partitionName, + "tblspc2", ObjTypeString, p2->tablespacename ? p2->tablespacename : ""); + } + if (define_list) { + append_object_object(tmp_obj, "INTO (%{define_list}s)", define_list); + } + } else if (s->splitType == RANGESUBPARTITIION) { + List *maxvalues = get_range_partition_maxvalues(s->split_point); + append_array_object(tmp_obj, "AT (%{maxvalues:, }s)", maxvalues); + + if (list_length(s->dest_partition_define_list) == 2) { + RangePartitionDefState *p1 = + (RangePartitionDefState*)linitial(s->dest_partition_define_list); + RangePartitionDefState *p2 = + (RangePartitionDefState*)lsecond(s->dest_partition_define_list); + + define_list = new_objtree_VA( + "SUBPARTITION %{name1}s %{tblspc1}s, SUBPARTITION %{name2}s %{tblspc2}s", 4, + "name1", ObjTypeString, p1->partitionName, + "tblspc1", ObjTypeString, p1->tablespacename ? p1->tablespacename : "", + "name2", ObjTypeString, p2->partitionName, + "tblspc2", ObjTypeString, p2->tablespacename ? 
p2->tablespacename : ""); + if (define_list) { + append_object_object(tmp_obj, "INTO (%{define_list}s)", define_list); + } + } + } + + if (subcmd->alterGPI) { + append_format_string(tmp_obj, "UPDATE GLOBAL INDEX"); + } + + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_ResetPartitionno: { + tmp_obj = new_objtree_VA("RESET PARTITION", 1, + "type", ObjTypeString, "reset partition"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + } + break; + case AT_UnusableIndex: + tmp_obj = new_objtree_VA("UNUSABLE", 1, + "type", ObjTypeString, "unusable index"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + + break; + case AT_UnusableIndexPartition: + tmp_obj = new_objtree_VA("MODIFY PARTITION %{partition_identity}I UNUSABLE ", 2, + "type", ObjTypeString, "unusable partition index", + "partition_identity", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_UnusableAllIndexOnPartition: { + if (!subcmd->def) { + tmp_obj = new_objtree_VA("MODIFY %{parttype} %{partition_identity}I UNUSABLE LOCAL INDEXES ", 3, + "type", ObjTypeString, "unusable all partition index", + "parttype", ObjTypeString, "PARTITION", + "partition_identity", ObjTypeString, subcmd->name); + } else if (IsA(subcmd->def, RangePartitionDefState)) { + RangePartitionDefState *p = (RangePartitionDefState *)subcmd->def; + List *maxvalues = get_range_partition_maxvalues(p->boundary); + + tmp_obj = new_objtree_VA("MODIFY %{parttype} FOR (%{maxvalues:, }s) UNUSABLE LOCAL INDEXES ", 3, + "type", ObjTypeString, "unusable all partition index", + "parttype", ObjTypeString, "PARTITION", + "maxvalues", ObjTypeArray, maxvalues); + } + } + break; + case AT_RebuildIndex: + tmp_obj = new_objtree_VA("REBUILD", 1, + "type", ObjTypeString, "rebuild index"); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_RebuildIndexPartition: + tmp_obj = new_objtree_VA("REBUILD PARTITION %{partition_identity}I", 
2, + "type", ObjTypeString, "rebuild partition index", + "partition_identity", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + case AT_RebuildAllIndexOnPartition: + tmp_obj = new_objtree_VA("MODIFY PARTITION %{partition_identity}I REBUILD ALL INDEX", 2, + "type", ObjTypeString, "rebuild partition all index", + "partition_identity", ObjTypeString, subcmd->name); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; +#ifdef TODOLIST + case AT_GenericOptions: + tmp_obj = deparse_FdwOptions((List *) subcmd->def, NULL); + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; +#endif + default: + if (u_sess->hook_cxt.deparseCollectedCommandHook != NULL) { + tmp_obj = (ObjTree*)((deparseCollectedCommand)(u_sess->hook_cxt.deparseCollectedCommandHook)) + (ALTER_RELATION_SUBCMD, cmd, sub, context); + if (tmp_obj) { + subcmds = lappend(subcmds, new_object_object(tmp_obj)); + break; + } + } + + elog(WARNING, "unsupported alter table subtype %d for ddl logical replication", + subcmd->subtype); + + break; + } + + /* + * We don't support replicating ALTER TABLE which contains volatile + * functions because It's possible the functions contain DDL/DML in + * which case these operations will be executed twice and cause + * duplicate data. In addition, we don't know whether the tables being + * accessed by these DDL/DML are published or not. So blindly allowing + * such functions can allow unintended clauses like the tables + * accessed in those functions may not even exist on the subscriber. + */ + if (contain_volatile_functions((Node *) exprs)) + elog(ERROR, "ALTER TABLE command using volatile function cannot be replicated"); + + /* + * Clean the list as we already confirmed there is no volatile + * function. 
+ */ + list_free(exprs); + exprs = NIL; + } + + table_close(rel, AccessShareLock); + + if (list_length(subcmds) == 0) + return NULL; + + append_array_object(ret, "%{subcmds:, }s", subcmds); + + return ret; +} + /* * Workhorse to deparse a CollectedCommand. */ -char * -deparse_utility_command(CollectedCommand *cmd, ddl_deparse_context *context) +char* deparse_utility_command(CollectedCommand *cmd, ddl_deparse_context *context) { OverrideSearchPath *overridePath; MemoryContext oldcxt; @@ -2437,7 +5091,10 @@ deparse_utility_command(CollectedCommand *cmd, ddl_deparse_context *context) case SCT_Simple: tree = deparse_simple_command(cmd, &context->include_owner); break; - + case SCT_AlterTable: + tree = deparse_AlterRelation(cmd, context); + context->include_owner = false; + break; default: elog(ERROR, "unexpected deparse node type %d", cmd->type); } @@ -2460,3 +5117,759 @@ deparse_utility_command(CollectedCommand *cmd, ddl_deparse_context *context) return command; } + + +static char* get_maxvalue_from_const(Const* maxvalue_item, char* default_str) +{ + int16 typlen = 0; + bool typbyval = false; + char typalign; + char typdelim; + Oid typioparam = InvalidOid; + Oid outfunc = InvalidOid; + char* maxvalue; + char* maxvalue_out; + + if (constIsNull(maxvalue_item)) { + maxvalue = pstrdup("NULL"); + } else if (constIsMaxValue(maxvalue_item)) { + maxvalue = pstrdup(default_str); + } else { + get_type_io_data(maxvalue_item->consttype, + IOFunc_output, + &typlen, + &typbyval, + &typalign, + &typdelim, + &typioparam, + &outfunc); + maxvalue_out = + DatumGetCString(OidFunctionCall1Coll(outfunc, maxvalue_item->constcollid, maxvalue_item->constvalue)); + + if (istypestring(maxvalue_item->consttype)) { + int nret = 0; + size_t len = strlen(maxvalue_out) + 3; + maxvalue = (char*)palloc0(len * sizeof(char)); + nret = snprintf_s(maxvalue, len, len - 1, "\'%s\'", maxvalue_out); + securec_check_ss(nret, "\0", "\0"); + } else { + maxvalue = pstrdup(maxvalue_out); + } + } + + return 
maxvalue; +} + +static List *get_range_partition_maxvalues(List* boundary) +{ + ListCell *lc; + List *maxvalues = NIL; + + foreach (lc, boundary) { + Const *maxvalue_item = (Const*)lfirst(lc); + char *maxvalue = get_maxvalue_from_const(maxvalue_item, "MAXVALUE"); + if (maxvalue) + maxvalues = lappend(maxvalues, new_string_object(maxvalue)); + } + + return maxvalues; +} + +/* see transformListPartitionValue */ +static List *get_list_partition_maxvalues(List *boundary) +{ + ListCell *lc; + List *maxvalues = NIL; + + foreach (lc, boundary) { + if (IsA(lfirst(lc), RowExpr)) { + /* for multi-keys list partition boundary, ((xx,xx),(xx,xx)) + * subpartition's partition key's length should be 1 + */ + RowExpr *r = (RowExpr*)lfirst(lc); + ListCell *lc2; + + StringInfoData tmpbuf; + int i = 0; + initStringInfo(&tmpbuf); + + foreach(lc2, r->args) { + Const *maxvalue_item = (Const*)lfirst(lc); + + if (i++ > 0) { + appendStringInfo(&tmpbuf, ","); + } + + char *maxvalue = get_maxvalue_from_const(maxvalue_item, "NULL"); + if (maxvalue) { + appendStringInfo(&tmpbuf, "%s", maxvalue); + } + } + appendStringInfo(&tmpbuf, ")"); + char* rowstr = pstrdup(tmpbuf.data); + maxvalues = lappend(maxvalues, new_string_object(rowstr)); + } else { + /* singel key */ + Const *maxvalue_item = (Const*)lfirst(lc); + char *maxvalue = get_maxvalue_from_const(maxvalue_item, "DEFAULT"); + if (maxvalue) + maxvalues = lappend(maxvalues, new_string_object(maxvalue)); + } + } + + return maxvalues; +} + +static int get_partition_key_types(Oid reloid, char parttype, Oid **partkey_types) +{ + Relation relation = NULL; + ScanKeyData key[2]; + SysScanDesc scan = NULL; + HeapTuple tuple = NULL; + bool isnull = false; + bool isPartExprKeyNull = false; + int partkeynum = 0; + + relation = heap_open(PartitionRelationId, AccessShareLock); + + ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(parttype)); + ScanKeyInit(&key[1], Anum_pg_partition_parentid, 
BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(reloid)); + + scan = systable_beginscan(relation, PartitionParentOidIndexId, true, NULL, 2, key); + if (HeapTupleIsValid(tuple = systable_getnext(scan))) { + Datum datum = 0; + datum = SysCacheGetAttr(PARTRELID, tuple, Anum_pg_partition_partkeyexpr, &isPartExprKeyNull); + + if (!isPartExprKeyNull) { + Node *partkeyexpr = NULL; + char *partkeystr = pstrdup(TextDatumGetCString(datum)); + if (partkeystr) + partkeyexpr = (Node *)stringToNode_skip_extern_fields(partkeystr); + else + ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("The partkeystr can't be NULL while getting partition key types"))); + if (!partkeyexpr) + ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("The partkeyexpr can't be NULL while getting partition key types"))); + + partkeynum = 1; + *partkey_types = (Oid*)palloc0(partkeynum * sizeof(Oid)); + if (partkeyexpr->type == T_OpExpr) + (*partkey_types)[0] = ((OpExpr*)partkeyexpr)->opresulttype; + else if (partkeyexpr->type == T_FuncExpr) + (*partkey_types)[0] = ((FuncExpr*)partkeyexpr)->funcresulttype; + else + ereport(ERROR, + (errcode(ERRCODE_NODE_ID_MISSMATCH), + errmsg("The node type %d is wrong, it must be T_OpExpr or T_FuncExpr", partkeyexpr->type))); + } else { + Oid *iPartboundary = NULL; + datum = SysCacheGetAttr(PARTRELID, tuple, Anum_pg_partition_partkey, &isnull); + + if (isnull) { + ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("can not find partkey while getting partition key types"))); + } else { + int2vector *partVec = (int2vector *)DatumGetPointer(datum); + partkeynum = partVec->dim1; + iPartboundary = (Oid *)palloc0(partkeynum * sizeof(Oid)); + for (int i = 0; i < partVec->dim1; i++) { + iPartboundary[i] = get_atttype(reloid, partVec->values[i]); + } + *partkey_types = iPartboundary; + } + } + } else { + systable_endscan(scan); + heap_close(relation, AccessShareLock); + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + 
errmsg("could not find tuple for partition relation %u", reloid))); + } + + systable_endscan(scan); + heap_close(relation, AccessShareLock); + + return partkeynum; +} + +static ObjTree *deparse_add_subpartition(ObjTree *ret, Oid partoid, List *subPartitionDefState, int partkeynum, + Oid *partkey_types) +{ + ListCell *subcell; + List *sublist = NIL; + ObjTree *subobj = NULL; + Oid suboid = InvalidOid; + + foreach(subcell, subPartitionDefState) { + if (IsA((Node*)lfirst(subcell), ListPartitionDefState)) { + ListPartitionDefState *n = (ListPartitionDefState*)lfirst(subcell); + + List *subboundlist = + deparse_partition_boudaries(partoid, PARTTYPE_SUBPARTITIONED_RELATION, PART_STRATEGY_LIST, + n->partitionName, &suboid, partkeynum, partkey_types); + + subobj = new_objtree_VA("SUBPARTITION %{name}I VALUES (%{maxvalue:, }s)", 2, + "name", ObjTypeString, n->partitionName, + "maxvalue", ObjTypeArray, subboundlist); + if (n->tablespacename) { + append_string_object(subobj, "TABLESPACE %{tblspc}s", "tblspc", n->tablespacename); + } + } else if (IsA((Node*)lfirst(subcell), HashPartitionDefState)) { + HashPartitionDefState *n = (HashPartitionDefState*)lfirst(subcell); + + subobj = new_objtree_VA("SUBPARTITION %{name}I", 1, + "name", ObjTypeString, n->partitionName); + if (n->tablespacename) { + append_string_object(subobj, "TABLESPACE %{tblspc}s", "tblspc", n->tablespacename); + } + } else if (IsA((Node*)lfirst(subcell), RangePartitionDefState)) { + RangePartitionDefState *n = (RangePartitionDefState*)lfirst(subcell); + + List *subboundlist = + deparse_partition_boudaries(partoid, PARTTYPE_SUBPARTITIONED_RELATION, PART_STRATEGY_RANGE, + n->partitionName, &suboid, partkeynum, partkey_types); + + subobj = new_objtree_VA("SUBPARTITION %{name}I VALUES LESS THAN (%{maxvalue:, }s)", 2, + "name", ObjTypeString, n->partitionName, + "maxvalue", ObjTypeArray, subboundlist); + if (n->tablespacename) { + append_string_object(subobj, "TABLESPACE %{tblspc}s", "tblspc", n->tablespacename); + 
} + } else { + elog(ERROR, "unrecognize subpartiiton definition type %d", nodeTag((Node*)lfirst(subcell))); + } + sublist = lappend(sublist, new_object_object(subobj)); + } + + append_array_object(ret, "(%{subpartitions:, }s)", sublist); + return ret; +} + +static List *deparse_partition_boudaries(Oid parentoid, char reltype, char strategy, const char *partition_name, + Oid *partoid, int partkeynum, Oid *partkey_types) +{ + List *boundlist = NIL; + Relation relation = NULL; + ScanKeyData key[2]; + SysScanDesc scan = NULL; + HeapTuple tuple = NULL; + Oid thisoid = InvalidOid; + char *maxvalue_str = NULL; + + relation = heap_open(PartitionRelationId, AccessShareLock); + + ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(reltype)); + ScanKeyInit(&key[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(parentoid)); + scan = systable_beginscan(relation, PartitionParentOidIndexId, true, NULL, 2, key); + while (HeapTupleIsValid(tuple = systable_getnext(scan))) { + bool datum_is_null; + Form_pg_partition foundpart = (Form_pg_partition)GETSTRUCT(tuple); + if (pg_strcasecmp(foundpart->relname.data, partition_name)) { + continue; + } + + Datum boundary_datum = SysCacheGetAttr(PARTRELID, tuple, Anum_pg_partition_boundaries, &datum_is_null); + if (datum_is_null) { + if (strategy == PART_STRATEGY_LIST) { + maxvalue_str = pstrdup("DEFAULT"); + boundlist = lappend(boundlist, new_string_object(maxvalue_str)); + thisoid = HeapTupleGetOid(tuple); + break; + } else { + systable_endscan(scan); + heap_close(relation, AccessShareLock); + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("could not find boundaries for partition %s", partition_name))); + } + } + + if (strategy == PART_STRATEGY_LIST && partkeynum > 1) { + /* see unnest function */ + ArrayType* arr = DatumGetArrayTypeP(boundary_datum); + + Datum dt = 0; + bool isnull = false; + + ArrayIterator it = array_create_iterator(arr, 0); + while 
(array_iterate(it, &dt, &isnull)) { + if (isnull) { + // default + maxvalue_str = pstrdup("DEFAULT"); + boundlist = lappend(boundlist, new_string_object(maxvalue_str)); + continue; + } + + StringInfoData tmpbuf; + initStringInfo(&tmpbuf); + + char* partvalue_str = TextDatumGetCString(dt); + Type targetType = typeidType(TEXTARRAYOID); + Datum partvalue_array_datum = stringTypeDatum(targetType, partvalue_str, TEXTOID, true); + ReleaseSysCache(targetType); + + ArrayType* partvalue_array = DatumGetArrayTypeP(partvalue_array_datum); + + Datum partkey_dt = 0; + bool partkey_isnull = false; + int it_index = 0; + + appendStringInfo(&tmpbuf, "("); + ArrayIterator partkey_it = array_create_iterator(partvalue_array, 0); + while (array_iterate(partkey_it, &partkey_dt, &partkey_isnull)) { + // OidInputFunctionCall + if (it_index > 0) { + appendStringInfo(&tmpbuf, ","); + } + + if (partkey_isnull) { + appendStringInfo(&tmpbuf, "NULL"); + continue; + } + + char *svalue = TextDatumGetCString(partkey_dt); + + if (istypestring(partkey_types[it_index])) { + appendStringInfo(&tmpbuf, "'%s'", svalue); + } else { + appendStringInfo(&tmpbuf, "%s", svalue); + } + ++it_index; + } + appendStringInfo(&tmpbuf, ")"); + maxvalue_str = pstrdup(tmpbuf.data); + if (maxvalue_str) { + boundlist = lappend(boundlist, new_string_object(maxvalue_str)); + } + } + } else { + List* boundary = untransformPartitionBoundary(boundary_datum); + ListCell *bcell; + int i = 0; + foreach (bcell, boundary) { + Value* maxvalue = (Value*)lfirst(bcell); + + if (i >= partkeynum) { + break; + } + + if (!PointerIsValid(maxvalue->val.str)) { + if (strategy == PART_STRATEGY_RANGE) { + maxvalue_str = pstrdup("MAXVALUE"); + } else { + maxvalue_str = pstrdup("DEFAULT"); + } + } else if (istypestring(partkey_types[i])) { + int nret = 0; + size_t len = strlen(maxvalue->val.str) + 3; + maxvalue_str = (char *)palloc0(len * sizeof(char)); + nret = snprintf_s(maxvalue_str, len, len - 1, "\'%s\'", maxvalue->val.str); + 
securec_check_ss(nret, "\0", "\0"); + } else { + maxvalue_str = pstrdup(maxvalue->val.str); + } + if (strategy == PART_STRATEGY_RANGE) + ++i; + + if (maxvalue_str) + boundlist = lappend(boundlist, new_string_object(maxvalue_str)); + } + } + + thisoid = HeapTupleGetOid(tuple); + break; + } + + systable_endscan(scan); + heap_close(relation, AccessShareLock); + + if (!OidIsValid(thisoid)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("could not find subpartition tuple for partition relation %s", partition_name))); + } + + if (partoid) + *partoid = thisoid; + + return boundlist; +} + +/* + * Handle deparsing setting of Function + * + * Verbose syntax + * RESET ALL + * OR + * SET %{set_name}I TO %{set_value}s + * OR + * RESET %{set_name}I + */ +static ObjTree* deparse_FunctionSet(VariableSetKind kind, char *name, char *value) +{ + ObjTree *ret; + struct config_generic *record; + + if (kind == VAR_RESET_ALL) { + ret = new_objtree("RESET ALL"); + } else if (kind == VAR_SET_VALUE) { + ret = new_objtree_VA("SET %{set_name}I", 1, + "set_name", ObjTypeString, name); + + /* + * Some GUC variable names are 'LIST' type and hence must not be + * quoted. + */ + record = find_option(name, false, ERROR); + if (record && (record->flags & GUC_LIST_QUOTE)) + append_string_object(ret, "TO %{set_value}s", "set_value", value); + else + append_string_object(ret, "TO %{set_value}L", "set_value", value); + } else { + ret = new_objtree_VA("RESET %{set_name}I", 1, + "set_name", ObjTypeString, name); + } + + return ret; +} + +/* + * Deparse a CreateFunctionStmt (CREATE FUNCTION) + * + * Given a function OID and the parse tree that created it, return an ObjTree + * representing the creation command. 
+ * + * Verbose syntax + * + * CREATE %{or_replace}s FUNCTION %{signature}s RETURNS %{return_type}s + * LANGUAGE %{transform_type}s %{language}I %{window}s %{volatility}s + * %{parallel_safety}s %{leakproof}s %{strict}s %{security_definer}s + * %{cost}s %{rows}s %{support}s %{set_options: }s AS %{objfile}L, + * %{symbol}L + */ +static ObjTree* deparse_CreateFunction(Oid objectId, Node *parsetree) +{ + ObjTree *ret; + char *func_str; + size_t len; + + func_str = pg_get_functiondef_string(objectId); + len = strlen(func_str); + if (func_str[len - 1] == '\n' && func_str[len - 2] == '/' + && func_str[len - 3] == '\n' && func_str[len - 4] == ';') { + func_str[len - 2] = '\0'; + func_str[len - 1] = '\0'; + } + + ret = new_objtree_VA("%{function}s", 1, + "function", ObjTypeString, + func_str); + + return ret; +} + +/* + * Deparse an AlterFunctionStmt (ALTER FUNCTION/ROUTINE/PROCEDURE) + * + * Given a function OID and the parse tree that created it, return an ObjTree + * representing the alter command. + * + * Verbose syntax: + * ALTER FUNCTION/ROUTINE/PROCEDURE %{signature}s %{definition: }s + */ +static ObjTree* deparse_AlterFunction(Oid objectId, Node *parsetree) +{ + AlterFunctionStmt *node = (AlterFunctionStmt *) parsetree; + ObjTree *ret; + ObjTree *sign; + HeapTuple procTup; + Form_pg_proc procForm; + List *params = NIL; + List *elems = NIL; + ListCell *cell; + int i; + + /* Get the pg_proc tuple */ + procTup = SearchSysCache1(PROCOID, objectId); + if (!HeapTupleIsValid(procTup)) + elog(ERROR, "cache lookup failed for function with OID %u", objectId); + procForm = (Form_pg_proc) GETSTRUCT(procTup); + + if (node->isProcedure) + ret = new_objtree("ALTER PROCEDURE"); + else + ret = new_objtree("ALTER FUNCTION"); + + /* + * ALTER FUNCTION does not change signature so we can use catalog to get + * input type Oids. 
+ */ + for (i = 0; i < procForm->pronargs; i++) { + ObjTree *tmp_obj; + + tmp_obj = new_objtree_VA("%{type}T", 1, + "type", ObjTypeObject, + new_objtree_for_type(procForm->proargtypes.values[i], -1)); + params = lappend(params, new_object_object(tmp_obj)); + } + + sign = new_objtree_VA("%{identity}D (%{arguments:, }s)", 2, + "identity", ObjTypeObject, + new_objtree_for_qualname_id(ProcedureRelationId, objectId), + "arguments", ObjTypeArray, params); + + append_object_object(ret, "%{signature}s", sign); + + foreach(cell, node->actions) { + DefElem *defel = (DefElem *) lfirst(cell); + ObjTree *tmp_obj = NULL; + + if (strcmp(defel->defname, "volatility") == 0) { + tmp_obj = new_objtree(strVal(defel->arg)); + } else if (strcmp(defel->defname, "strict") == 0) { + tmp_obj = new_objtree(intVal(defel->arg) ? + "RETURNS NULL ON NULL INPUT" : + "CALLED ON NULL INPUT"); + } else if (strcmp(defel->defname, "security") == 0) { + tmp_obj = PLSQL_SECURITY_DEFINER ? new_objtree(intVal(defel->arg) ? + "AUTHID DEFINER" : "AUTHID CURRENT_USER") : + new_objtree(intVal(defel->arg) ? + "SECURITY DEFINER" : "SECURITY INVOKER"); + } else if (strcmp(defel->defname, "leakproof") == 0) { + tmp_obj = new_objtree(intVal(defel->arg) ? 
+ "LEAKPROOF" : "NOT LEAKPROOF"); + } else if (strcmp(defel->defname, "cost") == 0) { + tmp_obj = new_objtree_VA("COST %{cost}n", 1, + "cost", ObjTypeFloat, + defGetNumeric(defel)); + } else if (strcmp(defel->defname, "rows") == 0) { + tmp_obj = new_objtree("ROWS"); + if (defGetNumeric(defel) == 0) + append_not_present(tmp_obj, "%{rows}n"); + else + append_float_object(tmp_obj, "%{rows}n", + defGetNumeric(defel)); + } else if (strcmp(defel->defname, "set") == 0) { + VariableSetStmt *sstmt = (VariableSetStmt *) defel->arg; + char *value = ExtractSetVariableArgs(sstmt); + + tmp_obj = deparse_FunctionSet(sstmt->kind, sstmt->name, value); + } else if (strcmp(defel->defname, "parallel") == 0) { + tmp_obj = new_objtree_VA("PARALLEL %{value}s", 1, + "value", ObjTypeString, strVal(defel->arg)); + } + + elems = lappend(elems, new_object_object(tmp_obj)); + } + + append_array_object(ret, "%{definition: }s", elems); + + ReleaseSysCache(procTup); + + return ret; +} + +/* + * Deparse a CreateTrigStmt (CREATE TRIGGER) + * + * Given a trigger OID and the parse tree that created it, return an ObjTree + * representing the creation command. + * + * Verbose syntax + * CREATE %{constraint}s TRIGGER %{name}I %{time}s %{events: OR }s ON + * %{relation}D %{from_table}s %{constraint_attrs: }s %{referencing: }s + * FOR EACH %{for_each}s %{when}s EXECUTE PROCEDURE %{function}s + */ +static ObjTree* deparse_CreateTrigStmt(Oid objectId, Node *parsetree) +{ + CreateTrigStmt *node = (CreateTrigStmt *) parsetree; + Relation pg_trigger; + HeapTuple trigTup; + Form_pg_trigger trigForm; + ObjTree *ret; + ObjTree *tmp_obj; + int tgnargs; + List *list = NIL; + List *events; + char *trigtiming; + Datum value; + bool isnull; + char *bodySrc; + + pg_trigger = table_open(TriggerRelationId, AccessShareLock); + + trigTup = get_catalog_object_by_oid(pg_trigger, objectId); + trigForm = (Form_pg_trigger) GETSTRUCT(trigTup); + + trigtiming = (char*)(node->timing == TRIGGER_TYPE_BEFORE ? 
"BEFORE" : + node->timing == TRIGGER_TYPE_AFTER ? "AFTER" : + node->timing == TRIGGER_TYPE_INSTEAD ? "INSTEAD OF" : + NULL); + if (!trigtiming) + elog(ERROR, "unrecognized trigger timing type %d", node->timing); + + /* DEFINER clause */ + tmp_obj = new_objtree("DEFINER"); + if (node->definer) + append_string_object(tmp_obj, "=%{definer}s", "definer", + node->definer); + else + append_not_present(tmp_obj, "=%{definer}s"); + + ret = new_objtree_VA("CREATE %{definer}s %{constraint}s TRIGGER %{if_not_exists}s %{name}I %{time}s", 5, + "definer", ObjTypeObject, tmp_obj, + "constraint", ObjTypeString, node->isconstraint ? "CONSTRAINT" : "", + "if_not_exists", ObjTypeString, node->if_not_exists ? "IF NOT EXISTS" : "", + "name", ObjTypeString, node->trigname, + "time", ObjTypeString, trigtiming); + + /* + * Decode the events that the trigger fires for. The output is a list; in + * most cases it will just be a string with the event name, but when + * there's an UPDATE with a list of columns, we return a JSON object. + */ + events = NIL; + if (node->events & TRIGGER_TYPE_INSERT) + events = lappend(events, new_string_object("INSERT")); + if (node->events & TRIGGER_TYPE_DELETE) + events = lappend(events, new_string_object("DELETE")); + if (node->events & TRIGGER_TYPE_TRUNCATE) + events = lappend(events, new_string_object("TRUNCATE")); + if (node->events & TRIGGER_TYPE_UPDATE) { + if (node->columns == NIL) { + events = lappend(events, new_string_object("UPDATE")); + } else { + ObjTree *update; + ListCell *cell; + List *cols = NIL; + + /* + * Currently only UPDATE OF can be objects in the output JSON, but + * we add a "kind" element so that user code can distinguish + * possible future new event types. 
+ */ + update = new_objtree_VA("UPDATE OF", 1, + "kind", ObjTypeString, "update_of"); + + foreach(cell, node->columns) { + char *colname = strVal(lfirst(cell)); + + cols = lappend(cols, new_string_object(colname)); + } + + append_array_object(update, "%{columns:, }I", cols); + + events = lappend(events, new_object_object(update)); + } + } + append_array_object(ret, "%{events: OR }s", events); + + tmp_obj = new_objtree_for_qualname_id(RelationRelationId, + trigForm->tgrelid); + append_object_object(ret, "ON %{relation}D", tmp_obj); + + tmp_obj = new_objtree("FROM"); + if (trigForm->tgconstrrelid) { + ObjTree *rel; + + rel = new_objtree_for_qualname_id(RelationRelationId, + trigForm->tgconstrrelid); + append_object_object(tmp_obj, "%{relation}D", rel); + } else { + append_not_present(tmp_obj, "%{relation}D"); + } + append_object_object(ret, "%{from_table}s", tmp_obj); + + if (node->isconstraint) { + if (!node->deferrable) + list = lappend(list, new_string_object("NOT")); + list = lappend(list, new_string_object("DEFERRABLE INITIALLY")); + if (node->initdeferred) + list = lappend(list, new_string_object("DEFERRED")); + else + list = lappend(list, new_string_object("IMMEDIATE")); + } + append_array_object(ret, "%{constraint_attrs: }s", list); + + append_string_object(ret, "FOR EACH %{for_each}s", "for_each", + node->row ? 
"ROW" : "STATEMENT"); + + tmp_obj = new_objtree("WHEN"); + if (node->whenClause) { + Node *whenClause; + + value = fastgetattr(trigTup, Anum_pg_trigger_tgqual, + RelationGetDescr(pg_trigger), &isnull); + if (isnull) + elog(ERROR, "null tgqual for trigger \"%s\"", + NameStr(trigForm->tgname)); + + whenClause = (Node*)stringToNode(TextDatumGetCString(value)); + + append_string_object(tmp_obj, "(%{clause}s)", "clause", + pg_get_trigger_whenclause(trigForm, whenClause, false)); + } else { + append_not_present(tmp_obj, "%{clause}s"); + } + append_object_object(ret, "%{when}s", tmp_obj); + + if (node->funcname && !node->funcSource) { + tmp_obj = new_objtree_VA("%{funcname}D", 1, "funcname", ObjTypeObject, + new_objtree_for_qualname_id(ProcedureRelationId, trigForm->tgfoid)); + list = NIL; + tgnargs = trigForm->tgnargs; + if (tgnargs > 0) { + bytea *tgargs; + char *argstr; + int findx; + int lentgargs; + char *p; + + tgargs = DatumGetByteaP(fastgetattr(trigTup, + Anum_pg_trigger_tgargs, + RelationGetDescr(pg_trigger), + &isnull)); + if (isnull) + elog(ERROR, "null tgargs for trigger \"%s\"", NameStr(trigForm->tgname)); + argstr = (char *)VARDATA(tgargs); + lentgargs = VARSIZE_ANY_EXHDR(tgargs); + + p = argstr; + for (findx = 0; findx < tgnargs; findx++) { + size_t tlen; + + /* Verify that the argument encoding is correct */ + tlen = strlen(p); + if (p + tlen >= argstr + lentgargs) { + elog(ERROR, "invalid argument string (%s) for trigger \"%s\"", argstr, NameStr(trigForm->tgname)); + } + list = lappend(list, new_string_object(p)); + p += tlen + 1; + } + } + + append_format_string(tmp_obj, "("); + append_array_object(tmp_obj, "%{args:, }L", list); /* might be NIL */ + append_format_string(tmp_obj, ")"); + + append_object_object(ret, "EXECUTE PROCEDURE %{function}s", tmp_obj); + } + + if (node->funcSource && node->funcSource->bodySrc) { + bodySrc = pstrdup(node->funcSource->bodySrc); + if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT + && strlen(bodySrc) > 
BEGIN_P_LEN + && pg_strncasecmp(bodySrc, BEGIN_P_STR, BEGIN_P_LEN) == 0) { + errno_t rc = memcpy_s(bodySrc, strlen(bodySrc), BEGIN_N_STR, BEGIN_P_LEN); + securec_check(rc, "\0", "\0") + } + tmp_obj = new_objtree_VA("%{bodysrc}s", 1, + "bodysrc", ObjTypeString, bodySrc); + } else { + tmp_obj = new_objtree_VA("", 1, + "present", ObjTypeBool, false); + } + append_object_object(ret, "%{bodysrc}s", tmp_obj); + + table_close(pg_trigger, AccessShareLock); + + return ret; +} \ No newline at end of file diff --git a/src/gausskernel/optimizer/commands/event_trigger.cpp b/src/gausskernel/optimizer/commands/event_trigger.cpp index 4e40b89caf..a522c97b96 100644 --- a/src/gausskernel/optimizer/commands/event_trigger.cpp +++ b/src/gausskernel/optimizer/commands/event_trigger.cpp @@ -33,6 +33,7 @@ #include "parser/parse_func.h" #include "parser/parser.h" #include "parser/parse_relation.h" +#include "parser/parse_expr.h" #include "pgstat.h" #include "miscadmin.h" #include "utils/acl.h" @@ -111,6 +112,7 @@ static const event_trigger_support_data event_trigger_support[] = { {"PACKAGE SPECIFICATION", true}, {"ROW LEVEL SECURITY POLICY", true}, {"SYNONYM", true}, + {"EVENT", true}, { NULL, false } }; @@ -1614,6 +1616,7 @@ void EventTriggerAlterTableStart(Node *parsetree) command->d.alterTable.classId = RelationRelationId; command->d.alterTable.objectId = InvalidOid; + command->d.alterTable.rewrite = false; command->d.alterTable.subcmds = NIL; command->parsetree = (Node*)copyObject(parsetree); command->parent = currentEventTriggerState->currentCommand; @@ -1645,7 +1648,7 @@ void EventTriggerAlterTableRelid(Oid objectId) * itself, they are all concerned with AlterTableCmd nodes that are generated * internally, so that's all that this code needs to handle at the moment. 
*/ -void EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address) +void EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address, bool rewrite) { MemoryContext oldcxt; CollectedATSubcmd *newsub; @@ -1664,13 +1667,193 @@ void EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address) newsub = (CollectedATSubcmd*) palloc(sizeof(CollectedATSubcmd)); newsub->address = address; newsub->parsetree = (Node*)copyObject(subcmd); - + + currentEventTriggerState->currentCommand->d.alterTable.rewrite |= rewrite; currentEventTriggerState->currentCommand->d.alterTable.subcmds = lappend(currentEventTriggerState->currentCommand->d.alterTable.subcmds, newsub); MemoryContextSwitchTo(oldcxt); } - + +/* + * EventTriggerAlterTypeStart + * Save data about a single part of an ALTER TYPE. + * + * ALTER TABLE can have multiple subcommands which might include DROP COLUMN + * command and ALTER TYPE referring the drop column in USING expression. + * As the dropped column cannot be accessed after the execution of DROP COLUMN, + * a special trigger is required to handle this case before the drop column is + * executed. 
+ */ +void EventTriggerAlterTypeStart(AlterTableCmd *subcmd, Relation rel) +{ + MemoryContext oldcxt; + CollectedATSubcmd *newsub; + ColumnDef *def; + Relation attrelation; + HeapTuple heapTup; + Form_pg_attribute attTup; + AttrNumber attnum; + ObjectAddress address; + + /* ignore if event trigger context not set, or collection disabled */ + if (!currentEventTriggerState || currentEventTriggerState->commandCollectionInhibited) { + return; + } + + Assert(IsA(subcmd, AlterTableCmd)); + Assert(subcmd->subtype == AT_AlterColumnType); + Assert(currentEventTriggerState->currentCommand != NULL); + Assert(OidIsValid(currentEventTriggerState->currentCommand->d.alterTable.objectId)); + + def = (ColumnDef *) subcmd->def; + Assert(IsA(def, ColumnDef)); + + oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); + + newsub = (CollectedATSubcmd*) palloc(sizeof(CollectedATSubcmd)); + newsub->parsetree = (Node *)copyObject(subcmd); + + attrelation = table_open(AttributeRelationId, RowExclusiveLock); + + /* Look up the target column */ + heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), subcmd->name); + if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */ + ereport(ERROR, + ( + errmsg("column \"%s\" of relation \"%s\" does not exist", + subcmd->name, RelationGetRelationName(rel)))); + + attTup = (Form_pg_attribute) GETSTRUCT(heapTup); + attnum = attTup->attnum; + + ObjectAddressSubSet(address, RelationRelationId, + RelationGetRelid(rel), attnum); + heap_freetuple(heapTup); + table_close(attrelation, RowExclusiveLock); + newsub->address = address; + newsub->usingexpr = NULL; + + if (def->raw_default) { + OverrideSearchPath *overridePath; + char *defexpr; + + /* + * We want all object names to be qualified when deparsing the + * expression, so that results are "portable" to environments with + * different search_path settings. Rather than inject what would be + * repetitive calls to override search path all over the place, we do + * it centrally here. 
+ */ + if (def->cooked_default) { + defexpr = nodeToString(def->cooked_default); + } else { + ParseState* pstate = make_parsestate(NULL); + RangeTblEntry* rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); + addRTEtoQuery(pstate, rte, false, true, true); + Node* transform = transformExpr(pstate, def->raw_default, EXPR_KIND_ALTER_COL_TRANSFORM); + + AlterTableCmd *curcmd = (AlterTableCmd*)(newsub->parsetree); + ColumnDef *curdef = (ColumnDef *) curcmd->def; + curdef->cooked_default = transform; + + defexpr = nodeToString(transform); + } + + overridePath = GetOverrideSearchPath(CurrentMemoryContext); + overridePath->schemas = NIL; + overridePath->addCatalog = false; + overridePath->addTemp = true; + PushOverrideSearchPath(overridePath); + + newsub->usingexpr = + TextDatumGetCString(DirectFunctionCall2(pg_get_expr, CStringGetTextDatum(defexpr), RelationGetRelid(rel))); + PopOverrideSearchPath(); + } + + currentEventTriggerState->currentCommand->d.alterTable.subcmds = + lappend(currentEventTriggerState->currentCommand->d.alterTable.subcmds, newsub); + + MemoryContextSwitchTo(oldcxt); +} + +/* + * EventTriggerAlterTypeEnd + * Finish up saving an ALTER TYPE command, and add it to command list. 
+ */ +void EventTriggerAlterTypeEnd(Node *subcmd, ObjectAddress address, int rewrite) +{ + MemoryContext oldcxt; + CollectedATSubcmd *newsub; + ListCell *cell; + CollectedCommand *cmd; + AlterTableCmd *altsubcmd = (AlterTableCmd *)subcmd; + + /* ignore if event trigger context not set, or collection disabled */ + if (!currentEventTriggerState || currentEventTriggerState->commandCollectionInhibited) { + return; + } + + cmd = currentEventTriggerState->currentCommand; + + Assert(IsA(subcmd, AlterTableCmd)); + Assert(cmd != NULL); + Assert(OidIsValid(cmd->d.alterTable.objectId)); + + foreach(cell, cmd->d.alterTable.subcmds) { + CollectedATSubcmd *sub = (CollectedATSubcmd *) lfirst(cell); + AlterTableCmd *collcmd = (AlterTableCmd *) sub->parsetree; + + if (collcmd->subtype == altsubcmd->subtype && + address.classId == sub->address.classId && + address.objectId == sub->address.objectId && + address.objectSubId == sub->address.objectSubId) { + cmd->d.alterTable.rewrite |= rewrite; + return; + } + } + + oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); + + newsub = (CollectedATSubcmd*)palloc(sizeof(CollectedATSubcmd)); + newsub->address = address; + newsub->parsetree = (Node *) copyObject(subcmd); + + cmd->d.alterTable.rewrite |= rewrite; + cmd->d.alterTable.subcmds = lappend(cmd->d.alterTable.subcmds, newsub); + + MemoryContextSwitchTo(oldcxt); +} + +void EventTriggerAlterTypeUpdate(ObjectAddress address, AttrNumber old_attnum) +{ + ListCell *cell; + CollectedCommand *cmd; + + /* ignore if event trigger context not set, or collection disabled */ + if (!currentEventTriggerState || currentEventTriggerState->commandCollectionInhibited) { + return; + } + + cmd = currentEventTriggerState->currentCommand; + + Assert(cmd != NULL); + Assert(OidIsValid(cmd->d.alterTable.objectId)); + + foreach(cell, cmd->d.alterTable.subcmds) { + CollectedATSubcmd *sub = (CollectedATSubcmd *) lfirst(cell); + AlterTableCmd *collcmd = (AlterTableCmd *) sub->parsetree; + + if 
(collcmd->subtype == AT_AlterColumnType&& + address.classId == sub->address.classId && + address.objectId == sub->address.objectId && + old_attnum == sub->address.objectSubId) { + sub->address.objectSubId = address.objectSubId; + return; + } + } +} + /* * EventTriggerAlterTableEnd * Finish up saving an ALTER TABLE command, and add it to command list. diff --git a/src/gausskernel/optimizer/commands/eventcmds.cpp b/src/gausskernel/optimizer/commands/eventcmds.cpp index 6324eab3eb..258e50ca01 100755 --- a/src/gausskernel/optimizer/commands/eventcmds.cpp +++ b/src/gausskernel/optimizer/commands/eventcmds.cpp @@ -199,6 +199,28 @@ Datum ParseIntevalExpr(Node *intervalNode) return CStringGetTextDatum(buf.data); } +char* parseIntervalExprString(Node *intervalNode) +{ + char *res; + StringInfoData buf; + initStringInfo(&buf); + + TypeCast *tc = (TypeCast *)intervalNode; + A_Const *ac = (A_Const *)tc->arg; + A_Const *tm = (A_Const *)lfirst(list_head(tc->typname->typmods)); + const char *tm_str = NULL; + tm_str = IntervalTypmodParse(tm); + if (IsA(&ac->val, Integer)) { + char *quantity_str = (char *)palloc(INTERVAL_QUALITY_LENGTH); + pg_itoa((int)ac->val.val.ival, quantity_str); + appendStringInfo(&buf, " \'%s\' ", quantity_str); + } else if (IsA(&ac->val, String) || IsA(&ac->val, Float)) { + appendStringInfo(&buf, " \'%s\' ", (char *)ac->val.val.str); + } + appendStringInfo(&buf, "%s", tm_str); + return pstrdup(buf.data); +} + Datum ExecTimeExpr(Node *node) { /* Check whether the execution result of the time expression is of the timestamp type */ @@ -232,6 +254,14 @@ Datum ExecTimeExpr(Node *node) return result; } +char* parseTimeExprString(Node* timeEexpr) +{ + Datum timeres; + + timeres = ExecTimeExpr(timeEexpr); + return DatumGetCString(DirectFunctionCall1(timestamp_out, timeres)); +} + void GetTimeExecResult(CreateEventStmt *stmt, Datum &start_time, Datum &interval_time, Datum &end_time) { /* Parse Interval Expression */ @@ -453,19 +483,28 @@ void 
CheckEventPrivilege(char* schema_name, char* event_name, AclMode mode, bool ReleaseSysCache(tup); } -void CreateEventCommand(CreateEventStmt *stmt) +ObjectAddress CreateEventCommand(CreateEventStmt *stmt) { + ObjectAddress myself; + char *event_name_str = stmt->event_name->relname; char *schema_name_str = (stmt->event_name->schemaname) ? stmt->event_name->schemaname : get_real_search_schema(); + if (!stmt->event_name->schemaname) { + stmt->event_name->schemaname = pstrdup(schema_name_str); + } CheckEventPrivilege(schema_name_str, event_name_str, ACL_CREATE, true); + myself.classId = PgJobRelationId; + myself.objectId = 0; + myself.objectSubId = 0; + Datum schema_name = DirectFunctionCall1(namein, CStringGetDatum(schema_name_str)); Datum ev_name = CStringGetTextDatum(event_name_str); FunctionCallInfoData ev_arg; const short nrgs_job = ARG_19; if (CheckEventExists(ev_name, stmt->if_not_exists)) { - return; + return myself; } InitFunctionCallInfoData(ev_arg, NULL, nrgs_job, InvalidOid, NULL, NULL); @@ -478,6 +517,8 @@ void CreateEventCommand(CreateEventStmt *stmt) PrepareFuncArg(stmt, ev_name, schema_name, &ev_arg); create_job_raw(&ev_arg); + + return myself; } Datum GetInlineJobName(Datum ev_name) @@ -837,10 +878,14 @@ void UpdatePgJobParam(AlterEventStmt *stmt, Datum ev_name) UpdateMultiJob(ev_name, values, nulls, replaces); } -void AlterEventCommand(AlterEventStmt *stmt) +ObjectAddress AlterEventCommand(AlterEventStmt *stmt) { + ObjectAddress myself; char *event_name_str = stmt->event_name->relname; char *schema_name_str = (stmt->event_name->schemaname) ? 
stmt->event_name->schemaname : get_real_search_schema(); + if (!stmt->event_name->schemaname) { + stmt->event_name->schemaname = pstrdup(schema_name_str); + } Datum ev_name = CStringGetTextDatum(event_name_str); CheckEventPrivilege(schema_name_str, event_name_str, ACL_USAGE, false); @@ -859,13 +904,20 @@ void AlterEventCommand(AlterEventStmt *stmt) UpdateAttributeParam(stmt, ev_name); UpdatePgJobProcParam(stmt, ev_name); UpdatePgJobParam(stmt, ev_name); + + myself.classId = PgJobRelationId; + myself.objectId = 0; + myself.objectSubId = 0; + return myself; } void DropEventCommand(DropEventStmt *stmt) { char *event_name_str = stmt->event_name->relname; char *schema_name_str = (stmt->event_name->schemaname) ? stmt->event_name->schemaname : get_real_search_schema(); - + if (!stmt->event_name->schemaname) { + stmt->event_name->schemaname = pstrdup(schema_name_str); + } Datum ev_name = CStringGetTextDatum(event_name_str); if (CheckEventNotExists(ev_name, stmt->missing_ok)) { return; diff --git a/src/gausskernel/optimizer/commands/publicationcmds.cpp b/src/gausskernel/optimizer/commands/publicationcmds.cpp index 9b771e41e7..4ff29d90ce 100644 --- a/src/gausskernel/optimizer/commands/publicationcmds.cpp +++ b/src/gausskernel/optimizer/commands/publicationcmds.cpp @@ -59,13 +59,14 @@ static void CloseTableList(List *rels); static void PublicationAddTables(Oid pubid, List *rels, bool if_not_exists, AlterPublicationStmt *stmt); static void PublicationDropTables(Oid pubid, List *rels, bool missing_ok); -static void parse_publication_options(List *options, - bool *publish_given, - bool *publish_insert, - bool *publish_update, - bool *publish_delete, - bool *publish_ddl_given, - int64 *pubddl) +static void parse_publication_options(List *options, + bool *publish_given, + bool *publish_insert, + bool *publish_update, + bool *publish_delete, + bool *publish_truncate, + bool *publish_ddl_given, + int64 *pubddl) { ListCell *lc; @@ -76,6 +77,7 @@ static void 
parse_publication_options(List *options, *publish_insert = true; *publish_update = true; *publish_delete = true; + *publish_truncate = true; *pubddl = 0; /* Parse options */ @@ -97,6 +99,7 @@ static void parse_publication_options(List *options, *publish_insert = false; *publish_update = false; *publish_delete = false; + *publish_truncate = false; *publish_given = true; publish = defGetString(defel); @@ -113,6 +116,8 @@ static void parse_publication_options(List *options, *publish_update = true; else if (strcmp(publish_opt, "delete") == 0) *publish_delete = true; + else if (strcmp(publish_opt, "truncate") == 0) + *publish_truncate = true; else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("unrecognized \"publish\" value: \"%s\"", publish_opt))); @@ -200,23 +205,51 @@ CreateDDLReplicaEventTrigger(char *eventname, List *commands, ObjectAddress puba recordDependencyOn(&referenced, &pubaddress, DEPENDENCY_INTERNAL); } -static void -AddAllDDLReplicaEventTriggers(List *end_commands) +static void AddAllDDLReplicaEventTriggers(List *end_commands) { end_commands = lappend(end_commands, makeString("CREATE INDEX")); end_commands = lappend(end_commands, makeString("DROP INDEX")); + end_commands = lappend(end_commands, makeString("ALTER INDEX")); + end_commands = lappend(end_commands, makeString("CREATE SEQUENCE")); + end_commands = lappend(end_commands, makeString("ALTER SEQUENCE")); + end_commands = lappend(end_commands, makeString("DROP SEQUENCE")); + end_commands = lappend(end_commands, makeString("CREATE SCHEMA")); + end_commands = lappend(end_commands, makeString("ALTER SCHEMA")); + end_commands = lappend(end_commands, makeString("DROP SCHEMA")); + end_commands = lappend(end_commands, makeString("COMMENT")); + end_commands = lappend(end_commands, makeString("CREATE VIEW")); + end_commands = lappend(end_commands, makeString("ALTER VIEW")); + end_commands = lappend(end_commands, makeString("DROP VIEW")); + end_commands = lappend(end_commands, makeString("CREATE 
FUNCTION")); + end_commands = lappend(end_commands, makeString("ALTER FUNCTION")); + end_commands = lappend(end_commands, makeString("DROP FUNCTION")); + end_commands = lappend(end_commands, makeString("CREATE TRIGGER")); + end_commands = lappend(end_commands, makeString("ALTER TRIGGER")); + end_commands = lappend(end_commands, makeString("DROP TRIGGER")); + end_commands = lappend(end_commands, makeString("CREATE TYPE")); + end_commands = lappend(end_commands, makeString("ALTER TYPE")); + end_commands = lappend(end_commands, makeString("DROP TYPE")); + if(DB_IS_CMPT(B_FORMAT)) { + end_commands = lappend(end_commands, makeString("CREATE EVENT")); + end_commands = lappend(end_commands, makeString("ALTER EVENT")); + end_commands = lappend(end_commands, makeString("DROP EVENT")); + } } /* * If DDL replication is enabled, create event triggers to capture and log any * relevant events. */ -static void -CreateDDLReplicaEventTriggers(ObjectAddress pubaddress, Oid puboid) +static void CreateDDLReplicaEventTriggers(ObjectAddress pubaddress, Oid puboid) { List *start_commands = list_make1(makeString("DROP TABLE")); - List *end_commands = NIL; - end_commands = lappend(end_commands, makeString("CREATE TABLE")); + start_commands = lappend(start_commands, makeString("DROP INDEX")); + start_commands = lappend(start_commands, makeString("DROP TYPE")); + + List *rewrite_commands = list_make1(makeString("ALTER TABLE")); + + List *end_commands = list_make1(makeString("CREATE TABLE")); + end_commands = lappend(end_commands, makeString("ALTER TABLE")); end_commands = lappend(end_commands, makeString("DROP TABLE")); AddAllDDLReplicaEventTriggers(end_commands); @@ -225,13 +258,15 @@ CreateDDLReplicaEventTriggers(ObjectAddress pubaddress, Oid puboid) /* Create the ddl_command_start event trigger */ CreateDDLReplicaEventTrigger(PUB_TRIG_DDL_CMD_START, start_commands, pubaddress, puboid); + + /* Create the table_rewrite event trigger */ + CreateDDLReplicaEventTrigger(PUB_TRIG_TBL_REWRITE, 
rewrite_commands, pubaddress, puboid); } /* * Helper function to drop an event trigger for DDL replication. */ -static void -DropDDLReplicaEventTrigger(char *eventname, Oid puboid) +static void DropDDLReplicaEventTrigger(char *eventname, Oid puboid) { char trigger_name[NAMEDATALEN]; Oid evtoid; @@ -260,11 +295,11 @@ DropDDLReplicaEventTrigger(char *eventname, Oid puboid) /* * Drop all the event triggers which are used for DDL replication. */ -static void -DropDDLReplicaEventTriggers(Oid puboid) +static void DropDDLReplicaEventTriggers(Oid puboid) { DropDDLReplicaEventTrigger(PUB_TRIG_DDL_CMD_START, puboid); DropDDLReplicaEventTrigger(PUB_TRIG_DDL_CMD_END, puboid); + DropDDLReplicaEventTrigger(PUB_TRIG_TBL_REWRITE, puboid); } /* @@ -287,6 +322,7 @@ ObjectAddress CreatePublication(CreatePublicationStmt *stmt) bool publish_insert; bool publish_update; bool publish_delete; + bool publish_truncate; bool publish_ddl_given; int64 pubddl; AclResult aclresult; @@ -319,15 +355,15 @@ ObjectAddress CreatePublication(CreatePublicationStmt *stmt) values[Anum_pg_publication_pubname - 1] = DirectFunctionCall1(namein, CStringGetDatum(stmt->pubname)); values[Anum_pg_publication_pubowner - 1] = ObjectIdGetDatum(GetUserId()); - parse_publication_options(stmt->options, &publish_given, - &publish_insert, &publish_update, &publish_delete, - &publish_ddl_given, &pubddl); + parse_publication_options(stmt->options, &publish_given, &publish_insert, &publish_update, &publish_delete, + &publish_truncate, &publish_ddl_given, &pubddl); values[Anum_pg_publication_puballtables - 1] = BoolGetDatum(stmt->for_all_tables); values[Anum_pg_publication_pubinsert - 1] = BoolGetDatum(publish_insert); values[Anum_pg_publication_pubupdate - 1] = BoolGetDatum(publish_update); values[Anum_pg_publication_pubdelete - 1] = BoolGetDatum(publish_delete); values[Anum_pg_publication_pubddl - 1] = Int64GetDatum(pubddl); + values[Anum_pg_publication_pubtruncate - 1] = BoolGetDatum(publish_truncate); tup = 
heap_form_tuple(RelationGetDescr(rel), values, nulls); @@ -396,6 +432,7 @@ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, He bool publish_insert; bool publish_update; bool publish_delete; + bool publish_truncate; bool publish_ddl_given; int64 pubddl; bool pubddl_change = false; @@ -404,9 +441,8 @@ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, He Form_pg_publication pubform; pubform = (Form_pg_publication)GETSTRUCT(tup); - parse_publication_options(stmt->options, &publish_given, - &publish_insert, &publish_update, &publish_delete, - &publish_ddl_given, &pubddl); + parse_publication_options(stmt->options, &publish_given, &publish_insert, &publish_update, &publish_delete, + &publish_truncate, &publish_ddl_given, &pubddl); /* Everything ok, form a new tuple. */ rc = memset_s(values, sizeof(values), 0, sizeof(values)); @@ -425,6 +461,9 @@ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, He values[Anum_pg_publication_pubdelete - 1] = BoolGetDatum(publish_delete); replaces[Anum_pg_publication_pubdelete - 1] = true; + + values[Anum_pg_publication_pubtruncate - 1] = BoolGetDatum(publish_truncate); + replaces[Anum_pg_publication_pubtruncate - 1] = true; } if (publish_ddl_given) { diff --git a/src/gausskernel/optimizer/commands/subscriptioncmds.cpp b/src/gausskernel/optimizer/commands/subscriptioncmds.cpp index 440eb0a96c..b0c32e6c51 100644 --- a/src/gausskernel/optimizer/commands/subscriptioncmds.cpp +++ b/src/gausskernel/optimizer/commands/subscriptioncmds.cpp @@ -94,6 +94,8 @@ static void ValidateReplicationSlot(char *slotname, List *publications); static List *fetch_table_list(List *publications); static void ReportSlotConnectionError(List *rstates, Oid subid, char *slotname); static bool CheckPublicationsExistOnPublisher(List *publications); +static bool CheckCompatibilityForDDLPublications(const char* conninfo, List *publications); +static bool CheckDDLPublicationsExists(List 
*publications); /* * Common option parsing function for CREATE and ALTER SUBSCRIPTION commands. @@ -617,6 +619,14 @@ ObjectAddress CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) ereport(ERROR, (errmsg("There are some publications not exist on the publisher."))); } + if (!CheckCompatibilityForDDLPublications(encryptConninfo, publications)) { + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); + ereport( + ERROR, + (errmsg( + "There are some publications replicate ddl but dbcompatibility is different with subscriptor"))); + } + /* * If requested, create the replication slot on remote side for our * newly created subscription. @@ -1007,6 +1017,15 @@ ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt, bool isTopLevel) (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); ereport(ERROR, (errmsg("There are some publications not exist on the publisher."))); } + + if (!CheckCompatibilityForDDLPublications(encryptConninfo, opts.publications)) { + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); + ereport( + ERROR, + (errmsg( + "There are some publications replicate ddl but dbcompatibility is different with subscriptor"))); + } + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); } @@ -1029,6 +1048,14 @@ ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt, bool isTopLevel) ereport(ERROR, (errmsg("There are some publications not exist on the publisher."))); } + if (!CheckCompatibilityForDDLPublications(encryptConninfo, opts.publications)) { + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); + ereport( + ERROR, + (errmsg( + "There are some publications replicate ddl but dbcompatibility is different with subscriptor"))); + } + if (createSlot) { CreateSlotInPublisherAndInsertSubRel(finalSlotName, subid, opts.publications, NULL, true); } @@ -1718,3 +1745,136 @@ static bool CheckPublicationsExistOnPublisher(List *publications) return exists; } + + +static bool CheckCompatibilityForDDLPublications(const char* 
conninfo, List *publications) +{ + Assert(list_length(publications) > 0); + bool checkres = false; + char *datname = NULL; + List* conninfoList = ConninfoToDefList(conninfo); + ListCell* l = NULL; + foreach (l, conninfoList) { + DefElem* defel = (DefElem*)lfirst(l); + if (pg_strcasecmp(defel->defname, "dbname") == 0) { + datname = defGetString(defel); + } + } + + if (!datname) { + ereport(ERROR, (errmsg("Failed to get dbname from the conninfo."))); + } + + StringInfoData cmd; + initStringInfo(&cmd); + appendStringInfo(&cmd, + "SELECT d.datcompatibility FROM pg_catalog.pg_attribute a JOIN pg_database d " + "ON a.attrelid=pg_catalog.regclass('pg_publication') AND a.attname='pubddl' " + "AND d.datname='%s'", + datname); + + WalRcvExecResult *res; + Oid tableRow[1] = {NAMEOID}; + res = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_exec(cmd.data, 1, tableRow); + + FreeStringInfo(&cmd); + ClearListContent(conninfoList); + list_free_ext(conninfoList); + + if (res->status != WALRCV_OK_TUPLES) { + ereport(ERROR, (errmsg("Failed to get publication datcompatibility from the publisher."))); + } + + if (tuplestore_get_memtupcount(res->tuplestore) > 0) { + checkres = true; + char *datcompatibility = NULL; + bool isnull = false; + int expected_db_cmpt = u_sess->attr.attr_sql.sql_compatibility; + TupleTableSlot *slot = MakeSingleTupleTableSlot(res->tupledesc); + if (tuplestore_gettupleslot(res->tuplestore, true, false, slot)) { + Datum datum = tableam_tslot_getattr(slot, 1, &isnull); + Assert(!isnull); + + datcompatibility = pstrdup(NameStr(*(DatumGetName(datum)))); + + switch (expected_db_cmpt) { + case A_FORMAT: + checkres = !pg_strcasecmp(datcompatibility, g_dbCompatArray[DB_CMPT_A].name); + break; + case B_FORMAT: + checkres = !pg_strcasecmp(datcompatibility, g_dbCompatArray[DB_CMPT_B].name); + break; + case C_FORMAT: + checkres = !pg_strcasecmp(datcompatibility, g_dbCompatArray[DB_CMPT_C].name); + break; + case PG_FORMAT: + checkres = !pg_strcasecmp(datcompatibility, 
g_dbCompatArray[DB_CMPT_PG].name); + break; + default: + checkres = false; + } + + ExecClearTuple(slot); + walrcv_clear_result(res); + pfree(datcompatibility); + + /* now check if the publications have ddl publications + * if there is no ddl replication, the different dbcompatibility + * is acceptable + */ + if (!checkres) { + if (!CheckDDLPublicationsExists(publications)) { + checkres = true; + } + } + } else { + /* can not get tupleslot */ + checkres = false; + } + } else { + /* there is no pubddl column in pg_publication, + * maybe publisher have not support ddl replication yet + */ + walrcv_clear_result(res); + checkres = true; + } + + return checkres; +} + +static bool CheckDDLPublicationsExists(List *publications) +{ + StringInfoData cmd; + initStringInfo(&cmd); + appendStringInfo(&cmd, "SELECT 1 FROM pg_catalog.pg_publication t" + " WHERE t.pubddl != 0 AND t.pubname IN ("); + ListCell *lc; + bool first = true; + foreach (lc, publications) { + char *pubname = strVal(lfirst(lc)); + if (first) { + first = false; + } else { + appendStringInfoString(&cmd, ", "); + } + appendStringInfo(&cmd, "%s", quote_literal_cstr(pubname)); + } + appendStringInfoString(&cmd, ")"); + + WalRcvExecResult *res; + Oid tableRow[1] = {INT4OID}; + res = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_exec(cmd.data, 1, tableRow); + FreeStringInfo(&cmd); + + if (res->status != WALRCV_OK_TUPLES) { + ereport(ERROR, (errmsg("Failed to get DDL publication list from the publisher."))); + } + bool exists = false; + if (tuplestore_get_memtupcount(res->tuplestore) > 0) { + exists = true; + } + + walrcv_clear_result(res); + + return exists; +} diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 7e038b65ac..4fe2a236ca 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -139,6 +139,7 @@ #include "storage/tcap.h" #include "streaming/streaming_catalog.h" #include 
"tcop/utility.h" +#include "tcop/ddldeparse.h" #include "utils/acl.h" #include "utils/aiomem.h" #include "utils/builtins.h" @@ -278,6 +279,8 @@ typedef struct NewColumnValue { bool is_generated; /* is it a GENERATED expression? */ bool is_autoinc; bool is_addloc; /* is add column first or after */ + bool is_alter_using; /* have alter type using clause */ + bool make_dml_change; AttrNumber newattnum; /* is modify column first or after -1 denote add; 0 denote modify without first|after; @@ -797,6 +800,37 @@ static void check_unsupported_charset_for_column(Oid collation, const char* col_ static void AlterTableNamespaceDependentProcess(Relation classRel ,Relation rel, Oid oldNspOid, Oid nspOid, ObjectAddresses* objsMoved, char* newrelname); + +static inline void validate_relation_kind(Relation r) +{ + if (r->rd_rel->relkind == RELKIND_INDEX || + // r->rd_rel->relkind == RELKIND_PARTITIONED_INDEX || + r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) + elog(ERROR, + "cannot open relation \"%s\"", + RelationGetRelationName(r)); +} + +/* ---------------- + * table_open - open a table relation by relation OID + * + * This is essentially relation_open plus check that the relation + * is not an index nor a composite type. (The caller should also + * check that it's not a view or foreign table before assuming it has + * storage.) 
+ * ---------------- + */ +Relation table_open(Oid relationId, LOCKMODE lockmode) +{ + Relation r; + + r = relation_open(relationId, lockmode); + + validate_relation_kind(r); + + return r; +} + inline static bool CStoreSupportATCmd(AlterTableType cmdtype) { bool ret = false; @@ -4292,15 +4326,9 @@ void ExecuteTruncate(TruncateStmt* stmt) { List* rels = NIL; List* relids = NIL; - List* seq_relids = NIL; - List* autoinc_seqoids = NIL; + List* relids_logged = NIL; List* rels_in_redis = NIL; - EState* estate = NULL; - ResultRelInfo* resultRelInfos = NULL; - ResultRelInfo* resultRelInfo = NULL; - SubTransactionId mySubid; ListCell* cell = NULL; - bool isDfsTruncate = false; #ifdef PGXC char* FirstExecNode = NULL; bool isFirstNode = false; @@ -4430,6 +4458,10 @@ void ExecuteTruncate(TruncateStmt* stmt) rels = lappend(rels, rel); relids = lappend_oid(relids, myrelid); + /* Log this relation only if needed for logical decoding */ + if (RelationIsLogicallyLogged(rel)) + relids_logged = lappend_oid(relids_logged, myrelid); + if (recurse) { ListCell* child = NULL; List* children = NIL; @@ -4447,6 +4479,9 @@ void ExecuteTruncate(TruncateStmt* stmt) truncate_check_rel(rel); rels = lappend(rels, rel); relids = lappend_oid(relids, childrelid); + /* Log this relation only if needed for logical decoding */ + if (RelationIsLogicallyLogged(rel)) + relids_logged = lappend_oid(relids_logged, childrelid); } } @@ -4518,21 +4553,66 @@ void ExecuteTruncate(TruncateStmt* stmt) #endif } + ExecuteTruncateGuts(rels, relids, relids_logged, rels_in_redis, + stmt->behavior, stmt->restart_seqs, stmt); + + /* And close the rels */ + foreach(cell, rels) + { + Relation rel = (Relation) lfirst(cell); + + heap_close(rel, NoLock); + } +} + + +/* + * ExecuteTruncateGuts + * + * Internal implementation of TRUNCATE. This is called by the actual TRUNCATE + * command (see above) as well as replication subscribers that execute a + * replicated TRUNCATE action. 
+ * + * explicit_rels is the list of Relations to truncate that the command + * specified. relids is the list of Oids corresponding to explicit_rels. + * relids_logged is the list of Oids (a subset of relids) that require + * WAL-logging. This is all a bit redundant, but the existing callers have + * this information handy in this form. + */ +void ExecuteTruncateGuts( + List *explicit_rels, List *relids, List *relids_logged, List *rels_in_redis, + DropBehavior behavior, bool restart_seqs, TruncateStmt* stmt) +{ + List *rels; + List *seq_relids = NIL; + EState *estate; + ResultRelInfo *resultRelInfos; + ResultRelInfo *resultRelInfo; + SubTransactionId mySubid; + ListCell *cell; + Oid *logrelids; + List* autoinc_seqoids = NIL; + bool isDfsTruncate = false; + /* - * In CASCADE mode, suck in all referencing relations as well. This + * Open, exclusive-lock, and check all the explicitly-specified relations + * + * In CASCADE mode, suck in all referencing relations as well. This * requires multiple iterations to find indirectly-dependent relations. At * each phase, we need to exclusive-lock new rels before looking for their - * dependencies, else we might miss something. Also, we check each rel as + * dependencies, else we might miss something. Also, we check each rel as * soon as we open it, to avoid a faux pas such as holding lock for a long * time on a rel we have no permissions for. 
*/ - if (stmt->behavior == DROP_CASCADE) { + rels = list_copy(explicit_rels); + if (behavior == DROP_CASCADE) { for (;;) { List* newrelids = NIL; newrelids = heap_truncate_find_FKs(relids); - if (newrelids == NIL) + if (newrelids == NIL) { break; /* nothing else to add */ + } foreach (cell, newrelids) { Oid relid = lfirst_oid(cell); @@ -4547,6 +4627,9 @@ void ExecuteTruncate(TruncateStmt* stmt) truncate_check_rel(rel); rels = lappend(rels, rel); relids = lappend_oid(relids, relid); + /* Log this relation only if needed for logical decoding */ + if (RelationIsLogicallyLogged(rel)) + relids_logged = lappend_oid(relids_logged, relid); } } } @@ -4559,7 +4642,7 @@ void ExecuteTruncate(TruncateStmt* stmt) #ifdef USE_ASSERT_CHECKING heap_truncate_check_FKs(rels, false); #else - if (stmt->behavior == DROP_RESTRICT) + if (behavior == DROP_RESTRICT) heap_truncate_check_FKs(rels, false); #endif @@ -4569,7 +4652,7 @@ void ExecuteTruncate(TruncateStmt* stmt) * We want to do this early since it's pointless to do all the truncation * work only to fail on sequence permissions. */ - if (stmt->restart_seqs) { + if (restart_seqs) { foreach (cell, rels) { Relation rel = (Relation)lfirst(cell); List* seqlist = getOwnedSequences(RelationGetRelid(rel)); @@ -4787,6 +4870,41 @@ void ExecuteTruncate(TruncateStmt* stmt) ResetSequence(seq_relid, true); } + /* + * Write a WAL record to allow this set of actions to be logically decoded. + * + * Assemble an array of relids so we can write a single WAL record for the + * whole action. 
+ */ + if (list_length(relids_logged) > 0) + { + xl_heap_truncate xlrec; + int i = 0; + + /* should only get here if wal_level >= logical */ + Assert(XLogLogicalInfoActive()); + + logrelids = (Oid*)palloc(list_length(relids_logged) * sizeof(Oid)); + foreach (cell, relids_logged) + logrelids[i++] = lfirst_oid(cell); + + xlrec.dbId = u_sess->proc_cxt.MyDatabaseId; + xlrec.nrelids = list_length(relids_logged); + xlrec.flags = 0; + if (behavior == DROP_CASCADE) + xlrec.flags |= XLH_TRUNCATE_CASCADE; + if (restart_seqs) + xlrec.flags |= XLH_TRUNCATE_RESTART_SEQS; + + XLogBeginInsert(); + XLogRegisterData((char *) &xlrec, SizeOfHeapTruncate); + XLogRegisterData((char *) logrelids, list_length(relids_logged) * sizeof(Oid)); + + // XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN); + + (void) XLogInsert(RM_HEAP3_ID, XLOG_HEAP3_TRUNCATE); + } + /* * Process all AFTER STATEMENT TRUNCATE triggers. */ @@ -4798,86 +4916,88 @@ void ExecuteTruncate(TruncateStmt* stmt) } #ifdef ENABLE_MULTIPLE_NODES - /* - * In Postgres-XC, TRUNCATE needs to be launched to remote nodes before the - * AFTER triggers are launched. This insures that the triggers are being fired - * by correct events. - */ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - if (u_sess->attr.attr_sql.enable_parallel_ddl && !isFirstNode) { - bool is_temp = false; - RemoteQuery* step = makeNode(RemoteQuery); - ExecNodes* exec_nodes = NULL; + if (stmt) { + /* + * In Postgres-XC, TRUNCATE needs to be launched to remote nodes before the + * AFTER triggers are launched. This insures that the triggers are being fired + * by correct events. 
+ */ + if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { + if (u_sess->attr.attr_sql.enable_parallel_ddl && !isFirstNode) { + bool is_temp = false; + RemoteQuery* step = makeNode(RemoteQuery); + ExecNodes* exec_nodes = NULL; + + /* Check un-allowed case where truncate tables from different node groups */ + if (!ObjectsInSameNodeGroup(stmt->relations, T_TruncateStmt)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("NOT-SUPPORT: Not support TRUNCATE multiple objects different nodegroup"))); + } - /* Check un-allowed case where truncate tables from different node groups */ - if (!ObjectsInSameNodeGroup(stmt->relations, T_TruncateStmt)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("NOT-SUPPORT: Not support TRUNCATE multiple objects different nodegroup"))); - } + foreach (cell, stmt->relations) { + Oid relid; + RangeVar* rel = (RangeVar*)lfirst(cell); - foreach (cell, stmt->relations) { - Oid relid; - RangeVar* rel = (RangeVar*)lfirst(cell); + relid = RangeVarGetRelid(rel, NoLock, false); - relid = RangeVarGetRelid(rel, NoLock, false); + if (exec_nodes == NULL) { + exec_nodes = RelidGetExecNodes(relid); + } - if (exec_nodes == NULL) { - exec_nodes = RelidGetExecNodes(relid); + if (IsTempTable(relid)) { + is_temp = true; + break; + } } - if (IsTempTable(relid)) { - is_temp = true; - break; - } - } + step->combine_type = COMBINE_TYPE_SAME; + step->exec_nodes = exec_nodes; + step->sql_statement = pstrdup(sql_statement); + step->force_autocommit = false; + step->exec_type = EXEC_ON_DATANODES; + step->is_temp = is_temp; + ExecRemoteUtility_ParallelDDLMode(step, FirstExecNode); + pfree_ext(step->sql_statement); + pfree_ext(step); + } else { + bool is_temp = false; + RemoteQuery* step = makeNode(RemoteQuery); + ExecNodes* exec_nodes = NULL; - step->combine_type = COMBINE_TYPE_SAME; - step->exec_nodes = exec_nodes; - step->sql_statement = pstrdup(sql_statement); - step->force_autocommit = false; - step->exec_type = 
EXEC_ON_DATANODES; - step->is_temp = is_temp; - ExecRemoteUtility_ParallelDDLMode(step, FirstExecNode); - pfree_ext(step->sql_statement); - pfree_ext(step); - } else { - bool is_temp = false; - RemoteQuery* step = makeNode(RemoteQuery); - ExecNodes* exec_nodes = NULL; + /* Check un-allowed case where truncate tables from different node groups */ + if (!ObjectsInSameNodeGroup(stmt->relations, T_TruncateStmt)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("NOT-SUPPORT: Not support TRUNCATE multiple objects different nodegroup"))); + } - /* Check un-allowed case where truncate tables from different node groups */ - if (!ObjectsInSameNodeGroup(stmt->relations, T_TruncateStmt)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("NOT-SUPPORT: Not support TRUNCATE multiple objects different nodegroup"))); - } + foreach (cell, stmt->relations) { + Oid relid; + RangeVar* rel = (RangeVar*)lfirst(cell); - foreach (cell, stmt->relations) { - Oid relid; - RangeVar* rel = (RangeVar*)lfirst(cell); + relid = RangeVarGetRelid(rel, NoLock, false); - relid = RangeVarGetRelid(rel, NoLock, false); + if (exec_nodes == NULL) { + exec_nodes = RelidGetExecNodes(relid); + } - if (exec_nodes == NULL) { - exec_nodes = RelidGetExecNodes(relid); + if (IsTempTable(relid)) { + is_temp = true; + break; + } } - if (IsTempTable(relid)) { - is_temp = true; - break; - } + step->combine_type = COMBINE_TYPE_SAME; + step->exec_nodes = exec_nodes; + step->sql_statement = pstrdup(sql_statement); + step->force_autocommit = false; + step->exec_type = is_temp ? EXEC_ON_DATANODES : EXEC_ON_ALL_NODES; + step->is_temp = is_temp; + ExecRemoteUtility(step); + pfree_ext(step->sql_statement); + pfree_ext(step); } - - step->combine_type = COMBINE_TYPE_SAME; - step->exec_nodes = exec_nodes; - step->sql_statement = pstrdup(sql_statement); - step->force_autocommit = false; - step->exec_type = is_temp ? 
EXEC_ON_DATANODES : EXEC_ON_ALL_NODES; - step->is_temp = is_temp; - ExecRemoteUtility(step); - pfree_ext(step->sql_statement); - pfree_ext(step); } } #endif @@ -4904,7 +5024,11 @@ void ExecuteTruncate(TruncateStmt* stmt) /* We can clean up the EState now */ FreeExecutorState(estate); - /* And close the rels (can't do this while EState still holds refs) */ + /* + * Close any rels opened by CASCADE (can't do this while EState still + * holds refs) + */ + rels = list_difference_ptr(rels, explicit_rels); foreach (cell, rels) { Relation rel = (Relation)lfirst(cell); @@ -6418,6 +6542,14 @@ static ObjectAddress RenameTableFeature(RenameStmt* stmt) storageTable[tableName_Count].schemaname = pstrdup(orgiSchema); storageTable[tableName_Count].relname = pstrdup(orgitable); tableName_Count++; + + if (temp_name->schemaname == NULL) { + if (t_thrd.mem_cxt.msg_mem_cxt) { + temp_name->schemaname = MemoryContextStrdup(t_thrd.mem_cxt.msg_mem_cxt, orgiSchema); + } else { + temp_name->schemaname = pstrdup(orgiSchema); + } + } } if (stmt->renameTargetList->length >= 2) { @@ -6489,6 +6621,10 @@ static ObjectAddress RenameTableFeature(RenameStmt* stmt) modfyNameSpace = orgiNameSpace; } + if (temp_name->schemaname == NULL) { + temp_name->schemaname = MemoryContextStrdup(t_thrd.mem_cxt.msg_mem_cxt, orgiSchema); + } + /* Check whether exist Synonym on old table name and new table name */ if (orgiSchema == NULL) { orgiSchema = get_namespace_name(relnamespace); @@ -6962,6 +7098,67 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname, char* newschema relation_close(targetrelation, NoLock); } +/* + * ResetRelRewrite - reset relrewrite + */ +void ResetRelRewrite(Oid myrelid) +{ + HeapTuple newTuple; + Relation pg_class; + HeapTuple tuple; + Datum relOptions; + Datum newOptions; + List* defList = NIL; + Datum replVal[Natts_pg_class]; + bool replNull[Natts_pg_class]; + bool replRepl[Natts_pg_class]; + bool isNull = false; + bool removed = false; + errno_t rc; + + /* + * Find 
relation's pg_class tuple. + */ + pg_class = heap_open(RelationRelationId, RowExclusiveLock); + tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(myrelid)); + if (!HeapTupleIsValid(tuple)) { + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not find tuple for relation %u", myrelid))); + } + + relOptions = fastgetattr(tuple, Anum_pg_class_reloptions, RelationGetDescr(pg_class), &isNull); + if (!isNull) { + defList = untransformRelOptions(relOptions); + defList = RemoveRelOption(defList, "relrewrite", &removed); + if (removed) { + newOptions = transformRelOptions((Datum)0, defList, NULL, NULL, false, false); + + rc = memset_s(replVal, sizeof(replVal), 0, sizeof(replVal)); + securec_check(rc, "\0", "\0"); + rc = memset_s(replNull, sizeof(replNull), false, sizeof(replNull)); + securec_check(rc, "\0", "\0"); + rc = memset_s(replRepl, sizeof(replRepl), false, sizeof(replRepl)); + securec_check(rc, "\0", "\0"); + if (PointerIsValid(newOptions)) { + replVal[Anum_pg_class_reloptions - 1] = newOptions; + replNull[Anum_pg_class_reloptions - 1] = false; + } else { + replNull[Anum_pg_class_reloptions - 1] = true; + } + replRepl[Anum_pg_class_reloptions - 1] = true; + newTuple = heap_modify_tuple(tuple, RelationGetDescr(pg_class), replVal, replNull, replRepl); + simple_heap_update(pg_class, &newTuple->t_self, newTuple); + CatalogUpdateIndexes(pg_class, newTuple); + + heap_freetuple_ext(newTuple); + } + list_free_ext(defList); + } + heap_freetuple_ext(tuple); + + heap_close(pg_class, RowExclusiveLock); +} + /* * @@GaussDB@@ * Target : data distributed by range or list @@ -7534,7 +7731,7 @@ typedef enum { * 4. 
the default value is actually null */ static AT_INSTANT_DEFAULT_VALUE shouldUpdateAllTuples( - Expr* defaultExpr, Oid typeOid, int attLen, bool attByVal, bytea** defaultVal) + Expr* defaultExpr, Oid typeOid, int attLen, bool attByVal, bytea** defaultVal, char** initdefval) { bool isNull = false; int i; @@ -7609,6 +7806,13 @@ static AT_INSTANT_DEFAULT_VALUE shouldUpdateAllTuples( (void)MemoryContextSwitchTo(oldcxt); if (!isNull) { + if (initdefval) { + Oid typoutput = 0; + bool typisvarlena = false; + getTypeOutputInfo(typeOid, &typoutput, &typisvarlena); + *initdefval = pstrdup(OidOutputFunctionCall(typoutput, value)); + } + if (attByVal) { result = (bytea*)palloc(attLen + VARHDRSZ); SET_VARSIZE(result, attLen + VARHDRSZ); @@ -8258,6 +8462,8 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu * numbers in different children). */ cmd = (AlterTableCmd*)copyObject(cmd); + + cmd->recursing = recursing; /* * Do permissions checking, recursion to child tables if needed, and any * additional phase-1 processing needed. 
@@ -8409,6 +8615,7 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu break; case AT_AlterColumnType: /* ALTER COLUMN TYPE */ ATSimplePermissions(rel, ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE); + EventTriggerAlterTypeStart(cmd, rel); /* Performs own recursion */ ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode); pass = AT_PASS_ALTER_TYPE; @@ -9060,6 +9267,7 @@ static void ATCreateColumComments(Oid relOid, ColumnDef* columnDef) static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode, bool fromReplace) { ObjectAddress address = InvalidObjectAddress; + bool commandCollected = false; elog(ES_LOGLEVEL, "[ATExecCmd] cmd subtype: %d", cmd->subtype); if (PARTITION_DDL_CMD(cmd->subtype) && RELATION_IS_PARTITIONED(rel)) { @@ -9107,9 +9315,12 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT break; case AT_AddStatistics: /* ADD STATISTICS */ ATExecAddStatistics(rel, cmd->def, lockmode); + /* prepare work es_check_alter_table_statistics broken the cmd->def, don't pass to event trigger */ + commandCollected = true; break; case AT_DeleteStatistics: /* DELETE STATISTICS */ ATExecDeleteStatistics(rel, cmd->def, lockmode); + commandCollected = true; break; case AT_SetOptions: /* ALTER COLUMN SET ( options ) */ address = ATExecSetOptions(rel, cmd->name, cmd->def, false, lockmode); @@ -9204,6 +9415,8 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT break; case AT_AlterColumnType: /* ALTER COLUMN TYPE */ address = ATExecAlterColumnType(tab, rel, cmd, lockmode); + EventTriggerAlterTypeEnd((Node *) cmd, address, tab->rewrite); + commandCollected = true; break; case AT_AlterColumnGenericOptions: /* ALTER COLUMN OPTIONS */ address = ATExecAlterColumnGenericOptions(rel, cmd->name, (List*)cmd->def, lockmode); @@ -9398,12 +9611,6 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation 
rel, AlterT UpdatePgObjectMtime(tab->relid, objectType); } - /* - * Report the subcommand to interested event triggers. - */ - EventTriggerCollectAlterTableSubcmd((Node *) cmd, address); - - /* take ExclusiveLock to avoid PARTITION DDL COMMIT until we finish the InitPlan. Oid info will be masked here, and * be locked in CommitTransaction. Distribute mode doesn't support partition DDL/DML parallel work, no need this * action */ @@ -9413,6 +9620,12 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT } #endif + /* + * Report the subcommand to interested event triggers. + */ + if (cmd && !commandCollected) + EventTriggerCollectAlterTableSubcmd((Node *) cmd, address, tab->rewrite); + /* * Bump the command counter to ensure the next subcommand in the sequence * can see the changes so far @@ -9759,6 +9972,185 @@ static void UpdateGeneratedColumnIsnull(AlteredTableInfo* tab, bool* isnull, boo } } +static void repl_update_addcolumn_default(AlteredTableInfo* tab, Relation oldrel, Relation newrel, List* notnull_attrs) +{ + TupleDesc newTupDesc; + int i; + ListCell* l = NULL; + EState* estate = NULL; + CommandId mycid; + BulkInsertState bistate; + uint32 hi_options; + newTupDesc = RelationGetDescr(oldrel); /* includes all mods */ + + CommandCounterIncrement(); + + mycid = GetCurrentCommandId(true); + + bistate = GetBulkInsertState(); + hi_options = TABLE_INSERT_SKIP_FSM; + + estate = CreateExecutorState(); + + ExprContext* econtext = NULL; + Datum* values = NULL; + bool* isnull = NULL; + bool* repl = NULL; + TupleTableSlot* newslot = NULL; + TableScanDesc scan; + HeapTuple tuple; + HeapTuple htup; + MemoryContext oldCxt; + errno_t rc = EOK; + + /* estate has been freed, prepare again */ + foreach (l, tab->constraints) { + NewConstraint* con = (NewConstraint*)lfirst(l); + if (con->isdisable) + continue; + + switch (con->contype) { + case CONSTR_CHECK: + if (estate->es_is_flt_frame){ + con->qualstate = (List*)ExecPrepareExprList((List*)con->qual, 
estate); + } else { + con->qualstate = (List*)ExecPrepareExpr((Expr*)con->qual, estate); + } + break; + case CONSTR_FOREIGN: + /* Nothing to do here */ + break; + default: { + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized constraint type: %d", (int)con->contype))); + } break; + } + } + + econtext = GetPerTupleExprContext(estate); + newslot = MakeSingleTupleTableSlot(newTupDesc, false, oldrel->rd_tam_ops); + + i = newTupDesc->natts; + values = (Datum*)palloc(i * sizeof(Datum)); + isnull = (bool*)palloc(i * sizeof(bool)); + repl = (bool*)palloc(i * sizeof(bool)); + rc = memset_s(values, i * sizeof(Datum), 0, i * sizeof(Datum)); + securec_check(rc, "\0", "\0"); + rc = memset_s(isnull, i * sizeof(bool), true, i * sizeof(bool)); + securec_check(rc, "\0", "\0"); + rc = memset_s(repl, i * sizeof(bool), false, i * sizeof(bool)); + securec_check(rc, "\0", "\0"); + + + scan = tableam_scan_begin(newrel, SnapshotNow, 0, NULL); + oldCxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + + ((HeapScanDesc) scan)->rs_tupdesc = newTupDesc; + while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { + ItemPointer searchSlotTid; + searchSlotTid = tableam_tops_get_t_self(newrel, tuple); + tableam_tops_deform_tuple(tuple, newTupDesc, values, isnull); + + (void)ExecStoreTuple(tuple, newslot, InvalidBuffer, false); + econtext->ecxt_scantuple = newslot; + + foreach (l, tab->newvals) { + NewColumnValue* ex = (NewColumnValue*)lfirst(l); + /* ex->attnum may not be the final attribute number */ + if (ex->make_dml_change) { + int attnum = -1; + for (int n = 0; n < newTupDesc->natts; ++n) { + Form_pg_attribute thisattr = &newTupDesc->attrs[n]; + + /* skip the dropped and rewritten columns */ + if (!thisattr->attisdropped && ex->col_name && + pg_strcasecmp(ex->col_name, NameStr(thisattr->attname)) == 0) { + attnum = thisattr->attnum; + break; + } + } + + if (attnum <= 0) { + ereport(ERROR, ( + errmsg("can not find column \"%s\",
ex->col_name))); + } + + values[attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[attnum - 1], NULL); + repl[attnum - 1] = true; + } + } + + htup = heap_modify_tuple(tuple, newTupDesc, values, isnull, repl); + + /* Now check any constraints on the possibly-changed tuple */ + (void)ExecStoreTuple(htup, newslot, InvalidBuffer, false); + econtext->ecxt_scantuple = newslot; + + foreach (l, notnull_attrs) { + int attn = lfirst_int(l); + + /* replace heap_attisnull with relationAttIsNull + * due to altering table instantly + */ + if (relationAttIsNull(htup, attn + 1, newTupDesc)) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" contains null values", NameStr(newTupDesc->attrs[attn].attname)))); + } + + foreach (l, tab->constraints) { + NewConstraint* con = (NewConstraint*)lfirst(l); + + switch (con->contype) { + case CONSTR_CHECK: + if (!ExecQual(con->qualstate, econtext, true)) + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("check constraint \"%s\" is violated by some row", con->name))); + break; + case CONSTR_FOREIGN: + /* Nothing to do here */ + break; + default: { + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized constraint type: %d", (int)con->contype))); + } + } + } + + + TM_FailureData tmfd; + bool updateIndexes = false; + Bitmapset *modifiedIdxAttrs = NULL; + bool allowInplaceUpdate = true; + + (void)tableam_tuple_update(newrel, NULL, searchSlotTid, htup, mycid, InvalidSnapshot, estate->es_snapshot, true, &newslot, &tmfd, &updateIndexes, &modifiedIdxAttrs, + false, allowInplaceUpdate); + + ResetExprContext(econtext); + + CHECK_FOR_INTERRUPTS(); + } + + MemoryContextSwitchTo(oldCxt); + tableam_scan_end(scan); + ExecDropSingleTupleTableSlot(newslot); + + FreeExecutorState(estate); + FreeBulkInsertState(bistate); + if (((hi_options & TABLE_INSERT_SKIP_WAL) || enable_heap_bcm_data_replication()) && + !RelationIsSegmentTable(newrel)) + heap_sync(newrel); + /* + * After the 
temporary table is rewritten, the relfilenode changes. + * We need to find new TmptableCacheEntry with new relfilenode. + * Then set new auto_increment counter value in new TmptableCacheEntry. + */ + CopyTempAutoIncrement(oldrel, newrel); + +} + /* * change ATRewriteTable() input: oid->rel */ @@ -9783,6 +10175,9 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat oldTupDesc = tab->oldDesc; newTupDesc = RelationGetDescr(oldrel); /* includes all mods */ + bool repl_modify = false; + bool need_dml_change_col = false; + /* * Prepare a BulkInsertState and options for heap_insert. Because we're * building a new heap, we can skip WAL-logging and fsync it to disk at @@ -9834,11 +10229,27 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat } } + if (newrel && RelationIsRowFormat(oldrel) && + tab->rewrite > 0 && XLogLogicalInfoActive() && + oldrel->rd_rel->relpersistence != RELPERSISTENCE_TEMP && + oldrel->relreplident == REPLICA_IDENTITY_FULL) { + + repl_modify = true; + } + foreach (l, tab->newvals) { NewColumnValue* ex = (NewColumnValue*)lfirst(l); /* expr already planned */ ex->exprstate = ExecInitExpr((Expr*)ex->expr, NULL); + + if (ex->is_generated || ex->is_alter_using) { + repl_modify = false; + } + + if (ex->make_dml_change) { + need_dml_change_col = true; + } } notnull_attrs = NIL; @@ -10184,7 +10595,13 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat continue; } - values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1]); + if (repl_modify && ex->make_dml_change) { + isnull[ex->attnum - 1] = true; + } else { + values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1]); + } + + if (ex->is_autoinc) { need_autoinc = (autoinc_attnum > 0); } @@ -10230,61 +10647,74 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat */ tuple = EvaluateGenExpr(tab, tuple, newTupDesc, econtext, values, 
isnull); - foreach (l, notnull_attrs) { - int attn = lfirst_int(l); - - /* replace heap_attisnull with relationAttIsNull - * due to altering table instantly - */ - if (relationAttIsNull(tuple, attn + 1, newTupDesc)) - ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" contains null values", NameStr(newTupDesc->attrs[attn].attname)))); - } - - foreach (l, tab->constraints) { - NewConstraint* con = (NewConstraint*)lfirst(l); - ListCell* lc = NULL; + if (!repl_modify) { + foreach (l, notnull_attrs) { + int attn = lfirst_int(l); - switch (con->contype) { - case CONSTR_CHECK: - { - if (estate->es_is_flt_frame){ - foreach (lc, con->qualstate) { - ExprState* exprState = (ExprState*)lfirst(lc); + /* replace heap_attisnull with relationAttIsNull + * due to altering table instantly + */ + if (relationAttIsNull(tuple, attn + 1, newTupDesc)) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" contains null values", NameStr(newTupDesc->attrs[attn].attname)))); + } - if (!ExecCheckByFlatten(exprState, econtext)) + foreach (l, tab->constraints) { + NewConstraint* con = (NewConstraint*)lfirst(l); + ListCell* lc = NULL; + + switch (con->contype) { + case CONSTR_CHECK: + { + if (estate->es_is_flt_frame){ + foreach (lc, con->qualstate) { + ExprState* exprState = (ExprState*)lfirst(lc); + + if (!ExecCheckByFlatten(exprState, econtext)) + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("check constraint \"%s\" is violated by some row", + con->name))); + } + } else { + if (!ExecQualByRecursion(con->qualstate, econtext, true)){ ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint \"%s\" is violated by some row", - con->name))); + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("check constraint \"%s\" is violated by some row", + con->name))); + } } - } else { - if (!ExecQualByRecursion(con->qualstate, econtext, true)){ - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint 
\"%s\" is violated by some row", - con->name))); } - } - } + break; + case CONSTR_FOREIGN: + /* Nothing to do here */ break; - case CONSTR_FOREIGN: - /* Nothing to do here */ - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized constraint type: %d", (int)con->contype))); + default: { + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized constraint type: %d", (int)con->contype))); + } } } } /* Write the tuple out to the new relation */ if (newrel) { - (void)tableam_tuple_insert(newrel, tuple, mycid, hi_options, bistate); + if (repl_modify) { + /* deal with "add column c1 mytyp default expr" + */ + tuple = (HeapTuple)heap_form_tuple(newTupDesc, values, isnull); + (void)tableam_tuple_insert(newrel, tuple, mycid, hi_options, bistate); - if (autoinc > 0) { - SetRelAutoIncrement(oldrel, newTupDesc, autoinc); + if (autoinc > 0) { + SetRelAutoIncrement(oldrel, newTupDesc, autoinc); + } + } else { + (void)tableam_tuple_insert(newrel, tuple, mycid, hi_options, bistate); + + if (autoinc > 0) { + SetRelAutoIncrement(oldrel, newTupDesc, autoinc); + } } } ResetExprContext(econtext); @@ -10322,6 +10752,10 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat */ CopyTempAutoIncrement(oldrel, newrel); } + + if (repl_modify) { + repl_update_addcolumn_default(tab, oldrel, newrel, notnull_attrs); + } } static void ATRewriteTable(AlteredTableInfo* tab, Relation oldrel, Relation newrel) @@ -10913,7 +11347,7 @@ static void ATPrepCheckDefault(Node* node) } static FORCE_INLINE void ATExecAppendDefValExpr(_in_ AttrNumber attnum, _in_ Expr* defval, _out_ AlteredTableInfo* tab, - ColumnDef *colDef, bool is_autoinc, bool is_addloc) + ColumnDef *colDef, bool is_autoinc, bool is_addloc, bool make_dml_change = false) { NewColumnValue* newval; @@ -10928,6 +11362,7 @@ static FORCE_INLINE void ATExecAppendDefValExpr(_in_ AttrNumber attnum, _in_ Exp newval->is_generated = 
(colDef->generatedCol != '\0'); newval->col_name = pstrdup(colDef->colname); newval->is_autoinc = is_autoinc; + newval->make_dml_change = make_dml_change; tab->rewrite |= AT_REWRITE_ALTER_PERSISTENCE; } @@ -13017,13 +13452,40 @@ static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relat errmsg("It's not supported to alter table add column default with nextval expression."))); } else if (RelationIsCUFormat(rel)) { ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, false); - } else if (tab->rewrite>0 || colDef->generatedCol || - RelationUsesSpaceType(rel->rd_rel->relpersistence) == SP_TEMP) { + } else if (colDef->generatedCol ) { ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, true); + } else if (RelationUsesSpaceType(rel->rd_rel->relpersistence) == SP_TEMP) { + bytea* value = NULL; + colDef->initdefval = NULL; + (void)shouldUpdateAllTuples(defval, attribute.atttypid, attribute.attlen, attribute.attbyval, &value, &colDef->initdefval); + ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, true); + } else if (tab->rewrite) { + bytea* value = NULL; + bool addcolumndef = false; + + if (XLogLogicalInfoActive() && RelationIsRowFormat(rel)) { + if (IsA(defval, Const) && constIsNull((Const*)defval)) { + ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, true); + } else { + colDef->initdefval = NULL; + AT_INSTANT_DEFAULT_VALUE ret = + shouldUpdateAllTuples(defval, attribute.atttypid, attribute.attlen, attribute.attbyval, &value, &colDef->initdefval); + + if (ret == DEFAULT_OTHER) { + addcolumndef = true; + colDef->initdefval = NULL; + } + + ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, true, addcolumndef); + } + } else { + ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, true); + } } else { bytea* value = NULL; + colDef->initdefval = NULL; AT_INSTANT_DEFAULT_VALUE ret = - shouldUpdateAllTuples(defval, attribute.atttypid, attribute.attlen, 
attribute.attbyval, &value); + shouldUpdateAllTuples(defval, attribute.atttypid, attribute.attlen, attribute.attbyval, &value, &colDef->initdefval); if (ret == DEFAULT_NOT_NULL_CONST) { Assert(value != NULL); @@ -13035,6 +13497,7 @@ static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relat */ testNotNull = false; } else if (ret == DEFAULT_OTHER) { + colDef->initdefval = NULL; if (isDfsTable) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), @@ -13044,7 +13507,7 @@ static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relat "2. the storage length of default value may be greater than 127.\n" "3. the data type of new column is not supported.")))); } - ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, false); + ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, false, true); } /* nothing to do if ret is DEFAULT_NULL */ } @@ -16604,6 +17067,7 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation newval->newattnum = 0; newval->col_name = pstrdup(colName); newval->generate_attnum = 0; + newval->is_alter_using = (def->raw_default ? 
true : false); newval->is_updated = false; tab->newvals = lappend(tab->newvals, newval); @@ -18045,7 +18509,7 @@ static void AttachEachCommandInQueue( con = (Constraint*)cmd->def; con->old_pktable_oid = refRelId; /* rewriting neither side of a FK */ - if (con->contype == CONSTR_FOREIGN && !rewrite && tab->rewrite <= 0) + if (con->contype == CONSTR_FOREIGN && !rewrite && tab->rewrite == 0) TryReuseForeignKey(oldId, con); cmd->subtype = AT_ReAddConstraint; tab->subcmds[AT_PASS_OLD_CONSTR] = lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index 2b5dd2d8ba..5000fc0196 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -1671,7 +1671,8 @@ bool isAllTempObjects(Node* parse_tree, const char* query_string, bool sent_to_r foreach (cell, ((DropStmt*)parse_tree)->objects) { List* obj_name = (List*)lfirst(cell); char* name = NameListToString(obj_name); - if (isTempNamespaceName(name) || isToastTempNamespaceName(name)) + if (isTempNamespaceName(name) || isToastTempNamespaceName(name) + || strcmp(name, "pg_temp") == 0) return true; } @@ -3394,15 +3395,6 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, GrantRole((GrantRoleStmt*)parse_tree); #endif break; - case T_CreateEventStmt: /* CREATE EVENT */ - CreateEventCommand((CreateEventStmt*)parse_tree); - break; - case T_AlterEventStmt: /* CREATE EVENT */ - AlterEventCommand((AlterEventStmt*)parse_tree); - break; - case T_DropEventStmt: /* DROP EVENT */ - DropEventCommand((DropEventStmt*)parse_tree); - break; case T_ShowEventStmt: /* SHOW EVENTS */ ShowEventCommand((ShowEventStmt*)parse_tree, dest); break; @@ -6841,6 +6833,18 @@ ProcessUtilitySlow(Node *parse_tree, commandCollected = true; break; + case T_CreateEventStmt: /* CREATE EVENT */ + address = CreateEventCommand((CreateEventStmt*)parse_tree); + + break; + case T_AlterEventStmt: /* CREATE EVENT */ + 
address = AlterEventCommand((AlterEventStmt*)parse_tree); + break; + case T_DropEventStmt: /* DROP EVENT */ + DropEventCommand((DropEventStmt*)parse_tree); + break; + + case T_TableOfTypeStmt: /* CREATE TYPE AS TABLE OF */ { TableOfTypeStmt* stmt = (TableOfTypeStmt*)parse_tree; diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index 8ebd16f043..3e433f5c79 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -254,6 +254,7 @@ static relopt_int intRelOpts[] = { 0, 7}, {{ "collate", "set relation default collation", RELOPT_KIND_HEAP }, 0, 0, 2000000000 }, + {{ "relrewrite", "set relation relrewrite", RELOPT_KIND_HEAP | RELOPT_KIND_TOAST }, 0, 0, 2000000000 }, /* list terminator */ {{NULL}} }; @@ -2048,7 +2049,8 @@ bytea *default_reloptions(Datum reloptions, bool validate, relopt_kind kind) /* SPQ index B-Tree build: btree index build use spq */ {"spq_build", RELOPT_TYPE_STRING, offsetof(StdRdOptions, spq_bt_build_offset)}, #endif - { "deduplication", RELOPT_TYPE_BOOL, offsetof(StdRdOptions, deduplication)} + { "deduplication", RELOPT_TYPE_BOOL, offsetof(StdRdOptions, deduplication)}, + { "relrewrite", RELOPT_TYPE_INT, offsetof(StdRdOptions, relrewrite)}, }; options = parseRelOptions(reloptions, validate, kind, &numoptions); @@ -2757,8 +2759,8 @@ void check_collate_in_options(List *user_options) */ void ForbidOutUsersToSetInnerOptions(List *userOptions) { - static const char* innnerOpts[] = { - "internal_mask", "start_ctid_internal", "end_ctid_internal", "append_mode_internal", "wait_clean_gpi"}; + static const char *innnerOpts[] = {"internal_mask", "start_ctid_internal", "end_ctid_internal", + "append_mode_internal", "wait_clean_gpi", "relrewrite"}; if (userOptions != NULL) { int firstInvalidOpt = -1; diff --git a/src/gausskernel/storage/access/heap/heapam.cpp b/src/gausskernel/storage/access/heap/heapam.cpp index 
1f0f55f651..02385ea1d9 100755 --- a/src/gausskernel/storage/access/heap/heapam.cpp +++ b/src/gausskernel/storage/access/heap/heapam.cpp @@ -9550,6 +9550,13 @@ void heap3_redo(XLogReaderState* record) case XLOG_HEAP3_INVALID: heap_xlog_invalid(record); break; + case XLOG_HEAP3_TRUNCATE: + /* + * TRUNCATE is a no-op because the actions are already logged as + * SMGR WAL records. TRUNCATE WAL record only exists for logical + * decoding. + */ + break; default: ereport(PANIC, (errmsg("heap3_redo: unknown op code %hhu", info))); } diff --git a/src/gausskernel/storage/access/redo/redo_heapam.cpp b/src/gausskernel/storage/access/redo/redo_heapam.cpp index 28ce6f5327..bd9df24908 100755 --- a/src/gausskernel/storage/access/redo/redo_heapam.cpp +++ b/src/gausskernel/storage/access/redo/redo_heapam.cpp @@ -1631,6 +1631,8 @@ XLogRecParseState *Heap3RedoParseToBlock(XLogReaderState *record, uint32 *blockn case XLOG_HEAP3_INVALID: recordblockstate = HeapXlogInvalidParseBlock(record, blocknum); break; + case XLOG_HEAP3_TRUNCATE: + break; default: ereport(PANIC, (errmsg("Heap3RedoParseToBlock: unknown op code %u", info))); } @@ -1650,6 +1652,8 @@ void Heap3RedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatar case XLOG_HEAP3_INVALID: HeapXlogInvalidBlock(blockhead, blockdatarec, bufferinfo); break; + case XLOG_HEAP3_TRUNCATE: + break; default: ereport(PANIC, (errmsg("heap3_redo_block: unknown op code %u", info))); } diff --git a/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp index f72e292e5b..8ad4427bdc 100644 --- a/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp @@ -167,6 +167,17 @@ void heap_desc(StringInfo buf, XLogReaderState *record) xl_heap_base_shift *xlrec = (xl_heap_base_shift *)rec; appendStringInfo(buf, "base_shift delta %ld multi %d", xlrec->delta, xlrec->multi); + } else if (info == XLOG_HEAP3_TRUNCATE) { + xl_heap_truncate 
*xlrec = (xl_heap_truncate *) rec; + int i; + + if (xlrec->flags & XLH_TRUNCATE_CASCADE) + appendStringInfo(buf, "cascade "); + if (xlrec->flags & XLH_TRUNCATE_RESTART_SEQS) + appendStringInfo(buf, "restart_seqs "); + appendStringInfo(buf, "nrelids %u relids", xlrec->nrelids); + for (i = 0; i < (int)xlrec->nrelids; i++) + appendStringInfo(buf, " %u", xlrec->relids[i]); } else appendStringInfo(buf, "UNKNOWN"); } @@ -339,6 +350,8 @@ const char* heap3_type_name(uint8 subtype) return "heap3_rewrite"; } else if (info == XLOG_HEAP3_INVALID) { return "heap3_invalid"; + } else if (info == XLOG_HEAP3_TRUNCATE) { + return "heap3_truncate"; } else { return "unkown_type"; } @@ -371,6 +384,8 @@ void heap3_desc(StringInfo buf, XLogReaderState *record) appendStringInfo(buf, "]"); } } + } else if (info == XLOG_HEAP3_TRUNCATE) { + appendStringInfo(buf, "XLOG_HEAP_TRUNCATE"); } else { appendStringInfo(buf, "UNKNOWN"); } diff --git a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp index e05c555f23..c63c0aea78 100755 --- a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp @@ -204,7 +204,7 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { { DispatchSeqRecord, RmgrRecordInfoValid, RM_SEQ_ID, XLOG_SEQ_LOG, XLOG_SEQ_LOG }, { DispatchSpgistRecord, RmgrRecordInfoValid, RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, XLOG_SPGIST_VACUUM_REDIRECT }, { DispatchRepSlotRecord, RmgrRecordInfoValid, RM_SLOT_ID, XLOG_SLOT_CREATE, XLOG_TERM_LOG }, - { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, XLOG_HEAP3_INVALID }, + { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, XLOG_HEAP3_TRUNCATE }, { DispatchBarrierRecord, RmgrRecordInfoValid, RM_BARRIER_ID, XLOG_BARRIER_CREATE, XLOG_BARRIER_SWITCHOVER }, #ifdef ENABLE_MOT {DispatchMotRecord, NULL, RM_MOT_ID, 0, 0}, diff 
--git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp index 95e3a4271a..2e308b8231 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp @@ -203,7 +203,7 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { { DispatchSeqRecord, RmgrRecordInfoValid, RM_SEQ_ID, XLOG_SEQ_LOG, XLOG_SEQ_LOG }, { DispatchSpgistRecord, RmgrRecordInfoValid, RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, XLOG_SPGIST_VACUUM_REDIRECT }, { DispatchRepSlotRecord, RmgrRecordInfoValid, RM_SLOT_ID, XLOG_SLOT_CREATE, XLOG_TERM_LOG }, - { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, XLOG_HEAP3_INVALID }, + { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, XLOG_HEAP3_TRUNCATE }, { DispatchBarrierRecord, RmgrRecordInfoValid, RM_BARRIER_ID, XLOG_BARRIER_CREATE, XLOG_BARRIER_SWITCHOVER }, #ifdef ENABLE_MOT {DispatchMotRecord, NULL, RM_MOT_ID, 0, 0}, diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp index 2be3f91475..81151b609f 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp @@ -196,7 +196,7 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { { DispatchSeqRecord, RmgrRecordInfoValid, RM_SEQ_ID, XLOG_SEQ_LOG, XLOG_SEQ_LOG }, { DispatchSpgistRecord, RmgrRecordInfoValid, RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, XLOG_SPGIST_VACUUM_REDIRECT }, { DispatchRepSlotRecord, RmgrRecordInfoValid, RM_SLOT_ID, XLOG_SLOT_CREATE, XLOG_TERM_LOG }, - { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, XLOG_HEAP3_INVALID }, + { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, 
XLOG_HEAP3_NEW_CID, XLOG_HEAP3_TRUNCATE }, { DispatchBarrierRecord, RmgrRecordInfoValid, RM_BARRIER_ID, XLOG_BARRIER_CREATE, XLOG_BARRIER_SWITCHOVER }, #ifdef ENABLE_MOT diff --git a/src/gausskernel/storage/replication/logical/ddlmessage.cpp b/src/gausskernel/storage/replication/logical/ddlmessage.cpp index c4d7982a3f..4bd89c5929 100644 --- a/src/gausskernel/storage/replication/logical/ddlmessage.cpp +++ b/src/gausskernel/storage/replication/logical/ddlmessage.cpp @@ -55,7 +55,7 @@ LogLogicalDDLMessage(const char *prefix, Oid relid, DeparsedCommandType cmdtype, char *tmp = pstrdup(message); char *owner = NULL; - if (cmdtype != DCT_TableDropStart) { + if (cmdtype != DCT_TableDropStart && cmdtype != DCT_TypeDropStart) { char *decodestring = deparse_ddl_json_to_string(tmp, &owner); elog(LOG, "will decode to : %s, [owner %s]", decodestring, owner ? owner : "none"); } diff --git a/src/gausskernel/storage/replication/logical/ddltrigger.cpp b/src/gausskernel/storage/replication/logical/ddltrigger.cpp index 71f6cc760c..0a19ffc02f 100644 --- a/src/gausskernel/storage/replication/logical/ddltrigger.cpp +++ b/src/gausskernel/storage/replication/logical/ddltrigger.cpp @@ -22,6 +22,8 @@ #include "commands/event_trigger.h" #include "funcapi.h" #include "lib/ilist.h" +#include "nodes/makefuncs.h" +#include "parser/parse_type.h" #include "replication/ddlmessage.h" #include "tcop/ddldeparse.h" #include "utils/lsyscache.h" @@ -45,7 +47,7 @@ * accessed in those functions may not even exist on the subscriber. 
*/ static void -check_command_publishable(ddl_deparse_context context) +check_command_publishable(ddl_deparse_context context, bool rewrite) { if (context.max_volatility == PROVOLATILE_VOLATILE) ereport(ERROR, @@ -53,24 +55,24 @@ check_command_publishable(ddl_deparse_context context) errmsg("cannot use volatile function in this command because it cannot be replicated in DDL replication"))); } -bool relation_support_ddl_replication(Oid relid) +bool relation_support_ddl_replication(Oid relid, bool rewrite) { - bool support = false; - + bool support = true; + Relation rel = relation_open(relid, AccessShareLock); - /* if relpersistence is 'p', not support */ + Oid relrewrite = RelationGetRelrewriteOption(rel); if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) { - return false; - } - if (RelationIsRowFormat(rel) && RelationIsAstoreFormat(rel)) { - support = true; - } else if (RelationIsIndex(rel)) { - if(IS_BTREE(rel) && !RelationAmIsBtree(rel)) { - return false; + support = false; + } else if (rel->rd_rel->relkind == RELKIND_RELATION) { + if (pg_strcasecmp(RelationGetOrientation(rel), ORIENTATION_ROW)) { + support = false; + } else if (OidIsValid(relrewrite)) { + support = false; + } else if (!RelationIsAstoreFormat(rel)) { + support = false; } - support = true; - } else if(RelationIsSequnce(rel)) { - support = true; + } else if (rel->rd_rel->relkind == RELKIND_INDEX && RelationIsUstoreIndex(rel)) { + support = false; } relation_close(rel, AccessShareLock); @@ -78,6 +80,25 @@ bool relation_support_ddl_replication(Oid relid) return support; } +bool type_support_ddl_replication(Oid typid) +{ + bool support = false; + HeapTuple typtup; + Form_pg_type typform; + + typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); + if (!HeapTupleIsValid(typtup)) + elog(ERROR, "cache lookup failed for type with OID %u", typid); + + typform = (Form_pg_type) GETSTRUCT(typtup); + if (typform->typtype == TYPTYPE_COMPOSITE || typform->typtype == TYPTYPE_ENUM) { + support = true; 
+ } + + ReleaseSysCache(typtup); + return support; +} + /* * Deparse the ddl command and log it prior to * execution. Currently only used for DROP TABLE command @@ -105,6 +126,25 @@ publication_deparse_ddl_command_start(PG_FUNCTION_ARGS) Node *object = (Node*)lfirst(cell1); ObjectAddress address; Relation relation = NULL; + char *schemaname = NULL; + char *objname = NULL; + TypeName *typname = NULL; + Node *ptype = NULL; + + if (stmt->removeType == OBJECT_TYPE) { + /* for DROP TYPE */ + Assert(IsA(object, List) && list_length((List*)object) >= 1); + ptype = (Node *) linitial((List*)object); + if (ptype->type == T_String) + typname = makeTypeNameFromNameList((List*)object); + else if (ptype->type == T_TypeName) + typname = (TypeName *)ptype; + + objname = TypeNameToString(typname); + } else { + /* for DROP TABLE/DROP INDEX/DROP MATERIALIZED VIEW */ + DeconstructQualifiedName((List*)object, &schemaname, &objname); + } address = get_object_address(stmt->removeType, IsA(object, List) ? (List*)object : list_make1(object),
+ */ + if (support) + LogLogicalDDLMessage("deparse", address.objectId, DCT_TableDropStart, + command, strlen(command) + 1); + + relation_close(relation, NoLock); + } else if (stmt->removeType == OBJECT_TYPE) { + support = type_support_ddl_replication(address.objectId); + if (support) + LogLogicalDDLMessage("deparse", address.objectId, + DCT_TypeDropStart, command, strlen(command) + 1); + } + } + return PointerGetDatum(NULL); +} + +static void finish_alter_table_ddl_command(CollectedCommand* cmd) +{ + ListCell *lc; + List *cmds; + Oid relid; + DeparsedCommandType type; + + relid = cmd->d.alterTable.objectId; + type = DCT_TableAlter; + + cmds = deparse_altertable_end(cmd); + foreach(lc, cmds) { + char* json_string = (char*)lfirst(lc); + if (json_string) { + LogLogicalDDLMessage("deparse", relid, type, json_string, + strlen(json_string) + 1); + } + } +} + +/* + * publication_deparse_table_rewrite + * + * Deparse the ddl table rewrite command and log it. + */ +Datum publication_deparse_table_rewrite(PG_FUNCTION_ARGS) +{ + bool support = false; + CollectedCommand *cmd; + char *json_string; - if (get_rel_relkind(address.objectId)) - support = relation_support_ddl_replication(address.objectId); - - /* - * Do not generate wal log for commands whose target table is a - * temporary or unlogged table. - * - * XXX We may generate wal logs for unlogged tables in the future so - * that unlogged tables can also be created and altered on the - * subscriber side. This makes it possible to directly replay the SET - * LOGGED command and the incoming rewrite message without creating a - * new table. 
- */ - if (support) - LogLogicalDDLMessage("deparse", address.objectId, DCT_TableDropStart, - command, strlen(command) + 1); - - relation_close(relation, NoLock); + if (!CALLED_AS_EVENT_TRIGGER(fcinfo)) + elog(ERROR, "not fired by event trigger manager"); + + cmd = currentEventTriggerState->currentCommand; + Assert(cmd && cmd->d.alterTable.rewrite); + + if (get_rel_relkind(cmd->d.alterTable.objectId)) + support = relation_support_ddl_replication(cmd->d.alterTable.objectId, true); + + /* + * Do not generate wal log for commands whose target table is a temporary + * or unlogged table. + * + * XXX We may generate wal logs for unlogged tables in the future so that + * unlogged tables can also be created and altered on the subscriber side. + * This makes it possible to directly replay the SET LOGGED command and the + * incoming rewrite message without creating a new table. + */ + if (support) { + ddl_deparse_context context; + context.verbose_mode = false; + context.include_owner = true; + context.max_volatility = PROVOLATILE_IMMUTABLE; + /* Deparse the DDL command and WAL log it to allow decoding of the same. */ + json_string = deparse_utility_command(cmd, &context); + if (json_string != NULL) { + check_command_publishable(context, true); + LogLogicalDDLMessage("deparse", cmd->d.alterTable.objectId, DCT_TableAlter, + json_string, strlen(json_string) + 1); + } } + return PointerGetDatum(NULL); } @@ -144,8 +254,7 @@ publication_deparse_ddl_command_start(PG_FUNCTION_ARGS) * is called after the execution of the command but before the * transaction commits. 
*/ -Datum -publication_deparse_ddl_command_end(PG_FUNCTION_ARGS) +Datum publication_deparse_ddl_command_end(PG_FUNCTION_ARGS) { ListCell *lc; slist_iter iter; @@ -162,33 +271,94 @@ publication_deparse_ddl_command_end(PG_FUNCTION_ARGS) CollectedCommand *cmd = (CollectedCommand*)lfirst(lc); char *json_string; - if (cmd->type == SCT_Simple && - !OidIsValid(cmd->d.simple.address.objectId)) + /* Rewrite DDL has been handled in table_rewrite trigger */ + if (cmd->d.alterTable.rewrite) { + if (cmd->type == SCT_AlterTable) { + relid = cmd->d.alterTable.objectId; + support = relation_support_ddl_replication(relid, true); + if (support) { + finish_alter_table_ddl_command(cmd); + } + continue; + } else if (cmd->parsetree && IsA(cmd->parsetree, RenameStmt)) { + RenameStmt *renameStmt = (RenameStmt *) cmd->parsetree; + + if (renameStmt && renameStmt->relationType != OBJECT_TYPE && + renameStmt->relationType != OBJECT_TABLE) + continue; + } + } + + if (cmd->type == SCT_Simple && cmd->parsetree && + !OidIsValid(cmd->d.simple.address.objectId)) { + relid = cmd->d.simple.address.objectId; + type = DCT_SimpleCmd; + /* + * handle some syntax which can not be capture by event trigger + * like rename table in dbcompatibility B + */ + if (IsA(cmd->parsetree, RenameStmt)) { + RenameStmt *renameStmt = (RenameStmt *) cmd->parsetree; + if (renameStmt->renameTableflag && renameStmt->renameTargetList) { + context.verbose_mode = false; + context.include_owner = true; + context.max_volatility = PROVOLATILE_IMMUTABLE; + json_string = deparse_utility_command(cmd, &context); + + if (json_string != NULL) { + check_command_publishable(context, false); + LogLogicalDDLMessage("deparse", relid, type, json_string, strlen(json_string) + 1); + } + } + } else if (DB_IS_CMPT(B_FORMAT) && (IsA(cmd->parsetree, CreateEventStmt) || + IsA(cmd->parsetree, AlterEventStmt) || + IsA(cmd->parsetree, DropEventStmt))) { + context.verbose_mode = false; + context.include_owner = true; + context.max_volatility = 
PROVOLATILE_IMMUTABLE; + json_string = deparse_utility_command(cmd, &context); + if (json_string != NULL) { + LogLogicalDDLMessage("deparse", relid, type, json_string, + strlen(json_string) + 1); + } + } + continue; + } - /* Only SCT_Simple for now */ - relid = cmd->d.simple.address.objectId; - type = DCT_SimpleCmd; + if (cmd->type == SCT_AlterTable) { + relid = cmd->d.alterTable.objectId; + type = DCT_TableAlter; + } else { + /* Only SCT_Simple for now */ + relid = cmd->d.simple.address.objectId; + type = DCT_SimpleCmd; + } - if (get_rel_relkind(relid)) { + if (get_rel_relkind(relid)) support = relation_support_ddl_replication(relid); - } + else if (cmd->d.simple.address.classId == TypeRelationId) + support = type_support_ddl_replication(relid); if (support) { /* * Deparse the DDL command and WAL log it to allow decoding of the * same. */ + context.verbose_mode = false; context.include_owner = true; context.max_volatility = PROVOLATILE_IMMUTABLE; json_string = deparse_utility_command(cmd, &context); if (json_string != NULL) { - check_command_publishable(context); + check_command_publishable(context, false); LogLogicalDDLMessage("deparse", relid, type, json_string, strlen(json_string) + 1); } + if (cmd->type == SCT_AlterTable) { + finish_alter_table_ddl_command(cmd); + } } } @@ -207,14 +377,34 @@ publication_deparse_ddl_command_end(PG_FUNCTION_ARGS) continue; } - if (strcmp(obj->objecttype, "table") == 0) { + if (strcmp(obj->objecttype, "table") == 0 || + strcmp(obj->objecttype, "index") == 0) { cmdtype = DCT_TableDropEnd; - } else if (strcmp(obj->objecttype, "index") == 0) { + } else if (strcmp(obj->objecttype, "type") == 0) { + cmdtype = DCT_TypeDropEnd; + } else if (strcmp(obj->objecttype, "schema") == 0 || + strcmp(obj->objecttype, "index") == 0 || + strcmp(obj->objecttype, "sequence") == 0 || + strcmp(obj->objecttype, "large sequence") == 0 || + strcmp(obj->objecttype, "view") == 0 || + strcmp(obj->objecttype, "function") == 0 || + strcmp(obj->objecttype, 
"trigger") == 0 || + strcmp(obj->objecttype, "function") == 0) { cmdtype = DCT_ObjectDrop; } else { continue; } + if (strcmp(obj->objecttype, "schema") == 0 && + (isTempNamespaceName(obj->objname) || isToastTempNamespaceName(obj->objname) + || strcmp(obj->objidentity, "pg_temp") == 0)) { + continue; + } + + if (!IsA((Node*)trigdata->parsetree, DropStmt)) { + continue; + } + command = deparse_drop_command(obj->objidentity, obj->objecttype, (Node*)trigdata->parsetree); if (command) diff --git a/src/gausskernel/storage/replication/logical/decode.cpp b/src/gausskernel/storage/replication/logical/decode.cpp index d6ebef5433..e69e336362 100644 --- a/src/gausskernel/storage/replication/logical/decode.cpp +++ b/src/gausskernel/storage/replication/logical/decode.cpp @@ -82,6 +82,8 @@ static void AreaDecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void AreaDecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void DecodeTruncate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void AreaDecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); @@ -749,6 +751,10 @@ static void DecodeHeap3Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) break; case XLOG_HEAP3_REWRITE: break; + case XLOG_HEAP3_TRUNCATE: + if (SnapBuildProcessChange(builder, xid, buf->origptr)) + DecodeTruncate(ctx, buf); + break; default: ereport(WARNING, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_HEAP3_ID record type: %u", info))); @@ -1930,6 +1936,42 @@ static void AreaDecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf } } + +/* + * Parse XLOG_HEAP_TRUNCATE from wal + */ +static void DecodeTruncate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) +{ + XLogReaderState *r = buf->record; + xl_heap_truncate *xlrec; + 
ReorderBufferChange *change; + int rc = 0; + xlrec = (xl_heap_truncate *) XLogRecGetData(r); + /* only interested in our database */ + if (xlrec->dbId != ctx->slot->data.database) { + return; + } + + /* output plugin doesn't look for this origin, no need to queue */ + if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) { + return; + } + + change = ReorderBufferGetChange(ctx->reorder); + change->action = REORDER_BUFFER_CHANGE_TRUNCATE; + change->origin_id = XLogRecGetOrigin(r); + if (xlrec->flags & XLH_TRUNCATE_CASCADE) + change->data.truncate.cascade = true; + if (xlrec->flags & XLH_TRUNCATE_RESTART_SEQS) + change->data.truncate.restart_seqs = true; + change->data.truncate.nrelids = xlrec->nrelids; + change->data.truncate.relids = (Oid*)palloc(xlrec->nrelids * sizeof(Oid)); + rc = memcpy_s(change->data.truncate.relids, xlrec->nrelids * sizeof(Oid), xlrec->relids, + xlrec->nrelids * sizeof(Oid)); + securec_check(rc, "", ""); + ReorderBufferQueueChange(ctx, XLogRecGetXid(r), buf->origptr, change); +} + /* * Decode XLOG_HEAP2_MULTI_INSERT_insert record into multiple tuplebufs. 
* diff --git a/src/gausskernel/storage/replication/logical/logical.cpp b/src/gausskernel/storage/replication/logical/logical.cpp index 2116901753..e9fc32a0a1 100644 --- a/src/gausskernel/storage/replication/logical/logical.cpp +++ b/src/gausskernel/storage/replication/logical/logical.cpp @@ -82,6 +82,9 @@ static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relat static void parallel_change_cb_wrapper(ParallelReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation, ParallelReorderBufferChange *change); +static void truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change); + static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, const char *plugin); static void LoadOutputPlugin(ParallelOutputPluginCallbacks *callbacks, const char *plugin); static void ddl_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, @@ -183,6 +186,7 @@ static LogicalDecodingContext *StartupDecodingContext(List *output_plugin_option /* wrap output plugin callbacks, so we can add error context information */ ctx->reorder->begin = begin_cb_wrapper; ctx->reorder->apply_change = change_cb_wrapper; + ctx->reorder->apply_truncate = truncate_cb_wrapper; ctx->reorder->commit = commit_cb_wrapper; ctx->reorder->ddl = ddl_cb_wrapper; @@ -453,6 +457,8 @@ LogicalDecodingContext *CreateInitDecodingContext(const char *plugin, List *outp startup_cb_wrapper(ctx, &ctx->options, true); (void)MemoryContextSwitchTo(old_context); + ctx->reorder->output_rewrites = ctx->options.receive_rewrites; + return ctx; } @@ -525,6 +531,7 @@ LogicalDecodingContext *CreateDecodingContext(XLogRecPtr start_lsn, List *output if (ctx->callbacks.startup_cb != NULL) startup_cb_wrapper(ctx, &ctx->options, false); (void)MemoryContextSwitchTo(old_context); + ctx->reorder->output_rewrites = ctx->options.receive_rewrites; ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errmsg("starting logical decoding for slot %s", 
NameStr(slot->data.name)), @@ -1008,6 +1015,48 @@ static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relat t_thrd.log_cxt.error_context_stack = errcallback.previous; } +static void truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + LogicalDecodingContext *ctx = (LogicalDecodingContext *)cache->private_data; + LogicalErrorCallbackState state; + ErrorContextCallback errcallback; + + Assert(!ctx->fast_forward); + + if (!ctx->callbacks.truncate_cb) { + return; + } + + /* Push callback + info on the error context stack */ + state.ctx = ctx; + state.callback_name = "truncate"; + state.report_location = change->lsn; + errcallback.callback = output_plugin_error_callback; + errcallback.arg = (void *) &state; + errcallback.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &errcallback; + + /* set output state */ + ctx->accept_writes = true; + if (txn != NULL) { + ctx->write_xid = txn->xid; + } + + /* + * report this change's lsn so replies from clients can give an up2date + * answer. This won't ever be enough (and shouldn't be!) to confirm + * receipt of this transaction, but it might allow another transaction's + * commit to be confirmed with one message. 
+ */ + ctx->write_location = change->lsn; + + ctx->callbacks.truncate_cb(ctx, txn, nrelations, relations, change); + + /* Pop the error context stack */ + t_thrd.log_cxt.error_context_stack = errcallback.previous; +} + bool filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin_id) { LogicalErrorCallbackState state; diff --git a/src/gausskernel/storage/replication/logical/logical_parse.cpp b/src/gausskernel/storage/replication/logical/logical_parse.cpp index cc37451832..9f0972ec83 100644 --- a/src/gausskernel/storage/replication/logical/logical_parse.cpp +++ b/src/gausskernel/storage/replication/logical/logical_parse.cpp @@ -487,6 +487,8 @@ void ParseHeap3Op(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, Pa break; case XLOG_HEAP3_INVALID: break; + case XLOG_HEAP3_TRUNCATE: + break; default: ereport(WARNING, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_HEAP3_ID record type: %u", info))); diff --git a/src/gausskernel/storage/replication/logical/proto.cpp b/src/gausskernel/storage/replication/logical/proto.cpp index 03920cba38..dc3d45c384 100644 --- a/src/gausskernel/storage/replication/logical/proto.cpp +++ b/src/gausskernel/storage/replication/logical/proto.cpp @@ -26,6 +26,9 @@ */ static const int LOGICALREP_IS_REPLICA_IDENTITY = 1; +#define TRUNCATE_CASCADE (1<<0) +#define TRUNCATE_RESTART_SEQS (1<<1) + static void logicalrep_write_attrs(StringInfo out, Relation rel); static void logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple, bool binary); @@ -260,6 +263,56 @@ LogicalRepRelId logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtu return relid; } +/* + * Write TRUNCATE to the output stream. 
+ */ +void logicalrep_write_truncate(StringInfo out, + int nrelids, + Oid relids[], + bool cascade, bool restart_seqs) +{ + int i; + uint8 flags = 0; + + pq_sendbyte(out, 'T'); /* action TRUNCATE */ + + pq_sendint32(out, nrelids); + + /* encode and send truncate flags */ + if (cascade) + flags |= TRUNCATE_CASCADE; + if (restart_seqs) + flags |= TRUNCATE_RESTART_SEQS; + pq_sendint8(out, flags); + + for (i = 0; i < nrelids; i++) + pq_sendint32(out, relids[i]); +} + +/* + * Read TRUNCATE from stream. + */ +List *logicalrep_read_truncate(StringInfo in, + bool *cascade, bool *restart_seqs) +{ + int i; + int nrelids; + List *relids = NIL; + uint8 flags; + + nrelids = pq_getmsgint(in, 4); + + /* read and decode truncate flags */ + flags = pq_getmsgint(in, 1); + *cascade = (flags & TRUNCATE_CASCADE) > 0; + *restart_seqs = (flags & TRUNCATE_RESTART_SEQS) > 0; + + for (i = 0; i < nrelids; i++) + relids = lappend_oid(relids, pq_getmsgint(in, 4)); + + return relids; +} + /* * Write relation description to the output stream. 
*/ diff --git a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp index f386df2bfc..326a505c84 100644 --- a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp +++ b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp @@ -353,6 +353,8 @@ static Size ReorderBufferChangeSize(ReorderBufferChange *change) break; } + case REORDER_BUFFER_CHANGE_TRUNCATE: + break; } return sz; } @@ -434,6 +436,8 @@ void ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change) change->data.utp.oldtuple = NULL; } break; + case REORDER_BUFFER_CHANGE_TRUNCATE: + break; } pfree(change); @@ -1453,6 +1457,7 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit Oid reloid; Oid partitionReltoastrelid = InvalidOid; bool isSegment = false; + Oid relrewrite = InvalidOid; switch (change->action) { case REORDER_BUFFER_CHANGE_INSERT: @@ -1503,8 +1508,10 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit RelationClose(relation); continue; } - - if (RelationIsLogicallyLogged(relation)) { + + relrewrite = RelationGetRelrewriteOption(relation); + if (RelationIsLogicallyLogged(relation) && + (!OidIsValid(relrewrite) || rb->output_rewrites)) { /* * For now ignore sequence changes entirely. 
Most of * the time they don't log changes using records we @@ -1593,6 +1600,35 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit case REORDER_BUFFER_CHANGE_DDL: ReorderBufferApplyDDLMessage(rb, txn, change); break; + case REORDER_BUFFER_CHANGE_TRUNCATE: { + int i; + int nrelids = change->data.truncate.nrelids; + int nrelations = 0; + Relation *relations; + + relations = (Relation *)palloc0(nrelids * sizeof(Relation)); + for (i = 0; i < nrelids; i++) { + Oid relid = change->data.truncate.relids[i]; + Relation relation; + + relation = RelationIdGetRelation(relid); + if (relation == NULL) { + elog(ERROR, "could not open relation with OID %u", relid); + } + + if (!RelationIsLogicallyLogged(relation)) { + continue; + } + + relations[nrelations++] = relation; + } + + rb->apply_truncate(rb, txn, nrelations, relations, change); + + for (i = 0; i < nrelations; i++) + RelationClose(relations[i]); + + } break; case REORDER_BUFFER_CHANGE_UINSERT: case REORDER_BUFFER_CHANGE_UDELETE: case REORDER_BUFFER_CHANGE_UUPDATE: @@ -2322,6 +2358,7 @@ static void ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *tx break; } + case REORDER_BUFFER_CHANGE_TRUNCATE: case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID: /* ReorderBufferChange contains everything important */ break; @@ -2539,6 +2576,7 @@ static void ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, break; } /* the base struct contains all the data, easy peasy */ + case REORDER_BUFFER_CHANGE_TRUNCATE: case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID: case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID: break; diff --git a/src/gausskernel/storage/replication/logical/worker.cpp b/src/gausskernel/storage/replication/logical/worker.cpp index 1431b74e45..4c416cc9b4 100644 --- a/src/gausskernel/storage/replication/logical/worker.cpp +++ b/src/gausskernel/storage/replication/logical/worker.cpp @@ -943,7 +943,7 @@ static void apply_handle_update(StringInfo s) int remoteattnum = 
rel->attrmap[i]; if (!att->attisdropped && remoteattnum >= 0) { Assert(remoteattnum < newtup.ncols); - if (newtup.colstatus[i] != LOGICALREP_COLUMN_UNCHANGED) { + if (newtup.colstatus[remoteattnum] != LOGICALREP_COLUMN_UNCHANGED) { target_rte->updatedCols = bms_add_member(target_rte->updatedCols, i + 1 - FirstLowInvalidHeapAttributeNumber); } @@ -1167,6 +1167,66 @@ static void apply_handle_delete(StringInfo s) } +/* + * Handle TRUNCATE message. + */ +static void apply_handle_truncate(StringInfo s) +{ + bool cascade = false; + bool restart_seqs = false; + List *remote_relids = NIL; + List *remote_rels = NIL; + List *rels = NIL; + List *relids = NIL; + List *relids_logged = NIL; + ListCell *lc; + LOCKMODE lockmode = AccessExclusiveLock; + + ensure_transaction(); + + remote_relids = logicalrep_read_truncate(s, &cascade, &restart_seqs); + + foreach(lc, remote_relids) + { + LogicalRepRelId relid = lfirst_oid(lc); + LogicalRepRelMapEntry *rel; + + rel = logicalrep_rel_open(relid, lockmode); + if (!should_apply_changes_for_rel(rel)) { + /* + * The relation can't become interesting in the middle of the + * transaction so it's safe to unlock it. + */ + logicalrep_rel_close(rel, lockmode); + continue; + } + + ereport(LOG, (errmsg("apply [truncate] for %s", RelationGetRelationName(rel->localrel)))); + + remote_rels = lappend(remote_rels, rel); + rels = lappend(rels, rel->localrel); + relids = lappend_oid(relids, rel->localreloid); + if (RelationIsLogicallyLogged(rel->localrel)) + relids_logged = lappend_oid(relids_logged, rel->localreloid); + } + + /* + * Even if we used CASCADE on the upstream master we explicitly + * default to replaying changes without further cascading. + * This might be later changeable with a user specified option. 
+ */ + ExecuteTruncateGuts(rels, relids, relids_logged, NIL, DROP_RESTRICT, restart_seqs, NULL); + + foreach(lc, remote_rels) + { + LogicalRepRelMapEntry *rel = (LogicalRepRelMapEntry*)lfirst(lc); + + logicalrep_rel_close(rel, NoLock); + } + + CommandCounterIncrement(); +} + /* * Handle CREATE TABLE command * @@ -1430,6 +1490,9 @@ static void apply_dispatch(StringInfo s) case 'D': apply_handle_delete(s); break; + case 'T': + apply_handle_truncate(s); + break; /* RELATION */ case 'R': apply_handle_relation(s); @@ -1909,6 +1972,7 @@ void ApplyWorkerMain() { MemoryContext oldctx; char originname[NAMEDATALEN]; + char dbname[NAMEDATALEN]; XLogRecPtr origin_startpos; char *myslotname; int rc = 0; @@ -2033,6 +2097,14 @@ void ApplyWorkerMain() t_thrd.proc_cxt.PostInit->SetDatabaseAndUser(NULL, t_thrd.applyworker_cxt.curWorker->dbid, NULL, t_thrd.applyworker_cxt.curWorker->userid); t_thrd.proc_cxt.PostInit->InitApplyWorker(); + /* has setDatabase and LockDatabase in InitApplyWorker */ + t_thrd.proc_cxt.PostInit->GetDatabaseName(dbname); + oldctx = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); + if (u_sess->proc_cxt.MyProcPort->database_name) + pfree_ext(u_sess->proc_cxt.MyProcPort->database_name); + u_sess->proc_cxt.MyProcPort->database_name = pstrdup(dbname); + (void)MemoryContextSwitchTo(oldctx); + pgstat_report_appname("ApplyWorker"); pgstat_report_activity(STATE_IDLE, NULL); #if (!defined(ENABLE_MULTIPLE_NODES)) && (!defined(ENABLE_PRIVATEGAUSS)) diff --git a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp index 44cbed1f83..c6305fada9 100644 --- a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp +++ b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp @@ -43,6 +43,9 @@ static void pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *t static void pgoutput_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); static void 
pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel, ReorderBufferChange *change); +static void pgoutput_truncate(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, int nrelations, Relation relations[], + ReorderBufferChange *change); static void pgoutput_ddl(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr message_lsn, const char *prefix, Oid relid, @@ -60,6 +63,7 @@ typedef struct PGOutputTxnData { bool sent_begin_txn; /* flag indicating where BEGIN has been set */ List *deleted_relids; /* maintain list of deleted table oids */ + List *deleted_typeids; /* maintain list of deleted type oids */ } PGOutputTxnData; /* Entry in the map used to remember which relation schemas we sent. */ @@ -86,6 +90,7 @@ void _PG_output_plugin_init(OutputPluginCallbacks *cb) cb->startup_cb = pgoutput_startup; cb->begin_cb = pgoutput_begin_txn; cb->change_cb = pgoutput_change; + cb->truncate_cb = pgoutput_truncate; cb->commit_cb = pgoutput_commit_txn; cb->abort_cb = pgoutput_abort_txn; cb->filter_by_origin_cb = pgoutput_origin_filter; @@ -163,6 +168,7 @@ static void pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *o /* This plugin uses binary protocol. */ opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; + opt->receive_rewrites = true; /* * This is replication start and not slot initialization. 
@@ -225,6 +231,7 @@ clean_txn_data(ReorderBufferTXN *txn) return; list_free(txndata->deleted_relids); + list_free(txndata->deleted_typeids); pfree(txndata); txn->output_plugin_private = NULL; } @@ -368,6 +375,22 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, PGOutputData *data = (PGOutputData *)ctx->output_plugin_private; MemoryContext old; RelationSyncEntry *relentry; + Oid relrewrite = RelationGetRelrewriteOption(relation); + bool table_rewrite = false; + + if (OidIsValid(relrewrite)) { + table_rewrite = true; + relation = RelationIdGetRelation(relrewrite); + + if (REORDER_BUFFER_CHANGE_INSERT == change->action) { + Oid replidindex = RelationGetReplicaIndex(relation); + if (!OidIsValid(replidindex)) { + return; + } + } else if (REORDER_BUFFER_CHANGE_UPDATE != change->action) { + return; + } + } if (!is_publishable_relation(relation)) return; @@ -378,6 +401,14 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, return; } + /* + * We don't publish table rewrite change unless we publish the rewrite ddl + * message. + */ + if (table_rewrite && relentry->pubactions.pubddl == PUBDDL_NONE) { + return; + } + /* Avoid leaking memory by using and resetting our own context */ old = MemoryContextSwitchTo(data->common.context); @@ -391,7 +422,17 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, case REORDER_BUFFER_CHANGE_INSERT: if (change->data.tp.newtuple != NULL) { OutputPluginPrepareWrite(ctx, true); - logicalrep_write_insert(ctx->out, relation, &change->data.tp.newtuple->tuple, data->binary); + /* + * Convert the rewrite inserts to updates so that the subscriber + * can replay it. This is needed to make sure the data between + * publisher and subscriber is consistent. 
+ */ + if (table_rewrite) { + logicalrep_write_update(ctx->out, relation, + NULL, &change->data.tp.newtuple->tuple, data->binary); + } else { + logicalrep_write_insert(ctx->out, relation, &change->data.tp.newtuple->tuple, data->binary); + } OutputPluginWrite(ctx, true); } break; @@ -450,10 +491,53 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, MemoryContextReset(data->common.context); } +static void pgoutput_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + PGOutputData *data = (PGOutputData *) ctx->output_plugin_private; + MemoryContext old; + RelationSyncEntry *relentry; + int i; + int nrelids; + Oid *relids; + + old = MemoryContextSwitchTo(data->common.context); + + relids = (Oid*)palloc0(nrelations * sizeof(Oid)); + nrelids = 0; + + for (i = 0; i < nrelations; i++) { + Relation relation = relations[i]; + Oid relid = RelationGetRelid(relation); + + if (!is_publishable_relation(relation)) { + continue; + } + + relentry = get_rel_sync_entry(data, relid); + if (!relentry->pubactions.pubtruncate) { + continue; + } + + relids[nrelids++] = relid; + MaybeSendSchema(ctx, relation, relentry); + } + + if (nrelids > 0) { + OutputPluginPrepareWrite(ctx, true); + logicalrep_write_truncate(ctx->out, + nrelids, + relids, + change->data.truncate.cascade, + change->data.truncate.restart_seqs); + OutputPluginWrite(ctx, true); + } + MemoryContextSwitchTo(old); + MemoryContextReset(data->common.context); +} /* Check if the given object is published. */ -static bool -is_object_published_ddl(LogicalDecodingContext *ctx, Oid objid) +static bool is_object_published_ddl(LogicalDecodingContext *ctx, Oid objid) { RelationSyncEntry *relentry; PGOutputData *data = (PGOutputData *) ctx->output_plugin_private; @@ -483,11 +567,8 @@ is_object_published_ddl(LogicalDecodingContext *ctx, Oid objid) /* * Send the decoded DDL message. 
*/ -static void -pgoutput_ddl(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, - XLogRecPtr message_lsn, - const char *prefix, Oid relid, DeparsedCommandType cmdtype, - Size sz, const char *message) +static void pgoutput_ddl(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr message_lsn, const char *prefix, + Oid relid, DeparsedCommandType cmdtype, Size sz, const char *message) { PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private; @@ -496,7 +577,7 @@ pgoutput_ddl(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, * we cannot get the required information from the catalog, so we skip the * check for them. */ - if (cmdtype != DCT_TableDropEnd && !is_object_published_ddl(ctx, relid)) { + if (cmdtype != DCT_TableDropEnd && cmdtype != DCT_TypeDropEnd && !is_object_published_ddl(ctx, relid)) { return; } @@ -530,6 +611,61 @@ pgoutput_ddl(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, relid); break; + case DCT_TableAlter: + + /* + * For table rewrite ddl, we first send the original ddl message + * to subscriber, then convert the upcoming rewrite INSERT to + * UPDATE and send them to subscriber so that the data between + * publisher and subscriber can always be consistent. + * + * We do this way because of two reason: + * + * (1) The data before the rewrite ddl could already be different + * among publisher and subscriber. To make sure the extra data in + * subscriber which doesn't exist in publisher also get rewritten, + * we need to let the subscriber execute the original rewrite ddl + * to rewrite all the data at first. + * + * (2) the data after executing rewrite ddl could be different + * among publisher and subscriber(due to different + * functions/operators used during rewrite), so we need to + * replicate the rewrite UPDATEs to keep the data consistent. 
+ * + * TO IMPROVE: We could improve this by letting the subscriber + * only rewrite the extra data instead of doing fully rewrite and + * use the upcoming rewrite UPDATEs to rewrite the rest data. + * Besides, we may not need to send rewrite changes for all type + * of rewrite ddl, for example, it seems fine to skip sending + * rewrite changes for ALTER TABLE SET LOGGED as the data in the + * table doesn't actually be changed. + */ + break; + + case DCT_TypeDropStart: { + MemoryContext old; + + init_txn_data(ctx, txn); + + txndata = (PGOutputTxnData *) txn->output_plugin_private; + + old = MemoryContextSwitchTo(ctx->context); + + txndata->deleted_typeids = lappend_oid(txndata->deleted_typeids, + relid); + + MemoryContextSwitchTo(old); + } + return; + + case DCT_TypeDropEnd: + if (!list_member_oid(txndata->deleted_typeids, relid)) { + return; + } + txndata->deleted_typeids = list_delete_oid(txndata->deleted_typeids, + relid); + break; + case DCT_SimpleCmd: case DCT_ObjectDrop: case DCT_ObjectCreate: @@ -691,6 +827,7 @@ static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, O entry->pubactions.pubinsert = false; entry->pubactions.pubupdate = false; entry->pubactions.pubdelete = false; + entry->pubactions.pubtruncate = false; entry->pubactions.pubddl = 0; foreach (lc, data->publications) { @@ -722,14 +859,17 @@ static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, O entry->pubactions.pubinsert |= pub->pubactions.pubinsert; entry->pubactions.pubupdate |= pub->pubactions.pubupdate; entry->pubactions.pubdelete |= pub->pubactions.pubdelete; + entry->pubactions.pubtruncate |= pub->pubactions.pubtruncate; if (entry->pubactions.pubddl != PUBDDL_ALL) { entry->pubactions.pubddl |= pub->pubactions.pubddl; } } - if (entry->pubactions.pubinsert && entry->pubactions.pubupdate && entry->pubactions.pubdelete && - pub->pubactions.pubddl == PUBDDL_ALL) - break; + if (entry->pubactions.pubinsert && entry->pubactions.pubupdate && + 
entry->pubactions.pubdelete && entry->pubactions.pubtruncate && + pub->pubactions.pubddl == PUBDDL_ALL) { + break; + } } list_free_ext(pubids); diff --git a/src/include/access/htup.h b/src/include/access/htup.h index 298d46334f..252e0af8d6 100644 --- a/src/include/access/htup.h +++ b/src/include/access/htup.h @@ -751,6 +751,8 @@ inline HeapTuple heaptup_alloc(Size size) #define XLOG_HEAP3_NEW_CID 0x00 #define XLOG_HEAP3_REWRITE 0x10 #define XLOG_HEAP3_INVALID 0x20 +/* XLOG_HEAP_TRUNCATE with 0x30 in heap in PG14 */ +#define XLOG_HEAP3_TRUNCATE 0x30 /* we used to put all xl_heap_* together, which made us run out of opcodes (quickly) * when trying to add a DELETE_IS_SUPER operation. Thus we split the codes carefully @@ -806,6 +808,26 @@ typedef struct xl_heap_delete { #define SizeOfOldHeapDelete (offsetof(xl_heap_delete, flags) + sizeof(uint8)) #define SizeOfHeapDelete (offsetof(xl_heap_delete, infobits_set) + sizeof(uint8)) +/* + * xl_heap_delete flag values, 8 bits are available. + */ +#define XLH_TRUNCATE_CASCADE (1<<0) +#define XLH_TRUNCATE_RESTART_SEQS (1<<1) + +/* + * For truncate we list all truncated relids in an array, followed by all + * sequence relids that need to be restarted, if any. + * All rels are always within the same database, so we just list dbid once. 
+ */ +typedef struct xl_heap_truncate { + Oid dbId; + uint32 nrelids; + uint8 flags; + Oid relids[FLEXIBLE_ARRAY_MEMBER]; +} xl_heap_truncate; + +#define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids)) + /* * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted * or updated tuple in WAL; we can save a few bytes by reconstructing the diff --git a/src/include/catalog/heap.h b/src/include/catalog/heap.h index 01c4788eb7..3e883a7db6 100644 --- a/src/include/catalog/heap.h +++ b/src/include/catalog/heap.h @@ -118,30 +118,31 @@ extern Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltablespace, Oid relid, - Oid reltypeid, - Oid reloftypeid, - Oid ownerid, - TupleDesc tupdesc, - List *cooked_constraints, - char relkind, - char relpersistence, - bool shared_relation, - bool mapped_relation, - bool oidislocal, - int oidinhcount, - OnCommitAction oncommit, - Datum reloptions, - bool use_user_acl, - bool allow_system_table_mods, - PartitionState *partTableState, - int8 row_compress, - HashBucketInfo *bucketinfo, - bool record_dependce = true, - List* ceLst = NULL, - StorageType storage_type = HEAP_DISK, - LOCKMODE partLockMode = AccessExclusiveLock, + Oid reltypeid, + Oid reloftypeid, + Oid ownerid, + TupleDesc tupdesc, + List *cooked_constraints, + char relkind, + char relpersistence, + bool shared_relation, + bool mapped_relation, + bool oidislocal, + int oidinhcount, + OnCommitAction oncommit, + Datum reloptions, + bool use_user_acl, + bool allow_system_table_mods, + PartitionState *partTableState, + int8 row_compress, + HashBucketInfo *bucketinfo, + bool record_dependce = true, + List* ceLst = NULL, + StorageType storage_type = HEAP_DISK, + LOCKMODE partLockMode = AccessExclusiveLock, ObjectAddress *typaddress= NULL, - List* depend_extend = NIL); + List* depend_extend = NIL, + Oid relrewrite = InvalidOid); extern void heap_create_init_fork(Relation rel); diff --git a/src/include/catalog/pg_publication.h 
b/src/include/catalog/pg_publication.h index 6a71b9eae1..07387d0589 100644 --- a/src/include/catalog/pg_publication.h +++ b/src/include/catalog/pg_publication.h @@ -24,6 +24,7 @@ /* Publication trigger events */ #define PUB_TRIG_DDL_CMD_END "ddl_command_end" #define PUB_TRIG_DDL_CMD_START "ddl_command_start" +#define PUB_TRIG_TBL_REWRITE "table_rewrite" /* Publication event trigger prefix */ #define PUB_EVENT_TRIG_FORMAT "pg_deparse_trig_%s_%u" @@ -62,6 +63,9 @@ CATALOG(pg_publication,6130) BKI_ROWTYPE_OID(6141) BKI_SCHEMA_MACRO bool pubdelete; int8 pubddl; + + /* true if truncates are published */ + bool pubtruncate; } FormData_pg_publication; #undef int8 @@ -78,7 +82,7 @@ typedef FormData_pg_publication *Form_pg_publication; * ---------------- */ -#define Natts_pg_publication 7 +#define Natts_pg_publication 8 #define Anum_pg_publication_pubname 1 #define Anum_pg_publication_pubowner 2 #define Anum_pg_publication_puballtables 3 @@ -86,11 +90,13 @@ typedef FormData_pg_publication *Form_pg_publication; #define Anum_pg_publication_pubupdate 5 #define Anum_pg_publication_pubdelete 6 #define Anum_pg_publication_pubddl 7 +#define Anum_pg_publication_pubtruncate 8 typedef struct PublicationActions { bool pubinsert; bool pubupdate; bool pubdelete; + bool pubtruncate; int64 pubddl; } PublicationActions; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_949.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_949.sql new file mode 100644 index 0000000000..07a9f6307e --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_949.sql @@ -0,0 +1 @@ +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_table_rewrite() CASCADE; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_949.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_949.sql new 
file mode 100644 index 0000000000..07a9f6307e --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_949.sql @@ -0,0 +1 @@ +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_table_rewrite() CASCADE; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_949.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_949.sql new file mode 100644 index 0000000000..6e9fc0e40b --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_949.sql @@ -0,0 +1,6 @@ +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_table_rewrite() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4644; +CREATE FUNCTION pg_catalog.publication_deparse_table_rewrite () +RETURNS event_trigger +LANGUAGE INTERNAL VOLATILE STRICT NOT FENCED +AS 'publication_deparse_table_rewrite'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_949.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_949.sql new file mode 100644 index 0000000000..6e9fc0e40b --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_949.sql @@ -0,0 +1,6 @@ +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_table_rewrite() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4644; +CREATE FUNCTION pg_catalog.publication_deparse_table_rewrite () +RETURNS event_trigger +LANGUAGE INTERNAL VOLATILE STRICT NOT FENCED +AS 'publication_deparse_table_rewrite'; diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h index cb0efcb416..4827c16a6e 100644 --- a/src/include/commands/defrem.h +++ b/src/include/commands/defrem.h @@ -210,10 +210,12 @@ extern Oid GetFunctionNodeGroupByFuncid(Oid funcid); extern Oid GetFunctionNodeGroup(AlterFunctionStmt* stmt); /* 
commands/eventcmds.c */ -extern void CreateEventCommand(CreateEventStmt* stmt); -extern void AlterEventCommand(AlterEventStmt* stmt); +extern ObjectAddress CreateEventCommand(CreateEventStmt* stmt); +extern ObjectAddress AlterEventCommand(AlterEventStmt* stmt); extern void DropEventCommand(DropEventStmt* stmt); - +extern char* parseIntervalExprString(Node *intervalNode); +extern char* parseTimeExprString(Node* timeExpr); + #endif /* !FRONTEND_PARSER */ extern DefElem* defWithOids(bool value); #endif /* DEFREM_H */ diff --git a/src/include/commands/event_trigger.h b/src/include/commands/event_trigger.h index a4f817fff4..3e52510486 100644 --- a/src/include/commands/event_trigger.h +++ b/src/include/commands/event_trigger.h @@ -110,8 +110,7 @@ extern void EventTriggerCollectSimpleCommand(ObjectAddress address, extern void EventTriggerAlterTableStart(Node *parsetree); extern void EventTriggerAlterTableRelid(Oid objectId); -extern void EventTriggerCollectAlterTableSubcmd(Node *subcmd, - ObjectAddress address); +extern void EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address, bool rewrite); extern void EventTriggerAlterTableEnd(void); extern void EventTriggerCollectGrant(InternalGrant *istmt); @@ -124,6 +123,8 @@ extern void EventTriggerCollectCreateOpClass(CreateOpClassStmt *stmt, extern void EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId, Oid *dictIds, int ndicts); extern void EventTriggerCollectAlterDefPrivs(AlterDefaultPrivilegesStmt *stmt); - +extern void EventTriggerAlterTypeStart(AlterTableCmd *subcmd, Relation rel); +extern void EventTriggerAlterTypeEnd(Node *subcmd, ObjectAddress address, int rewrite); +extern void EventTriggerAlterTypeUpdate(ObjectAddress address, AttrNumber old_attnum); #endif /* EVENT_TRIGGER_H */ diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h index bd86680b18..df2c4ed18b 100644 --- a/src/include/commands/tablecmds.h +++ b/src/include/commands/tablecmds.h @@ 
-159,6 +159,10 @@ extern void ExecuteTruncate(TruncateStmt* stmt, const char* sql_statement); extern void ExecuteTruncate(TruncateStmt* stmt); #endif +extern void ExecuteTruncateGuts( + List *explicit_rels, List *relids, List *relids_logged, List *rels_in_redis, + DropBehavior behavior, bool restart_seqs, TruncateStmt* stmt); + extern void SetRelationHasSubclass(Oid relationId, bool relhassubclass); extern ObjectAddress renameatt(RenameStmt* stmt); @@ -169,6 +173,7 @@ extern ObjectAddress RenameRelation(RenameStmt* stmt); extern void RenameRelationInternal(Oid myrelid, const char* newrelname, char* newschema = NULL); +extern void ResetRelRewrite(Oid myrelid); extern void find_composite_type_dependencies(Oid typeOid, Relation origRelation, const char* origTypeName); extern void check_of_type(HeapTuple typetuple); diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 9a9b00b8f0..0f078c1447 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -2956,6 +2956,7 @@ typedef struct knl_u_hook_context { void *nullsMinimalPolicyHook; void *getIgnoreKeywordTokenHook; void *modifyTypeForPartitionKeyHook; + void *deparseCollectedCommandHook; } knl_u_hook_context; typedef struct knl_u_libsw_context { diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index c2bafe7b13..72284dc63e 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -37,6 +37,7 @@ /***************************************************************************** * Backend version and inplace upgrade staffs *****************************************************************************/ +extern const uint32 PUBLICATION_DDL_AT_VERSION_NUM; extern const uint32 PIPELINED_FUNCTION_VERSION_NUM; extern const uint32 DISABLE_CONSTRAINT_VERSION_NUM; extern const uint32 SUPPORT_GS_DEPENDENCY_VERSION_NUM; diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index be0c438402..b6fff9b1f2 100755 --- 
a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -1409,6 +1409,7 @@ typedef struct AlterFunctionStmt { NodeTag type; FuncWithArgs* func; /* name and args of function */ List* actions; /* list of DefElem */ + bool isProcedure = false; } AlterFunctionStmt; enum CompileEntry { diff --git a/src/include/nodes/parsenodes_common.h b/src/include/nodes/parsenodes_common.h index 0a000d37b4..b818ba8fa6 100644 --- a/src/include/nodes/parsenodes_common.h +++ b/src/include/nodes/parsenodes_common.h @@ -1002,6 +1002,7 @@ typedef struct AlterTableCmd { /* one subcommand of an ALTER TABLE */ bool alterGPI; /* check whether is global partition index alter statement */ bool is_first; /* a flag of ALTER TABLE ... ADD ... FIRST */ char *after_name; /* column name of ALTER TABLE ... ADD ... AFTER column_name */ + bool recursing; } AlterTableCmd; typedef struct AddTableIntoCBIState { @@ -1166,6 +1167,7 @@ typedef struct ColumnDef { Form_pg_attribute dropped_attr; /* strcuture for dropped attribute during create table like OE */ char generatedCol; /* generated column setting */ Node *update_default; + char *initdefval; } ColumnDef; /* @@ -2473,6 +2475,7 @@ typedef struct RenameStmt { bool missing_ok; /* skip error if missing? 
*/ List* renameTargetList = NULL; bool renameTableflag = false; + bool is_modifycolumn = false; } RenameStmt; /* ---------------------- diff --git a/src/include/replication/ddlmessage.h b/src/include/replication/ddlmessage.h index 60135e9e66..82a5f5e692 100644 --- a/src/include/replication/ddlmessage.h +++ b/src/include/replication/ddlmessage.h @@ -21,11 +21,14 @@ */ typedef enum DeparsedCommandType { - DCT_ObjectCreate, - DCT_ObjectDrop, DCT_SimpleCmd, DCT_TableDropEnd, - DCT_TableDropStart + DCT_TableDropStart, + DCT_TableAlter, + DCT_ObjectCreate, + DCT_ObjectDrop, + DCT_TypeDropStart, + DCT_TypeDropEnd } DeparsedCommandType; /* diff --git a/src/include/replication/logicalproto.h b/src/include/replication/logicalproto.h index aab505b160..a1d6463965 100644 --- a/src/include/replication/logicalproto.h +++ b/src/include/replication/logicalproto.h @@ -103,6 +103,10 @@ extern LogicalRepRelId logicalrep_read_update(StringInfo in, bool *has_oldtuple, LogicalRepTupleData *newtup); extern void logicalrep_write_delete(StringInfo out, Relation rel, HeapTuple oldtuple, bool binary); extern LogicalRepRelId logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup); +extern void logicalrep_write_truncate(StringInfo out, int nrelids, Oid relids[], + bool cascade, bool restart_seqs); +extern List *logicalrep_read_truncate(StringInfo in, + bool *cascade, bool *restart_seqs); extern void logicalrep_write_rel(StringInfo out, Relation rel); extern LogicalRepRelation *logicalrep_read_rel(StringInfo in); extern void logicalrep_write_typ(StringInfo out, Oid typoid); diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h index 0b19a8713d..b64cb085fa 100644 --- a/src/include/replication/output_plugin.h +++ b/src/include/replication/output_plugin.h @@ -30,6 +30,7 @@ typedef enum OutputPluginOutputType { */ typedef struct OutputPluginOptions { OutputPluginOutputType output_type; + bool receive_rewrites; } OutputPluginOptions; /* @@ -75,6 
+76,15 @@ typedef void (*LogicalDecodeChangeCB)( typedef void (*ParallelLogicalDecodeChangeCB)( struct ParallelLogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ParallelReorderBufferChange* change); +/* + * Callback for every TRUNCATE in a successful transaction. + */ +typedef void (*LogicalDecodeTruncateCB) (struct LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + int nrelations, + Relation relations[], + ReorderBufferChange *change); + /* * Called for every (explicit or implicit) COMMIT of a successful transaction. */ @@ -107,6 +117,7 @@ typedef struct OutputPluginCallbacks { LogicalDecodeStartupCB startup_cb; LogicalDecodeBeginCB begin_cb; LogicalDecodeChangeCB change_cb; + LogicalDecodeTruncateCB truncate_cb; LogicalDecodeCommitCB commit_cb; LogicalDecodeAbortCB abort_cb; LogicalDecodePrepareCB prepare_cb; @@ -119,6 +130,7 @@ typedef struct ParallelOutputPluginCallbacks { LogicalDecodeStartupCB startup_cb; LogicalDecodeBeginCB begin_cb; ParallelLogicalDecodeChangeCB change_cb; + LogicalDecodeTruncateCB truncate_cb; LogicalDecodeCommitCB commit_cb; LogicalDecodeShutdownCB shutdown_cb; LogicalDecodeFilterByOriginCB filter_by_origin_cb; diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 8c8319bd3e..fa802d617d 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -73,6 +73,7 @@ enum ReorderBufferChangeType { REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID, REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID, REORDER_BUFFER_CHANGE_DDL, + REORDER_BUFFER_CHANGE_TRUNCATE, REORDER_BUFFER_CHANGE_UINSERT, REORDER_BUFFER_CHANGE_UUPDATE, REORDER_BUFFER_CHANGE_UDELETE @@ -115,6 +116,17 @@ typedef struct ReorderBufferChange { CommitSeqNo snapshotcsn; } tp; + /* + * Truncate data for REORDER_BUFFER_CHANGE_TRUNCATE representing + * one set of relations to be truncated. 
+ */ + struct { + Size nrelids; + bool cascade; + bool restart_seqs; + Oid *relids; + } truncate; + /* Old, new utuples when action == UHEAP_INSERT|UPDATE|DELETE */ struct { /* relation that has been changed */ @@ -213,8 +225,8 @@ typedef struct ReorderBufferTXN { XLogRecPtr restart_decoding_lsn; /* origin of the change that caused this transaction */ - RepOriginId origin_id; - XLogRecPtr origin_lsn; + RepOriginId origin_id; + XLogRecPtr origin_lsn; /* The csn of the transaction */ CommitSeqNo csn; @@ -317,7 +329,9 @@ typedef struct ReorderBuffer ReorderBuffer; /* change callback signature */ typedef void (*ReorderBufferApplyChangeCB)( ReorderBuffer* rb, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change); - +/* truncate callback signature */ +typedef void (*ReorderBufferApplyTruncateCB) ( + ReorderBuffer *rb, ReorderBufferTXN *txn, int nrelations, Relation relations[], ReorderBufferChange *change); /* begin callback signature */ typedef void (*ReorderBufferBeginCB)(ReorderBuffer* rb, ReorderBufferTXN* txn); @@ -374,6 +388,7 @@ struct ReorderBuffer { */ ReorderBufferBeginCB begin; ReorderBufferApplyChangeCB apply_change; + ReorderBufferApplyTruncateCB apply_truncate; ReorderBufferCommitCB commit; ReorderBufferAbortCB abort; ReorderBufferPrepareCB prepare; @@ -384,6 +399,11 @@ struct ReorderBuffer { */ void* private_data; + /* + * Saved output plugin option + */ + bool output_rewrites; + /* * Private memory context. */ diff --git a/src/include/tcop/ddldeparse.h b/src/include/tcop/ddldeparse.h index 4f44aabe9e..325344a8a3 100644 --- a/src/include/tcop/ddldeparse.h +++ b/src/include/tcop/ddldeparse.h @@ -16,6 +16,7 @@ /* Context info needed for deparsing ddl command */ typedef struct { + bool verbose_mode; /* * include_owner indicates if the owner/role of the command should be * included in the deparsed Json output. 
It is set to false for any commands @@ -29,9 +30,73 @@ typedef struct char max_volatility; } ddl_deparse_context; +extern Relation table_open(Oid relationId, LOCKMODE lockmode); +extern void table_close(Relation relation, LOCKMODE lockmode); extern char *deparse_utility_command(CollectedCommand *cmd, ddl_deparse_context * context); extern char *deparse_ddl_json_to_string(char *jsonb, char** owner); extern char *deparse_drop_command(const char *objidentity, const char *objecttype, Node *parsetree); +extern List *deparse_altertable_end(CollectedCommand *cmd); +extern bool relation_support_ddl_replication(Oid relid, bool rewrite = false); +/* + * Before they are turned into JSONB representation, each command is + * represented as an object tree, using the structs below. + */ +typedef enum +{ + ObjTypeNull, + ObjTypeBool, + ObjTypeString, + ObjTypeArray, + ObjTypeInteger, + ObjTypeFloat, + ObjTypeObject +} ObjType; + +/* + * Represent the command as an object tree. + */ +typedef struct ObjTree +{ + slist_head params; /* Object tree parameters */ + int numParams; /* Number of parameters in the object tree */ + StringInfo fmtinfo; /* Format string of the ObjTree */ + bool present; /* Indicates if boolean value should be stored */ +} ObjTree; + +/* + * An element of an object tree (ObjTree). 
+ */ +typedef struct ObjElem +{ + char *name; /* Name of object element */ + ObjType objtype; /* Object type */ + + union { + bool boolean; + char *string; + int64 integer; + float8 flt; + ObjTree *object; + List *array; + } value; /* Store the object value based on the object + * type */ + slist_node node; /* Used in converting back to ObjElem + * structure */ +} ObjElem; + +ObjTree *new_objtree_VA(const char *fmt, int numobjs, ...); +ObjElem *new_string_object(char *value); +void append_format_string(ObjTree *tree, char *sub_fmt); +void append_object_object(ObjTree *tree, char *sub_fmt, ObjTree *value); +void append_string_object(ObjTree *tree, char *sub_fmt, char *name, + const char *value); +void append_array_object(ObjTree *tree, char *sub_fmt, List *array); +typedef enum { + DEPARSE_SIMPLE_COMMAND, + ALTER_RELATION_SUBCMD +} collectCmdHookType; +typedef void *(*deparseCollectedCommand)(int type, CollectedCommand *cmd, CollectedATSubcmd *sub, + ddl_deparse_context *context); #endif /* DDL_DEPARSE_H */ diff --git a/src/include/tcop/deparse_utility.h b/src/include/tcop/deparse_utility.h index 8bd7f3926a..347836053c 100644 --- a/src/include/tcop/deparse_utility.h +++ b/src/include/tcop/deparse_utility.h @@ -38,6 +38,7 @@ typedef struct CollectedATSubcmd { ObjectAddress address; /* affected column, constraint, index, ... */ Node *parsetree; + char *usingexpr; } CollectedATSubcmd; typedef struct CollectedCommand @@ -61,6 +62,7 @@ typedef struct CollectedCommand { Oid objectId; Oid classId; + bool rewrite; List *subcmds; } alterTable; diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index f0f15ecd8f..35039ff727 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -416,6 +416,7 @@ typedef struct StdRdOptions { /* SPQ OPTIONS */ int spq_bt_build_offset; #endif + Oid relrewrite; } StdRdOptions; #define HEAP_MIN_FILLFACTOR 10 @@ -543,6 +544,8 @@ typedef struct StdRdOptions { ((relation)->rd_options ? 
\ ((StdRdOptions *) (relation)->rd_options)->parallel_workers : (defaultpw)) +#define RelationGetRelrewriteOption(relation) \ + ((relation)->rd_options ? ((StdRdOptions*)(relation)->rd_options)->relrewrite : InvalidOid) /* * RelationIsValid * True iff relation descriptor is valid. diff --git a/src/test/regress/expected/object_address.out b/src/test/regress/expected/object_address.out index 946c248c16..d2dcc40416 100644 --- a/src/test/regress/expected/object_address.out +++ b/src/test/regress/expected/object_address.out @@ -569,8 +569,8 @@ SELECT (pg_identify_object(addr1.classid, addr1.objid, addr1.subobjid)).* type | addr_nsp | gencomptype | addr_nsp.gencomptype type | addr_nsp | genenum | addr_nsp.genenum type | addr_nsp | gendomain | addr_nsp.gendomain - function | pg_catalog | | pg_identify_object(oid,oid,integer) - function | addr_nsp | | genaggr(integer) + function | pg_catalog | | pg_catalog.pg_identify_object(oid,oid,integer) + function | addr_nsp | | addr_nsp.genaggr(integer) sequence | addr_nsp | gentable_a_seq | addr_nsp.gentable_a_seq table | addr_nsp | gentable | addr_nsp.gentable table column | addr_nsp | gentable | addr_nsp.gentable.b diff --git a/src/test/regress/expected/on_update_session2.out b/src/test/regress/expected/on_update_session2.out index fef93c808d..88a23239f9 100644 --- a/src/test/regress/expected/on_update_session2.out +++ b/src/test/regress/expected/on_update_session2.out @@ -8,7 +8,7 @@ select pg_sleep(2); update tb666 set c2 = 3; select * from tb666; c1 | c2 | c3 ----------------------------------+----+--------------------------------- +--?.*-----------------+----+------------------.* --?.* (1 row) diff --git a/src/test/regress/output/publication.source b/src/test/regress/output/publication.source index c7234d93cb..09058f5c20 100644 --- a/src/test/regress/output/publication.source +++ b/src/test/regress/output/publication.source @@ -200,8 +200,8 @@ select pubname, tablename from pg_publication_tables where tablename='testpub_tb --- 
drop publication DROP PUBLICATION testpub_foralltables_rename; select * from pg_publication where pubname='testpub_foralltables_rename'; - pubname | pubowner | puballtables | pubinsert | pubupdate | pubdelete | pubddl ----------+----------+--------------+-----------+-----------+-----------+-------- + pubname | pubowner | puballtables | pubinsert | pubupdate | pubdelete | pubddl | pubtruncate +---------+----------+--------------+-----------+-----------+-----------+--------+------------- (0 rows) DROP PUBLICATION IF EXISTS testpub_nonexists; diff --git a/src/test/regress/pg_regress.cpp b/src/test/regress/pg_regress.cpp index 2e06a819a5..c4bc35f712 100644 --- a/src/test/regress/pg_regress.cpp +++ b/src/test/regress/pg_regress.cpp @@ -5516,7 +5516,7 @@ static void check_global_variables() } } -#define BASE_PGXC_LIKE_MACRO_NUM 1381 +#define BASE_PGXC_LIKE_MACRO_NUM 1382 static void check_pgxc_like_macros() { #ifdef BUILD_BY_CMAKE diff --git a/src/test/subscription/schedule b/src/test/subscription/schedule index 77099239ba..d39f9cd7d3 100644 --- a/src/test/subscription/schedule +++ b/src/test/subscription/schedule @@ -1,4 +1,5 @@ rep_changes +ddl_replication pub_switchover types constraints @@ -15,5 +16,4 @@ skiplsn disable pub_subconflict bugs -ddl_replication dump \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/acceptable_diff/create_table.diff b/src/test/subscription/testcase/ddl_replication_sql/A/acceptable_diff/create_table.diff deleted file mode 100644 index 3d25e9febc..0000000000 --- a/src/test/subscription/testcase/ddl_replication_sql/A/acceptable_diff/create_table.diff +++ /dev/null @@ -1,25 +0,0 @@ -6109,6132d6108 -< -- Name: tab_foreign_child_col_a_fkey; Type: FK CONSTRAINT; Schema: public; Owner: ddl_test_user -< -- -< -< ALTER TABLE tab_foreign_child -< ADD CONSTRAINT tab_foreign_child_col_a_fkey FOREIGN KEY (col_a, col_b) REFERENCES tab_foreign_parent(col_1, col_2); -< -< -< -- -< -- Name: 
tab_product2_currency_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: ddl_test_user -< -- -< -< ALTER TABLE tab_product2 -< ADD CONSTRAINT tab_product2_currency_id_fkey FOREIGN KEY (currency_id) REFERENCES tab_currency(col_id); -< -< -< -- -< -- Name: tab_product_currency_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: ddl_test_user -< -- -< -< ALTER TABLE tab_product -< ADD CONSTRAINT tab_product_currency_id_fkey FOREIGN KEY (currency_id) REFERENCES tab_currency(col_id); -< -< -< -- diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/create_table.sql b/src/test/subscription/testcase/ddl_replication_sql/A/create_table.sql index 39fa0676e2..be55e3ed9b 100644 --- a/src/test/subscription/testcase/ddl_replication_sql/A/create_table.sql +++ b/src/test/subscription/testcase/ddl_replication_sql/A/create_table.sql @@ -767,6 +767,15 @@ INSERT INTO func_index_heap VALUES('ABCD', 'EF'); -- but this shouldn't: INSERT INTO func_index_heap VALUES('QWERTY'); +DROP TABLE if exists tb_truncate; +CREATE TABLE tb_truncate (f1 text primary key, f2 text); +INSERT INTO tb_truncate VALUES('ABC','DEF'); +INSERT INTO tb_truncate VALUES('AB','CDEFG'); +INSERT INTO tb_truncate VALUES('QWE','RTY'); +INSERT INTO tb_truncate VALUES('ABCD', 'EF'); +INSERT INTO tb_truncate VALUES('QWERTY','IJBN'); +truncate TABLE tb_truncate; + -- -- Also try building functional, expressional, and partial indexes on -- tables that already contain data. 
@@ -820,3 +829,15 @@ DROP INDEX CONCURRENTLY "concur_index4"; DROP INDEX CONCURRENTLY "concur_index5"; DROP INDEX CONCURRENTLY "concur_index1"; DROP INDEX CONCURRENTLY "concur_heap_expr_idx"; + + +CREATE TABLE Ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text); +CREATE INDEX ctlt1_b_key ON Ctlt1 (b); +CREATE INDEX ctlt1_fnidx ON Ctlt1 ((a || b)); +COMMENT ON COLUMN Ctlt1.a IS 'A'; +COMMENT ON COLUMN Ctlt1.b IS 'B'; +COMMENT ON CONSTRAINT ctlt1_a_check ON Ctlt1 IS 't1_a_check'; +COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; +COMMENT ON INDEX ctlt1_b_key IS 'index b_key'; +ALTER TABLE Ctlt1 ALTER COLUMN a SET STORAGE MAIN; + diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.setup b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.setup new file mode 100644 index 0000000000..234cbb4cd1 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.setup @@ -0,0 +1,9 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $pub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" + +exec_sql_with_user $case_use_db $sub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.sql new file mode 100644 index 0000000000..c1c47769c8 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.sql @@ -0,0 +1,55 @@ +-- +-- IMMUTABLE | STABLE | VOLATILE +-- +CREATE FUNCTION functest_B_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_B_2(int) RETURNS bool LANGUAGE 'sql' + IMMUTABLE AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_B_3(int) RETURNS bool LANGUAGE 'sql' + STABLE AS 'SELECT $1 = 0'; +CREATE FUNCTION functest_B_4(int) RETURNS bool LANGUAGE 'sql' + 
VOLATILE AS 'SELECT $1 < 0'; +ALTER FUNCTION functest_B_2(int) VOLATILE; + +-- +-- SECURITY DEFINER | INVOKER +-- +CREATE FUNCTION functest_C_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_C_2(int) RETURNS bool LANGUAGE 'sql' + SECURITY DEFINER AS 'SELECT $1 = 0'; +CREATE FUNCTION functest_C_3(int) RETURNS bool LANGUAGE 'sql' + SECURITY INVOKER AS 'SELECT $1 < 0'; +ALTER FUNCTION functest_C_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_C_2(int) SECURITY INVOKER; +ALTER FUNCTION functest_C_3(int) SECURITY DEFINER; + +-- +-- LEAKPROOF +-- +CREATE FUNCTION functest_E_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 100'; +CREATE FUNCTION functest_E_2(int) RETURNS bool LANGUAGE 'sql' + LEAKPROOF AS 'SELECT $1 > 100'; +ALTER FUNCTION functest_E_1(int) LEAKPROOF; +ALTER FUNCTION functest_E_2(int) STABLE; -- unrelated change, no effect +ALTER FUNCTION functest_E_2(int) NOT LEAKPROOF; -- remove leakproog attribute +-- it takes superuser privilege to turn on leakproof, but not for turn off +--ALTER FUNCTION functest_E_1(int) OWNER TO regtest_unpriv_user; +--ALTER FUNCTION functest_E_2(int) OWNER TO regtest_unpriv_user; + + +-- +-- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT +-- +CREATE FUNCTION functest_F_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 50'; +CREATE FUNCTION functest_F_2(int) RETURNS bool LANGUAGE 'sql' + CALLED ON NULL INPUT AS 'SELECT $1 = 50'; +CREATE FUNCTION functest_F_3(int) RETURNS bool LANGUAGE 'sql' + RETURNS NULL ON NULL INPUT AS 'SELECT $1 < 50'; +CREATE FUNCTION functest_F_4(int) RETURNS bool LANGUAGE 'sql' + STRICT AS 'SELECT $1 = 50'; +ALTER FUNCTION functest_F_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_F_2(int) STRICT; +ALTER FUNCTION functest_F_3(int) CALLED ON NULL INPUT; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.teardown 
b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.teardown new file mode 100644 index 0000000000..4e450e63b0 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_function.teardown @@ -0,0 +1,8 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $sub_node1_port "DROP USER regtest_unpriv_user" +exec_sql_with_user $case_use_db $pub_node1_port "DROP USER regtest_unpriv_user" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table.sql new file mode 100644 index 0000000000..5c8d7126b1 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table.sql @@ -0,0 +1,2418 @@ +-- +--FOR BLACKLIST FEATURE: REFERENCES/INHERITS/WITH OIDS/RULE/CREATE TYPE/DOMAIN is not supported. +-- +-- test inheritance + +create table dropColumn (a int, b int, e int); +create table dropColumnChild (c int) inherits (dropColumn); +create table dropColumnAnother (d int) inherits (dropColumnChild); + +-- these two should fail +alter table dropColumnchild drop column a; +alter table only dropColumnChild drop column b; + + + +-- these three should work +alter table only dropColumn drop column e; +alter table dropColumnChild drop column c; +alter table dropColumn drop column a; + +create table renameColumn (a int); +create table renameColumnChild (b int) inherits (renameColumn); +create table renameColumnAnother (c int) inherits (renameColumnChild); + +-- these three should fail +alter table renameColumnChild rename column a to d; +alter table only renameColumnChild rename column a to d; +alter table only renameColumn rename column a to d; + +-- these should work +alter table renameColumn rename column a to d; +alter table renameColumnChild rename column b to a; + +-- these should work +alter table if exists doesnt_exist_tab rename 
column a to d; +alter table if exists doesnt_exist_tab rename column b to a; + +-- this should work +alter table renameColumn add column w int; + +-- this should fail +alter table only renameColumn add column x int; + + +-- Test corner cases in dropping of inherited columns + +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +-- should work +alter table p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +select f1 from c1; +alter table c1 drop column f1; +select f1 from c1; + +drop table p1 cascade; + +create table p1 (f1 int, f2 int); +create table c1 () inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +alter table p1 drop column f1; +-- c1.f1 is dropped now, since there is no local definition for it +select f1 from c1; + +drop table p1 cascade; + +create table p1 (f1 int, f2 int); +create table c1 () inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +alter table only p1 drop column f1; +-- c1.f1 is NOT dropped, but must now be considered non-inherited +alter table c1 drop column f1; + +drop table p1 cascade; + +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +alter table only p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +alter table c1 drop column f1; + +drop table p1 cascade; + +create table p1(id int, name text); +create table p2(id2 int, name text, height int); +create table c1(age int) inherits(p1,p2); +create table gc1() inherits (c1); + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +-- should work +alter table only 
p1 drop column name; +-- should work. Now c1.name is local and inhcount is 0. +alter table p2 drop column name; +-- should be rejected since its inherited +alter table gc1 drop column name; +-- should work, and drop gc1.name along +alter table c1 drop column name; +-- should fail: column does not exist +alter table gc1 drop column name; +-- should work and drop the attribute in all tables +alter table p2 drop column height; + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +drop table p1, p2 cascade; + +-- +-- Test the ALTER TABLE SET WITH/WITHOUT OIDS command +-- +create table altstartwith (col integer) with oids; + +insert into altstartwith values (1); + +select oid > 0, * from altstartwith; + +alter table altstartwith set without oids; + +select oid > 0, * from altstartwith; -- fails +select * from altstartwith; + +alter table altstartwith set with oids; + +select oid > 0, * from altstartwith; + +drop table altstartwith; + +-- Check inheritance cases +create table altwithoid (col integer) with oids; + +-- Inherits parents oid column anyway +create table altinhoid () inherits (altwithoid) without oids; + +insert into altinhoid values (1); + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +alter table altwithoid set without oids; + +select oid > 0, * from altwithoid; -- fails +select oid > 0, * from altinhoid; -- fails +select * from altwithoid; +select * from altinhoid; + +alter table altwithoid set with oids; + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +drop table altwithoid cascade; + +create table altwithoid (col integer) without oids; + +-- child can have local oid column +create table altinhoid () inherits (altwithoid) with oids; + +insert into altinhoid values (1); + +select oid > 0, * from altwithoid; -- fails +select oid 
> 0, * from altinhoid; + +alter table altwithoid set with oids; + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +-- the child's local definition should remain +alter table altwithoid set without oids; + +select oid > 0, * from altwithoid; -- fails +select oid > 0, * from altinhoid; + +drop table altwithoid cascade; + +-- test renumbering of child-table columns in inherited operations + +create table p1 (f1 int); +create table c1 (f2 text, f3 int) inherits (p1); + +alter table p1 add column a1 int check (a1 > 0); +alter table p1 add column f2 text; + +insert into p1 values (1,2,'abc'); +insert into c1 values(11,'xyz',33,0); -- should fail +insert into c1 values(11,'xyz',33,22); + +select * from p1 order by f1; +update p1 set a1 = a1 + 1, f2 = upper(f2); +select * from p1 order by f1; + +drop table p1 cascade; + +-- test that operations with a dropped column do not try to reference +-- its datatype + +--create domain mytype as text; +--create table foo (f1 text, f2 mytype, f3 text);; + +insert into foo values('bb','cc','dd'); +select * from foo order by f1; + +--drop domain mytype cascade; + +--select * from foo order by f1; +--insert into foo values('qq','rr'); +--select * from foo order by f1; +--update foo set f3 = 'zz'; +--select * from foo order by f1; +--select f3,max(f1) from foo group by f3; + +-- Simple tests for alter table column type +--delete from foo where f1 = 'qq'; +--alter table foo alter f1 TYPE integer; -- fails +--alter table foo alter f1 TYPE varchar(10); +--drop table foo; + +create table anothertab (atcol1 serial8, atcol2 boolean, + constraint anothertab_chk check (atcol1 <= 3));; +alter table anothertab replica identity full; +insert into anothertab (atcol1, atcol2) values (1, true); +insert into anothertab (atcol1, atcol2) values (3, false); +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol1 type boolean; -- we cannot support this cast with numeric nextval +alter table 
anothertab alter column atcol1 type integer; + +select * from anothertab order by atcol1, atcol2; + +insert into anothertab (atcol1, atcol2) values (45, null); -- fails +--insert into anothertab (atcol1, atcol2) values (default, null); + +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol2 type text + using case when atcol2 is true then 'IT WAS TRUE' + when atcol2 is false then 'IT WAS FALSE' + else 'IT WAS NULL!' end; + +select * from anothertab order by atcol1, atcol2; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab alter column atcol1 drop default; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab drop constraint anothertab_chk; +alter table anothertab drop constraint anothertab_chk; -- fails +alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds + +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; + +select * from anothertab order by atcol1, atcol2; + +-- drop table anothertab; + +create table another (f1 int, f2 text);; +alter table another replica identity full; +insert into another values(1, 'one'); +insert into another values(2, 'two'); +insert into another values(3, 'three'); + +select * from another order by f1, f2; + +alter table another + alter f1 type text using f2 || ' more', + alter f2 type bigint using f1 * 10; + +select * from another order by f1, f2; + +-- drop table another; + +-- table's row type +create table tab1 (a int, b text); +create table tab2 (x int, y tab1); +alter table tab1 alter column b type varchar; -- fails + +-- disallow recursive containment of row types +create table recur1 (f1 int); +alter table recur1 add column f2 recur1; -- fails +alter table recur1 add column f2 recur1[]; -- fails +--create domain 
array_of_recur1 as recur1[]; +--alter table recur1 add column f2 array_of_recur1; -- fails +create table recur2 (f1 int, f2 recur1); +alter table recur1 add column f2 recur2; -- fails +alter table recur1 add column f2 int; +alter table recur1 alter column f2 type recur2; -- fails + +-- SET STORAGE may need to add a TOAST table +create table test_storage (a text); +alter table test_storage alter a set storage plain; +alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table +alter table test_storage alter a set storage extended; -- re-add TOAST table + +select reltoastrelid <> 0 as has_toast_table +from pg_class +where oid = 'test_storage'::regclass; + +-- ALTER TYPE with a check constraint and a child table (bug before Nov 2012) +CREATE TABLE test_inh_check (a float check (a > 10.2)); +CREATE TABLE test_inh_check_child() INHERITS(test_inh_check); +ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric; +\d test_inh_check +\d test_inh_check_child + +-- +-- lock levels +-- +drop type lockmodes; +create type lockmodes as enum ( + 'AccessShareLock' +,'RowShareLock' +,'RowExclusiveLock' +,'ShareUpdateExclusiveLock' +,'ShareLock' +,'ShareRowExclusiveLock' +,'ExclusiveLock' +,'AccessExclusiveLock' +); + +drop view my_locks; +create or replace view my_locks as +select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode +from pg_locks l join pg_class c on l.relation = c.oid +where virtualtransaction = ( + select virtualtransaction + from pg_locks + where transactionid = txid_current()::integer) +and locktype = 'relation' +and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') +and c.relname != 'my_locks' +group by c.relname; + +create table alterlock (f1 int primary key, f2 text); + +start transaction; alter table alterlock alter column f2 set statistics 150; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock cluster on 
alterlock_pkey; +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set without cluster; +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (fillfactor = 100); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock reset (fillfactor); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (toast.autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock alter column f2 set (n_distinct = 1); +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set storage extended; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set default 'x'; +select * from my_locks order by 1; +rollback; + +-- cleanup +drop table alterlock; +drop view my_locks; +drop type lockmodes; + +-- +-- alter function +-- +-- create function test_strict(text) returns text as +-- 'select coalesce($1, ''got passed a null'');' +-- language sql returns null on null input; +-- select test_strict(NULL); +-- alter function test_strict(text) called on null input; +-- select test_strict(NULL); + +-- create function non_strict(text) returns text as +-- 'select coalesce($1, ''got passed a null'');' +-- language sql called on null input; +-- select non_strict(NULL); +-- alter function non_strict(text) returns null on null input; +-- select non_strict(NULL); + +-- +-- alter object set schema +-- + +create schema alter1; +create schema alter2; + +create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0)); + +create view alter1.v1 as select * from alter1.t1; + +create function alter1.plus1(int) returns int as 'select $1+1' language sql; + +--create domain alter1.posint integer 
check (value > 0); + +create type alter1.ctype as (f1 int, f2 text); + +create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql +as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2'; + +create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype); + +create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as + operator 1 alter1.=(alter1.ctype, alter1.ctype); + +create conversion alter1.ascii_to_utf8 for 'sql_ascii' to 'utf8' from ascii_to_utf8; + +create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +create text search configuration alter1.cfg(parser = alter1.prs); +create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize); +create text search dictionary alter1.dict(template = alter1.tmpl); + +insert into alter1.t1(f2) values(11); +insert into alter1.t1(f2) values(12); + +alter table alter1.t1 set schema alter2; +alter table alter1.v1 set schema alter2; +alter function alter1.plus1(int) set schema alter2; +--alter domain alter1.posint set schema alter2; +alter operator class alter1.ctype_hash_ops using hash set schema alter2; +alter operator family alter1.ctype_hash_ops using hash set schema alter2; +alter operator alter1.=(alter1.ctype, alter1.ctype) set schema alter2; +alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2; +alter type alter1.ctype set schema alter2; +alter conversion alter1.ascii_to_utf8 set schema alter2; +alter text search parser alter1.prs set schema alter2; +alter text search configuration alter1.cfg set schema alter2; +alter text search template alter1.tmpl set schema alter2; +alter text search dictionary alter1.dict set schema alter2; + +-- this should succeed because nothing is left in alter1 +drop schema alter1; + +insert into alter2.t1(f2) values(13); +insert into alter2.t1(f2) values(14); + 
+select * from alter2.t1 order by f1, f2; + +select * from alter2.v1 order by f1, f2; + +select alter2.plus1(41); + +-- clean up +drop schema alter2 cascade; +drop schema alter1 cascade; + +-- +-- composite types +-- + +CREATE TYPE test_type AS (a int); +\d test_type + +ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ADD ATTRIBUTE b text; +\d test_type + +ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar; +\d test_type + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE b; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE c; -- fails + +ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c; + +ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean; +\d test_type + +ALTER TYPE test_type RENAME ATTRIBUTE a TO aa; +ALTER TYPE test_type RENAME ATTRIBUTE d TO dd; +\d test_type + +DROP TYPE test_type; + +CREATE TYPE test_type1 AS (a int, b text); +CREATE TABLE test_tbl1 (x int, y test_type1); +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails + +CREATE TYPE test_type2 AS (a int, b text); +CREATE TABLE test_tbl2 OF test_type2; +CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2); +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails +ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails +ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; +\d test_type2 +\d test_tbl2 +\d test_tbl2_subclass + +DROP TABLE test_tbl2_subclass; +alter table test_tbl2 not of; +-- This test isn't that interesting 
on its own, but the purpose is to leave +-- behind a table to test pg_upgrade with. The table has a composite type +-- column in it, and the composite type has a dropped attribute. +CREATE TYPE test_type3 AS (a int); +CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3; +ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int; + +CREATE TYPE test_type_empty AS (); + +-- +-- typed tables: OF / NOT OF +-- + +CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2)); +ALTER TYPE tt_t0 DROP ATTRIBUTE z; +CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK +CREATE TABLE tt1 (x int, y bigint); -- wrong base type +CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod +CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order +CREATE TABLE tt4 (x int); -- too few columns +CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns +CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent +CREATE TABLE tt7 (x int, q text, y numeric(8,2)) WITH OIDS; +ALTER TABLE tt7 DROP q; -- OK + +ALTER TABLE tt0 OF tt_t0; +ALTER TABLE tt1 OF tt_t0; +ALTER TABLE tt2 OF tt_t0; +ALTER TABLE tt3 OF tt_t0; +ALTER TABLE tt4 OF tt_t0; +ALTER TABLE tt5 OF tt_t0; +ALTER TABLE tt6 OF tt_t0; +ALTER TABLE tt7 OF tt_t0; + +CREATE TYPE tt_t1 AS (x int, y numeric(8,2)); +ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table +ALTER TABLE tt7 NOT OF; +\d tt7 +alter table tt0 not of; +-- make sure we can drop a constraint on the parent but it remains on the child +CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL)); +CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent); +ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check"; +-- should fail +INSERT INTO test_drop_constr_child (c) VALUES (NULL); +DROP TABLE test_drop_constr_parent CASCADE; + +-- +-- IF EXISTS test +-- +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 
ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +CREATE TABLE tt8(a int); +CREATE SCHEMA alter2; + +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +\d alter2.tt8 + +DROP TABLE alter2.tt8; +DROP SCHEMA alter2; + +-- create database test_first_after_A dbcompatibility 'A'; +-- \c test_first_after_A + +-- test add column ... first | after columnname +-- common scenatios +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 clob first, add f7 blob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +\d+ t1 +select * from t1; + +-- 1 primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 blob first, add f7 clob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +select * from t1; +------------------------------------------------------------------------------------------- +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3; +\d+ t1; + +-- 2 unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 
varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3; +select * from t1; + +-- 3 default and generated column +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3; +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1, 2, 3), (11, 22, 33); +alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5; +select * from t1; + +-- 5 NULL and NOT NULL +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3; +\d+ t1 + +-- 6 check constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3; +select * from t1; + +-- 7 foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1 values('a', 1), ('b', 2); +create table t_pri2(f1 text, f2 bool, f4 int primary key); +insert into t_pri2 values('a', true, 1), ('b', false, 2); 
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +insert into t1 values(1, 2, true), (2, 2, false); +alter table t1 drop f2, add f4 int references t_pri2(f4) first; +select * from t1; +alter table t1 drop f4, add f4 int references t_pri2(f4) after f1; +select * from t1; + +-- partition table +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +-- subpartition table +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); +insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +-- test pg_partition +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where 
relname='range_range') order by relname; +alter table range_range add f1 int default 1 first, add f2 text after id; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; +select * from range_range; + +-- pg_index test +drop table if exists t1 cascade; +create table t1 +( + f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int, + primary key(f1, f2), + unique(f3, f4), + check(f5 = 10) +); +create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0; + +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +alter table t1 add f8 int first, add f9 int unique after f1; +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +-- pg_attribute test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 add f4 int default 4 first; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 drop f2, add f5 int default 5 after f1; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +-- pg_attrdef test +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored); +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 add f5 text default 'aaa' first; +\d+ t1 +select 
adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1; +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +-- pg_depend test +drop table if exists t1 cascade; +create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored); +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +alter table t1 add t1 add f4 int first; +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; +alter table t1 drop f2, add f6 int, add f7 int generated always as (f1 + f6) stored after f1; +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +-- pg_rewrite test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +insert into t1 values(1, 2, 3), (11, 22, 33); +create view t1_view1 as select * from t1; +create view t1_view2 as select f1, f2 from t1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; + +-- pg_trigger test +drop table if 
exists t1 cascade; +create table t1(f1 boolean not null, f2 text, f3 int, f4 date); +alter table t1 add primary key(f1); +create or replace function dummy_update_func() returns trigger as $$ +begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; + +drop trigger if exists f1_trig_update on t1; +drop trigger if exists f1_trig_insert on t1; + +create trigger f1_trig_update after update of f1 on t1 for each row + when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); +create trigger f1_trig_insert after insert on t1 for each row + when (not new.f1) execute procedure dummy_update_func('insert'); + +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +alter table t1 add f5 int after f1, add f6 boolean first; +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +-- pg_rlspolicy test +drop table if exists t1 cascade; +drop role if exists test_rlspolicy; +create role test_rlspolicy nologin password 'Gauss_234'; +create table t1 (f1 int, f2 int, f3 text) partition by range (f1) +( + partition t1_p0 values less than(10), + partition t1_p1 values less than(50), + partition t1_p2 values less than(100), + partition t1_p3 values less than(MAXVALUE) +); + +INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei'); +grant select on t1 to public; + +create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20); +create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy using (f1 < 30); + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as 
(f2 + 100) stored first; +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); +drop table if exists t1 cascade; + +-- \c postgres +-- drop database test_first_after_A; + +-- test add column ... first | after columnname in B compatibility +-- create database test_first_after_B dbcompatibility 'b'; +-- \c test_first_after_B + +-- test add column ... first | after columnname in astore table +-- ASTORE table +-- common scenatios +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 clob first, add f7 blob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +\d+ t1 +select * from t1; + +-- 1 primary key +-- 1.1.1 primary key in original table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 clob first, add f7 blob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +\d+ t1 + +-- 1.1.2 primary key in original table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 blob first, add f7 clob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +select * from t1; + +-- 1.2.1 primary key in a table without data, add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +-- error +alter table t1 add f6 text, add 
f7 int primary key first, add f8 float after f3; +select * from t1; + +-- 1.2.2 primary key in a table with data, add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3; +select * from t1; + +-- 1.3.1 primary key in a table without data, drop primary key, then add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3; +\d+ t1; + +-- 1.3.2 primary key in a table with data, drop primary key, then add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1; +-- error +alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3; +select * from t1; + +-- 1.4.1 primary key in a table without data, drop primary key, the add column with primary key and default +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key default 7 first, add f8 float after f3; +\d+ t1 + +-- 1.4.2 primary key in a table with data, drop primary key, then add column with primary key and default +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', 
x'42', false); +alter table t1 drop f1; +-- error +alter table t1 add f6 text, add f7 int primary key default 7 first, add f8 float after f3; +select * from t1; + +-- 1.5.1 primary key in a table without data, drop primary key, the add column with primary key and auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3; +\d+ t1 + +-- 1.5.2 primary key in a table with data, drop primary key, the add column with primary key and auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3; +select * from t1; + +-- 2 unique index +-- 2.1.1 unique index in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int first, add f7 float after f3; +\d+ t1 + +-- 2.1.2 unique index in a table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int first, add f7 float after f3; +select * from t1; + +-- 2.2.1 unique index in a table without data, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int unique first, add f7 float unique after f3; +\d+ t1 + +-- 2.2.2 unique index in a table 
with data, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int unique first, add f7 float unique after f3; +select * from t1; + +-- 2.3.1 unique index in a table without data, drop unique index, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3; +\d+ t1 + +-- 2.3.2 unique index in a table with data, drop unique index, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3; +select * from t1; + +-- 2.4.1 unique index in a table without data, drop unique index, add column with unique index and default +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int unique default 6 first; +alter table t1 drop f1, add f7 float unique default 7 after f3; +\d+ t1 + +-- 2.4.2 unique index in a table with data, drop unique index, add column with unique index and default +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 int unique default 6 first; +alter table t1 drop f1; +-- error +alter table t1 add f7 float unique default 7 after f3; +select * from t1; + +-- 
3 default and generated column +-- 3.1.1 default in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int first, add f7 float after f3; +\d+ t1 + +-- 3.1.2 default in a table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int first, add f7 float after f3; +select * from t1; + +-- 3.2.1 default in a table without data, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int default 6 first, add f7 float default 7 after f3; +\d+ t1 + +-- 3.2.2 default in a table with data, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int default 6 first, add f7 float default 7 after f3; +select * from t1; + +-- 3.3.1 default in a table without data, drop default, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 int default 6 first, add f7 float default 7 after f3; +\d+ t1 + +-- 3.3.2 default in a table with data, drop default, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); 
+alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3; +select * from t1; + +-- 3.4.1 generated column in a table without data, drop generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5; +\d+ t1 + +-- 3.4.1 generated column in a table with data, drop generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1, 2, 3), (11, 22, 33); +alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5; +select * from t1; + +-- 3.5.1 generated column in a table without data, add generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f2 + f3) stored after f5; +\d+ t1; + +-- 3.5.2 generated column in table with data, add generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1, 2, 3), (11, 22, 33); +alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5; +select * from t1; + +-- 4 auto_increment +-- 4.1.1 auto_increment in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 text first, add f7 float after f3; +\d+ t1 + +-- 4.1.2 auto_increment in a table with data, 
add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 text first, add f7 float after f3; +select * from t1; + +-- 4.2.1 auto_increment in a table without data, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +-- error +alter table t1 add f6 int primary key auto_increment first; +-- error +alter table t1 add f7 int primary key auto_increment after f3; +\d+ t1 + +-- 4.2.2 auto_increment in a table with data, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 int primary key auto_increment first; +-- error +alter table t1 add f7 int primary key auto_increment after f3; +select * from t1; + +-- 4.3.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 int primary key auto_increment first; +\d+ t1 + +-- 4.3.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int primary key 
auto_increment first; + +-- 4.4.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment and default +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1; +-- error +alter table t1 add f6 int primary key auto_increment default 6 first; +\d+ t1 + +-- 4.4.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment and default +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1; +-- error +alter table t1 add f6 int primary key auto_increment default 6 first; +select * from t1; + +-- 5 NULL and NOT NULL +-- 5.1.1 null and not null in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 text first, add f7 float after f3; +\d+ t1 + +-- 5.1.2 null and not null in a table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 text first, add f7 float after f3; +select * from t1; + +-- 5.2.1 null and not null in table without data, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int null first; +alter table t1 add f7 float not null after f3; +\d+ t1 + +-- 5.2.2 null and not null in a table with data, add column with null or not null +drop 
table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int null first; +-- error +alter table t1 add f7 float not null after f3; +select * from t1; + +-- 5.3.1 null and not null in a table without data, drop null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, add f6 int null first, add f7 float not null after f3; +\d+ t1 + +-- 5.3.2 null and not null in a table with data, drop null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int null first; +-- error +alter table t1 add f7 float not null after f3; +select * from t1; + +-- 5.4.1 null and not null in a table without data, drop null and not null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3; +\d+ t1 + +-- 5.4.2 null and not null in a table without data, drop null and not null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, drop f2, add f6 int null first; +-- error +alter table t1 add f7 float not null after f3; +select * from t1; + +-- 6 check 
constraint +-- 6.1.1 check constraint in a table without data, add column without constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 text first, add f7 float after f3; +\d+ t1 + +-- 6.1.2 check constraint in a table with data, add column without constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 text first, add f7 float after f3; +select * from t1; + +-- 6.2.1 check constraint in a table without data, add column with check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7); +\d+ t1 + +-- 6.2.2 check constraint in a table with data, add column with check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7) after f3; +select * from t1; + +-- 6.3.1 check constraint in a table without data, drop check, add column with check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check (f7 - f1 > 0) after f3; +\d+ t1 + +-- 6.3.2 check constraint in a table with data, drop check, add column with with check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); 
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3; +select * from t1; + +-- 7 foreign key +-- 7.1.1 foreign key constraint in a table without data, add column without constraint +drop table if exists t_pri1 cascade; +create table t_pri1(f1 int, f2 int primary key); +drop table if exists t1 cascade; +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 add f4 int, add f5 text first, add f6 float after f2; +\d+ t1 + +-- 7.1.2 foreign key constraint in a table with data, add column without constraint +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1 values('a', 1), ('b', 2); +create table t1(f1 text, f2 int references t_pri1(f2), f3 bool); +insert into t1 values('a', 1, true), ('b', 2, false); +alter table t1 add f4 int, add f5 text first, add f6 float after f2; +select * from t1; + +-- 7.2.1 foreign key constraint in a table without data, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +create table t_pri2(f1 int, f2 int, f4 int primary key); +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 add f4 int references t_pri2(f4) first; +\d+ t1 +alter table t1 drop f4, add f4 int references t_pri2(f4) after f2; +\d+ t1 + +-- 7.2.2 foreign key constraint in a table with data, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1 values('a', 1), ('b', 2); +create table t_pri2(f1 int, f2 bool, f4 int primary key); +insert into t_pri2 values(11, true, 1), (22, false, 2); 
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +insert into t1 values(1, 1, true), (2, 2, false); +alter table t1 add f4 int references t_pri2(f4) first; +select * from t1; +alter table t1 drop f4, add f4 int references t_pri2(f4) after f2; +select * from t1; + +-- 7.3.1 foreign key constraint in a table without data, drop foreign key, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 int, f2 int primary key); +create table t_pri2(f1 int, f2 int, f4 int primary key); +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 drop f2, add f4 int references t_pri2(f4) first; +\d+ t1 +alter table t1 drop f4, add f4 int references t_pri2(f4) after f1; +\d+ t1 + +-- 7.3.2 foreign key constraint in a table with data, drop foreign key, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1 values('a', 1), ('b', 2); +create table t_pri2(f1 text, f2 bool, f4 int primary key); +insert into t_pri2 values('a', true, 1), ('b', false, 2); +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +insert into t1 values(1, 2, true), (2, 2, false); +alter table t1 drop f2, add f4 int references t_pri2(f4) first; +select * from t1; +alter table t1 drop f4, add f4 int references t_pri2(f4) after f1; +select * from t1; + +-- partition table +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1 
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +-- subpartition table +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); +insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +-- test pg_partition +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; +alter table range_range add f1 int default 1 first, add f2 text after id; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; +select * from range_range; + + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (orientation = column); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 text first; +-- error +alter table t1 add f6 text after f1; + +-- pg_index test +drop table if exists t1 cascade; +create table t1 +( + f1 int, f2 int, f3 varchar(20), f4 int, f5 int, 
f6 int, f7 int, + primary key(f1, f2), + unique((lower(f3)), (abs(f4))), + check(f5 = 10) +); +create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0; + +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +alter table t1 add f8 int first, add f9 int unique after f1; +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +-- pg_attribute test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 add f4 int default 4 first; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 drop f2, add f5 int default 5 after f1; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +-- pg_attrdef test +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored); +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 add f5 text default 'aaa' first; +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1; -- ERROR + +-- pg_rewrite test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +insert into t1 values(1, 2, 3), (11, 22, 33); +create view t1_view1 as select * 
from t1; +create view t1_view2 as select f1, f2 from t1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; + +-- pg_trigger test +drop table if exists t1 cascade; +create table t1(f1 boolean not null, f2 text, f3 int, f4 date); +alter table t1 add primary key(f1); +create or replace function dummy_update_func() returns trigger as $$ +begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; + +drop trigger if exists f1_trig_update on t1; +drop trigger if exists f1_trig_insert on t1; + +create trigger f1_trig_update after update of f1 on t1 for each row + when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); +create trigger f1_trig_insert after insert on t1 for each row + when (not new.f1) execute procedure dummy_update_func('insert'); + +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +alter table t1 add f5 int after f1, add f6 boolean first; +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +-- pg_rlspolicy test +drop table if exists t1 cascade; +drop role if exists test_rlspolicy2; +create role test_rlspolicy2 nologin password 'Gauss_234'; +create table t1 (f1 int, f2 int, f3 text) partition by range (f1) +( + partition t1_p0 values less than(10), + partition t1_p1 values less than(50), + partition t1_p2 values less than(100), + partition t1_p3 values less than(MAXVALUE) +); + +INSERT INTO t1 VALUES (generate_series(1, 150) % 24, 
generate_series(1, 150), 'huawei'); +grant select on t1 to public; + +create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20); +create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy2 using (f1 < 30); + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 + 100) stored first; +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +-- expression test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text); +insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei'); +-- T_FuncExpr +create index t1_idx1 on t1(abs(f1), f2); +-- T_OpExpr +create index t1_idx2 on t1((f1 + f2), (f1 - f3)); +-- T_BooleanTest +create index t1_idx3 on t1((f4 is true)); +-- T_CaseExpr and T_CaseWhen +create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end)); +-- T_ArrayExpr +create index t1_idx5 on t1((array[f1, f2, f3])); +-- T_TypeCast +create index t1_idx6 on t1(((f1 + f2 + 1) :: text)); +-- T_BoolExpr +create index t1_idx7 on t1((f1 and f2), (f2 or f3)); +-- T_ArrayRef +create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1])); +-- T_ScalarArrayOpExpr +create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10]))); +-- T_RowCompareExpr +create index t1_idx10 on t1((row(f1, f5) < row(f2, f6))); +-- T_MinMaxExpr +create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3)); +-- T_RowExpr +drop table if exists mytable cascade; +create table mytable(f1 int, f2 int, f3 text); +-- create function getf1(mytable) returns int as 'select $1.f1' language sql; +-- create index t1_idx12 on t1(getf1(row(f1, 2, 'a'))); +-- T_CoalesceExpr +create index t1_idx13 on t1(nvl(f1, 
f2)); +-- T_NullTest +create index t1_idx14 on t1((f1 is null)); +-- T_ScalarArrayOpExpr +create index t1_idx16 on t1((f1 in (1,2,3))); +-- T_NullIfExpr +create index t1_idx17 on t1(nullif(f5,f6)); +-- T_RelabelType +alter table t1 add f7 oid; +create index t1_idx18 on t1((f7::int4)); +-- T_CoerceViaIO +alter table t1 add f8 json; +create index t1_idx19 on t1((f8::jsonb)); +-- T_ArrayCoerceExpr +alter table t1 add f9 float[]; +create index t1_idx20 on t1((f9::int[])); +-- T_PrefixKey +create index t1_idx21 on t1(f6(5)); + +\d+ t1 +select * from t1; + +alter table t1 add f10 int primary key auto_increment after f4, + add f11 int generated always as (f1 + f2) stored after f1, + add f12 date default '2023-01-05' first, + add f13 int not null default 13 first; + +\d+ t1 +select * from t1; + +-- test modify column ... first | after column in astore table +-- ASTORE table +-- common scenarios +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4, modify f5 bool after f2; +\d+ t1 +select * from t1; +-- NOTE(review): commented out dangling incomplete statement "alter table t1 modify" (leftover; psql would concatenate it with the next statement and fail) + +-- 1 primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1
+select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +-- 2 unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +-- 3 default and generated column +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first; +\d+ t1 +alter table t1 drop f1; +\d+ t1 + +drop table if 
exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1,2,3),(11,22,33); +alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first; +\d+ t1 +select * from t1; +alter table t1 drop f1; +\d+ t1 +select * from t1; + +-- 4 auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +\d+ t1 +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1(f2, f3, f4, f5) values('a', '2022-11-08 19:56:10.158564', x'41', true), ('b', '2022-11-09 19:56:10.158564', x'42', false); +\d+ t1 +select * from t1; +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +insert into t1(f3, f2, f4, f5, f1) values('2022-11-10 19:56:10.158564', 'c', x'43', false, 3); +select f1 from t1; + +-- 5 NULL and NOT NULL +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +alter table t1 modify f2 varchar(20) after f3; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; +alter table t1 modify f2 varchar(20)
after f3; +\d+ t1 +select * from t1; + +-- 6 check constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +-- 7 foreign key +drop table if exists t_pri1 cascade; +create table t_pri1(f1 int, f2 int primary key); +drop table if exists t1 cascade; +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 modify f2 int first; +\d+ t1 +alter table t1 modify f2 int after f3; +\d+ t1 + +drop table if exists t_pri1 cascade; +create table t_pri1(f1 int, f2 int primary key); +insert into t_pri1 values(1,1),(2,2); +drop table if exists t1 cascade; +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +insert into t1 values(1, 1, true), (2, 2, false); +alter table t1 modify f2 int first; +\d+ t1 +select * from t1; +alter table t1 modify f2 int after f3; +\d+ t1 +select * from t1; + +-- partition table +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int, primary key (f1, f2)) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table 
t1 modify f1 int after f2, modify f3 int first, modify f2 int first; +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table t1 modify f1 int after f2; +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +-- modify operation before add +alter table t1 add f4 int after f2, modify f1 int after f2; +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); + +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int, primary key (f1, f2)) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +insert into t1 values(9, -1, 1), (19, -1, 2), (29, -1, 3); +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); +select * from t1 partition (t1_p2); + +alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); +select * from t1 partition (t1_p2); + +alter table t1 modify f1 int after f2; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); +select * from t1 partition (t1_p2); + +alter table t1 add f4 int after f2, modify f1 int after f2; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); 
+select * from t1 partition (t1_p2); + +-- subpartition table +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday)) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify birthday date first, modify id int after gender; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + + +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday)) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); + +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); 
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify birthday date first, modify id int after gender; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +select * from range_range; + +-- pg_index test +drop table if exists t1 cascade; +create table t1 +( + f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int, + primary key(f1, f2), + unique((lower(f3)), (abs(f4))), + check(f5 = 10) +); +create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0; + +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1'); + +alter table t1 modify f1 int after f2, modify f4 int after f6, modify f5 int first; +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1'); + +-- pg_attribute test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 modify f3 int first, modify f1 int after f2; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +-- pg_attrdef test +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored); +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 
modify f3 int first, modify f1 int after f4, modify f4 int first; +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +-- pg_depend test +drop table if exists t1 cascade; +create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored, f4 int, unique ((abs(f4)))); +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +alter table t1 modify f4 int first, modify f3 int after f1, modify f1 int after f2; +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +-- pg_partition test +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); +insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +\d+ range_range +select relname, parttype, partkey from pg_partition where 
parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify gender varchar after birthday; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify birthday date first, modify id int after gender; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + + +-- pg_rewrite test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, f4 int); +insert into t1 values(1, 2, 3, 4), (11, 22, 33, 44); +create view t1_view1 as select * from t1; +create view t1_view2 as select f1, f4 from t1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; +alter table t1 modify f2 int first, modify f1 int after f4, add f5 int after f4; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; + +-- pg_trigger test +drop table if exists t1 cascade; +create table t1(f1 boolean not null, f2 text, f3 int, f4 date); +alter table t1 add primary key(f1); +create or replace function dummy_update_func() returns trigger as $$ +begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; +-- NOTE(review): do not drop dummy_update_func here; the triggers created below depend on it (the parallel section above has no such drop) +drop trigger if exists f1_trig_update on t1; +drop trigger if exists f1_trig_insert on t1; + +create trigger f1_trig_update after update of f1 on t1 for each row + when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); +create trigger f1_trig_insert after insert on t1 for each row + when (not new.f1) execute procedure
dummy_update_func('insert'); + +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +alter table t1 modify f3 int first, modify f1 boolean after f4; +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +-- pg_rlspolicy test +drop table if exists t1 cascade; +drop role if exists test_rlspolicy3; +create role test_rlspolicy3 nologin password 'Gauss_234'; +create table t1 (f1 int, f2 int, f3 text) partition by range (f1) +( + partition t1_p0 values less than(10), + partition t1_p1 values less than(50), + partition t1_p2 values less than(100), + partition t1_p3 values less than(MAXVALUE) +); + +INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei'); +grant select on t1 to public; + +create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20); +create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy3 using (f1 < 30); + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +alter table t1 modify f2 int first, modify f1 int after f3; + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + + +-- expression test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text); +insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei'); +-- T_FuncExpr +create index t1_idx1 on t1(abs(f1), f2); +-- T_OpExpr +create index t1_idx2 on t1((f1 + f2), (f1 - f3)); +-- T_BooleanTest +create index t1_idx3 on t1((f4 is true)); +-- T_CaseExpr and T_CaseWhen +create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end)); +-- T_ArrayExpr +create index t1_idx5 on t1((array[f1, f2, f3])); +-- 
T_TypeCast +create index t1_idx6 on t1(((f1 + f2 + 1) :: text)); +-- T_BoolExpr +create index t1_idx7 on t1((f1 and f2), (f2 or f3)); +-- T_ArrayRef +create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1])); +-- T_ScalarArrayOpExpr +create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10]))); +-- T_RowCompareExpr +create index t1_idx10 on t1((row(f1, f5) < row(f2, f6))); +-- T_MinMaxExpr +create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3)); +-- T_RowExpr +drop table if exists mytable cascade; +create table mytable(f1 int, f2 int, f3 text); +-- create function getf1(mytable) returns int as 'select $1.f1' language sql; +-- create index t1_idx12 on t1(getf1(row(f1, 2, 'a'))); +-- T_CoalesceExpr +create index t1_idx13 on t1(nvl(f1, f2)); +-- T_NullTest +create index t1_idx14 on t1((f1 is null)); +-- T_ScalarArrayOpExpr +create index t1_idx16 on t1((f1 in (1,2,3))); +-- T_NullIfExpr +create index t1_idx17 on t1(nullif(f5,f6)); +-- T_RelabelType +alter table t1 add f7 oid; +create index t1_idx18 on t1((f7::int4)); +-- T_CoerceViaIO +alter table t1 add f8 json; +create index t1_idx19 on t1((f8::jsonb)); +-- T_ArrayCoerceExpr +alter table t1 add f9 float[]; +create index t1_idx20 on t1((f9::int[])); + +\d+ t1 +select * from t1; + +alter table t1 modify f8 json first, modify f2 int after f6, modify f7 oid after f3; + +\d+ t1 +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 int); +insert into t1 values(1,2); +alter table t1 add f3 int default 3, add f4 int default 4 after f3, add f5 int default 5, add f6 int default 6 after f3; +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 int); +insert into t1 values(1,2); +alter table t1 add f3 int default 3, add f4 int default 4 after f1, add f5 int default 5, add f6 int default 6 after f5; +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 int); +insert into t1 values(1,2); +alter table t1 add f3 int, add f4 int after f3, add f5 int, add f6 
int first; +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); + +alter table t1 drop f5, + add f6 int default 6 , add f7 int first, add f8 int default 8 after f3, + modify f3 timestamp first, modify f6 int after f2, modify f1 text, modify f2 text after f4; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, primary key(f1, f3)); +insert into t1 values(1, 2, 3), (11, 22, 33); +\d+ t1 +select * from t1; +alter table t1 modify f3 int first, modify f1 int after f2; +\d+ t1 +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +insert into t1 values(1, 2, 3), (11, 12, 13), (21, 22, 23); +select * from t1; +alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 * 10) stored first; +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2)); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +select * from t1; + +alter table t1 drop f4, + add f7 int default 7 , add f8 int first, add f9 int default 9 after f3, + modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5, + add f10 timestamp generated always as (f3) stored after f3, + add f11 int generated always as (f1 * 100) stored first; + +\d+ t1 +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 int, primary key(f1, f3)); +insert into t1 values(1, 'a', 1), (2, 'b', 2); +\d+ t1 +select * from t1; + +alter table t1 modify f1 text after f3, add f10 int default 10 after f2; +\d+ t1 +select * from t1; + +-- unlogged 
table +drop table if exists t1 cascade; +create unlogged table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2)); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +\d+ t1 +select * from t1; + +alter table t1 drop f4, + add f7 int default 7 , add f8 int first, add f9 int default 9 after f3, + modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5, + add f10 timestamp generated always as (f3) stored after f3, + add f11 int generated always as (f1 * 100) stored first; + +\d+ t1 +select * from t1; + +-- temp table +drop table if exists t1 cascade; +create temp table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2)); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +select * from t1; + +alter table t1 drop f4, + add f7 int default 7 , add f8 int first, add f9 int default 9 after f3, + modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5, + add f10 timestamp generated always as (f3) stored after f3, + add f11 int generated always as (f1 * 100) stored first; +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 SET('beijing','shanghai','nanjing','wuhan')); +insert into t1 values(1, 'shanghai,beijing'), (2, 'wuhan'); +\d+ t1 +select * from t1; +alter table t1 add f3 int default 3 first, add f4 int default 4 after f3, + add f5 SET('beijing','shanghai','nanjing','wuhan') default 'nanjing' first; +\d+ t1 +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 SET('beijing','shanghai','nanjing','wuhan')); +-- error +alter table t1 modify f2 SET('beijing','shanghai','nanjing','wuhan') first; +alter table t1 modify f2 
SET('beijing','shanghai','nanjing','wuhan') after f1; + +drop table if exists t1 cascade; + +--DTS +drop table if exists unit cascade; +CREATE TABLE unit +( + f11 INTEGER CHECK (f11 >=2), + f12 bool, + f13 text, + f14 varchar(20), + primary key (f11,f12) +); + +insert into unit values(2,3,4,5); +insert into unit values(3,4,5,6); +ALTER TABLE unit ADD f1 int CHECK (f1 >=10) FIRST; +insert into unit values (10,6,1,1,1); +insert into unit values (11,7,1,1,1); +ALTER TABLE unit ADD f2 int CHECK (f2 >=10) after f11; +select * from unit; +ALTER TABLE unit MODIFY f12 int FIRST; +select * from unit; +drop table if exists unit cascade; + +-- dts for set +drop table if exists test_s1 cascade; +create table test_s1 (c1 int,c2 SET('aaa','bbb','ccc'), c3 bool, primary key(c1)); +insert into test_s1 values(1,2,1), (2,'aaa',3), (3,4,4), (4,5,5), (5,1,6), (6,3,7); +alter table test_s1 add f1 text after c1; +alter table test_s1 modify c2 int first; +select * from test_s1; +drop table if exists test_s1 cascade; + +drop table if exists test_s2 cascade; +create table test_s2 (c1 int,c2 SET('aaa','bbb','ccc'), c3 bool, primary key(c1)); +insert into test_s2 values(1,2,1), (2,'aaa',3), (3,4,4), (4,5,5), (5,1,6), (6,3,7); +alter table test_s2 add f1 text check(f1 >= 2) after c1; +alter table test_s2 add f2 SET('w','ww','www','wwww') first; +alter table test_s2 modify f2 text after c1; +alter table test_s2 modify c2 int first; +select * from test_s2; +drop table if exists test_s2 cascade; + +drop table if exists t1 cascade; +create table t1(f1 set('aaa','bbb','ccc'), f2 set('1','2','3'), f3 set('beijing','shannghai','nanjing'), + f4 set('aaa','bbb','ccc') generated always as(f1+f2+f3) stored, + f5 set('1','2','3') generated always as(f1+f2+f3) stored, + f6 set('beijing','shannghai','nanjing') generated always as(f1+f2+f3) stored); +\d+ t1 +alter table t1 modify f1 int after f6; +\d+ t1 +alter table t1 drop f1; +\d+ t1 +drop table if exists t1 cascade; + +drop table t1 cascade; +create 
table t1(f1 int, f2 text, f3 int, f4 bool, f5 int generated always as (f1 + f3) stored); +insert into t1 values(1, 'aaa', 3, true); +insert into t1 values(11, 'bbb', 33, false); +insert into t1 values(111, 'ccc', 333, true); +insert into t1 values(1111, 'ddd', 3333, true); + +create view t1_view1 as select * from t1; +select * from t1_view1; +alter table t1 modify f1 int after f2, modify f3 int first; +drop view t1_view1; +create view t1_view1 as select * from t1; +alter table t1 modify f1 int after f2, modify f3 int first; +drop table t1 cascade; + +create table t1(f1 int, f2 text, f3 int, f4 bigint, f5 int generated always as (f1 + f3) stored); +insert into t1 values(1, 'aaa', 3, 1); +insert into t1 values(11, 'bbb', 33, 2); +insert into t1 values(111, 'ccc', 333, 3); +insert into t1 values(1111, 'ddd', 3333, 4); + +create view t1_view1 as select * from t1; +select * from t1_view1; +alter table t1 add f6 int first, add f7 int after f4, modify f1 int after f2, modify f3 int first; +select * from t1_view1; +drop view t1_view1; + +create view t1_view2 as select f1, f3, f5 from t1 where f2='aaa'; +select * from t1_view2; +alter table t1 add f8 int first, add f9 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view2; +drop view t1_view2; + +create view t1_view3 as select * from (select f1+f3, f5 from t1); +select * from t1_view3; +alter table t1 add f10 int first, add f11 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view3; +drop view t1_view3; + +create view t1_view4 as select * from (select abs(f1+f3) as col1, abs(f5) as col2 from t1); +select * from t1_view4; +alter table t1 add f12 int first, add f13 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view4; +drop view t1_view4; + +create view t1_view5 as select * from (select * from t1); +select * from t1_view5; +alter table t1 add f14 int 
first, add f15 int after f4, modify f1 int after f2, modify f3 int first; +select * from t1_view5; +drop view t1_view5; + +create view t1_view6 as select f1, f3, f5 from t1 where f2='aaa'; +select * from t1_view6; +alter table t1 modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view6; +drop view t1_view6; +drop table t1 cascade; + +-- dts for add +drop table if exists test_d; +create table test_d (f2 int primary key, f3 bool, f5 text); +insert into test_d values(1,2,3), (2,3,4), (3,4,5); +select * from test_d; +alter table test_d add f1 int default 1,add f11 text check (f11 >=2) first; +select * from test_d; + +drop table if exists test_d; +create table test_d (f2 int primary key, f3 bool, f5 text); +insert into test_d values(1,2,3), (2,3,4), (3,4,5); +select * from test_d; +alter table test_d add f1 int default 1; +alter table test_d add f11 text check (f11 >=2) first; +select * from test_d; +drop table if exists test_d; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +select * from t1; +alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '777' first, + add f8 int default 8, add f9 int primary key auto_increment after f6; +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +select * from t1; +alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '7' first, + add f8 int default 8, add f9 int primary key auto_increment after f1, + add f10 bool default true, add f11 timestamp after f2, + add f12 text after f3, add f14 int default '14', add f15 int default 15 check(f15 = 
15) after f9; +select * from t1; +drop table if exists t1 cascade; + +drop table if exists t1 cascade; +create table t1(f1 int comment 'f1 is int', f2 varchar(20), f3 timestamp comment 'f3 is timestamp', f4 bit(8), f5 bool comment 'f5 is boolean'); +SELECT pg_get_tabledef('t1'); +alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '7' first, add f8 int primary key auto_increment after f2; +SELECT pg_get_tabledef('t1'); +alter table t1 modify f1 int after f3, modify f5 bool first, modify f3 timestamp after f4; +SELECT pg_get_tabledef('t1'); +drop table if exists t1 cascade; + + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int2 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 boolean primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int8 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 float4 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 float8 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 boolean primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int1 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int2 
primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int4 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int8 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 float4 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 float8 primary key auto_increment not null; +drop table if exists t1 cascade; + + + + +-- \c postgres +-- drop database test_first_after_B; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_001.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_001.sql new file mode 100644 index 0000000000..9a52816f56 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_001.sql @@ -0,0 +1,785 @@ +create table altertable_rangeparttable +( + c1 int, + c2 float, + c3 real, + c4 text +) +partition by range (c1, c2, c3, c4) +( + partition altertable_rangeparttable_p1 values less than (10, 10.00, 19.156, 'h'), + partition altertable_rangeparttable_p2 values less than (20, 20.89, 23.75, 'k'), + partition altertable_rangeparttable_p3 values less than (30, 30.45, 32.706, 's') +); + +alter table altertable_rangeparttable add partition altertable_rangeparttable_p4 values less than (36, 45.25, 37.39, 'u'); + +create table altertable_rangeparttable2 +( + c1 int, + c2 float, + c3 real, + c4 text +) +partition by range (abs(c1)) +( + partition altertable_rangeparttable_p1 values less than (10), + partition altertable_rangeparttable_p2 values less 
than (20), + partition altertable_rangeparttable_p3 values less than (30) +); +alter table altertable_rangeparttable2 add partition altertable_rangeparttable_p4 values less than (36); + + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +alter table range_range add partition p_202001 values less than ('202002') (subpartition p_202001_a values less than('2') , subpartition p_202001_b values less than('3') ); + +-- comes from function_get_table_def.sql +create table table_range4 (id int primary key, a date, b varchar) +partition by range (id) +( + partition table_range4_p1 start (10) end (40) every (10), + partition table_range4_p2 end (70), + partition table_range4_p3 start (70), + partition table_range4_p4 start (100) end (150) every (20) +); + +alter table table_range4 add partition table_range4_p5 start (150) end (300) every (20); +alter table table_range4 add partition table_range4_p6 values less than (310), add partition table_range4_p7 values less than (320); + +create table table_interval1 (id int, a date, b varchar) +partition by range (a) +interval ('1 day') +( + partition table_interval1_p1 values less than('2020-03-01'), + partition table_interval1_p2 values less than('2020-05-01'), + partition table_interval1_p3 values less than('2020-07-01'), + partition table_interval1_p4 values less than(maxvalue) +); +alter table table_interval1 add partition table_interval1_p5 start ('2020-08-01') end ('2020-09-01'); + +create table table_list1 (id int, a date, b 
varchar) +partition by list (id) +( + partition table_list1_p1 values (1, 2, 3, 4), + partition table_list1_p2 values (5, 6, 7, 8), + partition table_list1_p3 values (9, 10, 11, 12) +); +alter table table_list1 add partition table_list1_p4 values (13, 14, 15, 16); +alter table table_list1 add partition table_list1_p5 values (default); + +create table table_list2 (id int, a date, b varchar) +partition by list (b) +( + partition table_list2_p1 values ('1', '2', '3', '4'), + partition table_list2_p2 values ('5', '6', '7', '8'), + partition table_list2_p3 values ('9', '10', '11', '12') +); +alter table table_list2 add partition table_list2_p4 values ('13', '14', '15', '16'); +alter table table_list2 add partition table_list2_p5 values ('DEFAULT'); +alter table table_list2 add partition table_list2_p6 values ('default'); +alter table table_list2 add partition table_list2_p7 values (default); + + +create table table_list3 (id int, a date, b varchar) +partition by list (id, b) +( + partition table_list3_p1 values ((1, 'a'), (2,'b'), (3,'c'), (4,'d')) , + partition table_list3_p2 values ((5, 'a'), (6,'b'), (7,'c'), (8,'d')) + +); +alter table table_list3 add partition table_list3_p3 values ((15, 'a'), (16,'b'), (17,'c'), (18,'d')); +alter table table_list3 add partition table_list3_p4 values (default); + +create table table_hash1 (id int, a date, b varchar) +partition by hash (id) +( + partition table_hash1_p1, + partition table_hash1_p2, + partition table_hash1_p3 +); + + +CREATE TABLE list_hash_2 ( + col_1 integer primary key, + col_2 integer, + col_3 character varying(30) unique, + col_4 integer +) +WITH (orientation=row, compression=no) +PARTITION BY LIST (col_2) SUBPARTITION BY HASH (col_3) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + ( + SUBPARTITION p_hash_1_1, + SUBPARTITION p_hash_1_2, + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + ( + 
SUBPARTITION p_hash_3_1, + SUBPARTITION p_hash_3_2 + ), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_hash_4_1, + SUBPARTITION p_hash_4_2, + SUBPARTITION p_hash_4_3, + SUBPARTITION p_hash_4_4, + SUBPARTITION p_hash_4_5 + ), + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40), + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_hash_6_1, + SUBPARTITION p_hash_6_2, + SUBPARTITION p_hash_6_3, + SUBPARTITION p_hash_6_4, + SUBPARTITION p_hash_6_5 + ), + PARTITION p_list_7 VALUES (DEFAULT) +); + +alter table list_hash_2 add partition p_list_8 values (51,52,53,54,55,56,57,58,59,60) (subpartition p_hash_8_1, subpartition p_hash_8_2, subpartition p_hash_8_3); + +-- drop table table_list3; +create table table_list3 (id int, a date, b varchar) +partition by list (id, b) +( + partition table_list3_p1 values ((1, 'a'), (2,'b'), (3,'c'), (4,'d')) , + partition table_list3_p2 values ((5,'a'), (6,'b'), (7,'NULL'), (8,NULL)) +); + +alter table table_list3 add partition table_list3_p3 values ((15, 'a'), (16,'default'), (17,'NULL'), (18,NULL)); + +alter table table_list3 add partition table_list3_p4 values (default); + +CREATE TABLE range_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer2_2010 VALUES LESS THAN 
('2011-01-01'), + SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01') + ) +); + +INSERT INTO range_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL; +ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01') + ); +ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01'); + +CREATE TABLE range2_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id, product_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01', 200), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01', 500), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01', 800), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01', 1200) +); + +INSERT INTO range2_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range2_sales_idx ON range2_sales(product_id) LOCAL; + +ALTER TABLE range2_sales TRUNCATE PARTITION time_2008; +ALTER TABLE range2_sales TRUNCATE PARTITION FOR VALUES('2011-04-01', 700) ; 
+ +ALTER TABLE range2_sales DROP PARTITION time_2009; +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01', 600); + +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(100), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); + +INSERT INTO range_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL; +ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer5_channel4 VALUES ('9') + ); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X', 'A', 'bbb'); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel6 VALUES ('NULL', 'asdasd', 'hahaha'); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD 
SUBPARTITION customer1_channel7 VALUES (NULL); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel8 VALUES ('DEFAULT', 'wawawa'); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel9 VALUES (DEFAULT); +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel9; + +ALTER TABLE range_list_sales SPLIT partition customer4 INTO ( + partition customer4_p1 values less than (900) + ( + subpartition customer4_p1_s1 VALUES ('11'), + subpartition customer4_p1_s2 VALUES ('12') + ), + partition customer4_p2 values less than (1000) + ( + subpartition customer4_p2_s1 VALUES ('11'), + subpartition customer4_p2_s2 VALUES ('12') + ) +); + +ALTER TABLE range_list_sales truncate partition customer2 update global index; +ALTER TABLE range_list_sales truncate partition for (300); +ALTER TABLE range_list_sales truncate partition customer5_channel3; + +ALTER TABLE range_list_sales DROP PARTITION customer2; +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1; + + +create table test_list (col1 int, col2 int) +partition by list(col1) +( +partition p1 values (2000), +partition p2 values (3000), +partition p3 values (4000), +partition p4 values (5000) +); + +INSERT INTO test_list VALUES(2000, 2000); +INSERT INTO test_list VALUES(3000, 3000); +alter table test_list add partition p5 values (6000); +INSERT INTO test_list VALUES(6000, 6000); + +create table t1 (col1 int, col2 int); + +alter table test_list exchange partition (p1) with table t1 VERBOSE; +alter table test_list truncate partition p2; +alter table test_list drop partition p5; + + +create table test_hash (col1 int, col2 int) +partition by hash(col1) +( +partition p1, +partition p2 +); + +INSERT INTO test_hash VALUES(1, 1); +INSERT INTO test_hash VALUES(2, 2); +INSERT INTO test_hash VALUES(3, 3); +INSERT INTO test_hash VALUES(4, 4); + +alter table test_hash exchange partition (p1) with table t1 WITHOUT VALIDATION; + +alter table 
test_hash truncate partition p2; + + +CREATE TABLE interval_sales +( + prod_id NUMBER(6), + cust_id NUMBER, + time_id DATE, + channel_id CHAR(1), + promo_id NUMBER(6), + quantity_sold NUMBER(3), + amount_sold NUMBER(10, 2) +) + PARTITION BY RANGE (time_id) + INTERVAL + ('1 MONTH') +( + PARTITION p0 VALUES LESS THAN (TO_DATE('1-1-2008', 'DD-MM-YYYY')), + PARTITION p1 VALUES LESS THAN (TO_DATE('6-5-2008', 'DD-MM-YYYY')) +); + +alter table interval_sales split partition p0 at (to_date('2007-02-10', 'YYYY-MM-DD')) into (partition p0_1, partition p0_2); + +alter table interval_sales split partition p0_1 into (partition p0_1_1 values less than (TO_DATE('1-1-2005', 'DD-MM-YYYY')), partition p0_1_2 values less than(TO_DATE('1-1-2006', 'DD-MM-YYYY')) ); + +alter table interval_sales split partition p0_2 into (partition p0_2_1 START (TO_DATE('8-5-2007', 'DD-MM-YYYY'), partition p0_2_2 START (TO_DATE('9-5-2007', 'DD-MM-YYYY')); + + +insert into interval_sales +values (1, 1, to_date('9-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('11-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('11-2-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('20-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('05-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('08-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('05-4-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('05-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-9-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-11-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales 
+values (1, 1, to_date('04-12-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-01-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-5-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-6-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-7-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-9-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); + +alter table interval_sales merge partitions p0_1, p0_2, p1 into partition p01; +alter table interval_sales merge partitions sys_p6, sys_p7, sys_p8 into partition sys_p6_p7_p8; +ALTER TABLE interval_sales RESET PARTITION; + +CREATE TABLE interval_sales1 +( + prod_id NUMBER(6), + cust_id NUMBER, + time_id DATE, + channel_id CHAR(1), + promo_id NUMBER(6), + quantity_sold NUMBER(3), + amount_sold NUMBER(10, 2) +) + PARTITION BY RANGE (time_id) + INTERVAL +('1 MONTH') +(PARTITION p0 VALUES LESS THAN (TO_DATE('1-1-2008', 'DD-MM-YYYY')), + PARTITION p1 VALUES LESS THAN (TO_DATE('6-5-2008', 'DD-MM-YYYY')) +); +create index interval_sales1_time_id_idx on interval_sales1 (time_id) local; +create index interval_sales1_quantity_sold_idx on interval_sales1 (quantity_sold) local; +alter table interval_sales1 split partition p0 at (to_date('2007-02-10', 'YYYY-MM-DD')) into (partition p0_1, partition p0_2); + +insert into interval_sales1 +values (1, 1, to_date('9-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('11-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('11-2-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('20-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('05-2-2009', 
'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('08-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('05-4-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('05-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-9-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-11-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-12-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-01-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-5-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-6-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-7-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-9-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); + +alter table interval_sales1 merge partitions p0_1, p0_2, p1 into partition p01 UPDATE GLOBAL INDEX; + + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +insert into range_range values('201902', '1', '1', 1); 
+insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); + +alter table range_range split subpartition p_201901_b at (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +); + +alter table range_range split subpartition p_201902_b at (3) into +( + subpartition p_201902_c, + subpartition p_201902_d +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '3', '1', 1); + +alter table list_list split subpartition p_201901_b values (2) into +( + subpartition p_201901_b, + subpartition p_201901_c +); + +alter table list_list split subpartition p_201902_b values (2, 3) into +( + subpartition p_201902_b, + subpartition p_201902_c +); + + +CREATE TABLE range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION 
time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') +); +CREATE INDEX range_sales_idx1 ON range_sales(product_id) LOCAL; +CREATE INDEX range_sales_idx2 ON range_sales(time_id) GLOBAL; +EXECUTE partition_get_partitionno('range_sales'); +ALTER TABLE range_sales ADD PARTITION time_default VALUES LESS THAN (MAXVALUE); +ALTER TABLE range_sales DROP PARTITION time_2008; +ALTER TABLE range_sales SPLIT PARTITION time_default AT ('2013-01-01') INTO (PARTITION time_2012, PARTITION time_default_temp); +ALTER TABLE range_sales RENAME PARTITION time_default_temp TO time_default; +ALTER TABLE range_sales MERGE PARTITIONS time_2009, time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX; +ALTER TABLE range_sales TRUNCATE PARTITION time_2011 UPDATE GLOBAL INDEX; +ALTER TABLE range_sales disable row movement; +ALTER TABLE range_sales enable row movement; + +CREATE TABLE interval_sales2 +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2)DEFAULT CHARACTER SET +) +PARTITION BY RANGE (time_id) INTERVAL ('1 year') +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01') +); +CREATE INDEX interval_sales2_idx1 ON interval_sales2(product_id) LOCAL; +CREATE INDEX interval_sales2_idx2 ON interval_sales2(time_id) GLOBAL; + +-- add/drop partition +INSERT INTO interval_sales2 VALUES (1,1,'2013-01-01','A',1,1,1); +INSERT INTO interval_sales2 VALUES (2,2,'2012-01-01','B',2,2,2); +ALTER TABLE interval_sales2 DROP PARTITION time_2008; + + +-- merge/split partition +ALTER TABLE interval_sales2 SPLIT PARTITION time_2009 AT ('2009-01-01') INTO (PARTITION time_2008, PARTITION time_2009_temp); +ALTER TABLE interval_sales2 RENAME PARTITION time_2009_temp TO time_2009; +ALTER TABLE interval_sales2 MERGE PARTITIONS time_2009, 
time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX; + + +-- truncate partition with gpi +ALTER TABLE interval_sales2 TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX; + + +--reset +ALTER TABLE interval_sales2 RESET PARTITION; +ALTER TABLE interval_sales2 disable row movement; + +create table unit_varchar(a1 varchar default '1', a2 varchar(2), a3 varchar(2 byte) default 'ye', a4 varchar(2 character) default '', a5 varchar(2 char) default 'Ĭ'); +create table unit_varchar2(a1 varchar2 default '1', a2 varchar2(2) default 'ha', a3 varchar2(2 byte), a4 varchar2(2 character) default '', a5 varchar2(2 char) default 'Ĭ'); +create table unit_char(a1 char default '1', a2 char(2) default 'ha', a3 char(2 byte) default 'ye', a4 char(2 character), a5 char(2 char) default 'Ĭ'); +create table unit_nchar(a1 nchar default '1', a2 nchar(2) default 'ha', a3 nchar(2) default 'ye', a4 nchar(2) default '', a5 nchar(2)); +create table unit_nvarchar2(a1 nvarchar2 default '1', a2 nvarchar2(2) default 'ha', a3 nvarchar2(2) default 'ye', a4 nvarchar2(2) default '', a5 nvarchar2(2)); + +insert into unit_varchar (a1) values ('1111111111123մ11111111111111111111111111111111մʵʵ1'); +insert into unit_varchar (a2) values ('12 '); +-- exceed +insert into unit_varchar (a2) values (''); +insert into unit_varchar (a3) values ('12 '); +-- exceed +insert into unit_varchar (a3) values (''); +insert into unit_varchar (a4) values ('2 '); +-- exceed +insert into unit_varchar (a4) values ('23 '); +-- exceed +insert into unit_varchar (a4) values ('223 '); +insert into unit_varchar (a5) values ('2 '); +-- exceed +insert into unit_varchar (a5) values ('23 '); +-- exceed +insert into unit_varchar (a5) values ('223 '); +-- exceed +update unit_varchar set a2=' '; +update unit_varchar set a3='a '; +-- exceed +update unit_varchar set a5=''; +update unit_varchar set a5=''; +select * from unit_varchar; + +insert into unit_varchar2 (a1) values ('111111111123մ11111111111111111111111111111111մʵʵ1'); +insert into 
unit_varchar2 (a2) values ('12 '); +-- exceed +insert into unit_varchar2 (a2) values (''); +insert into unit_varchar2 (a3) values ('12 '); +-- exceed +insert into unit_varchar2 (a3) values (''); +insert into unit_varchar2 (a4) values ('2 '); +-- exceed +insert into unit_varchar2 (a4) values ('23 '); +-- exceed +insert into unit_varchar2 (a4) values ('223 '); +insert into unit_varchar2 (a5) values ('2 '); +-- exceed +insert into unit_varchar2 (a5) values ('23 '); +-- exceed +insert into unit_varchar2 (a5) values ('223 '); +ALTER TABLE unit_varchar2 ALTER COLUMN a2 SET data TYPE char(1 char) USING a2::char(1 char); +insert into unit_varchar2 (a2) values ('һ '); +alter table unit_varchar2 modify column a3 varchar2(2 char) default 'ں'; +-- exceed +insert into unit_varchar2 (a2) values ('һe'); +insert into unit_varchar2 (a1) values(default); +select * from unit_varchar2; + +-- exceed +insert into unit_char (a1) values ('1111111111123մ11111111111111111111111111111111մʵʵ1'); +-- exceed +insert into unit_nchar (a1) values (' '); +insert into unit_nchar (a1) values ('1 '); +insert into unit_char (a2) values ('12 '); +-- exceed +insert into unit_char (a2) values (''); +insert into unit_char (a3) values ('12 '); +-- exceed +insert into unit_char (a3) values (''); +insert into unit_char (a4) values ('2 '); +-- exceed +insert into unit_char (a4) values ('23 '); +-- exceed +insert into unit_char (a4) values ('223 '); +insert into unit_char (a5) values ('2 '); +-- exceed +insert into unit_char (a5) values ('23 '); +-- exceed +insert into unit_char (a5) values ('223 '); +ALTER table unit_char ADD COLUMN a6 varchar(3 char) default 'Ĭֵ'; +insert into unit_char (a6) values ('1a '); +-- exceed +insert into unit_char (a6) values ('1234'); +update unit_char set a4=''; +-- execeed +update unit_char set a5='һ3'; +select * from unit_char; + +-- exceed +insert into unit_nchar (a1) values ('1111111111123մ11111111111111111111111111111111մʵʵ1'); +insert into unit_nchar (a1) values (' '); 
+insert into unit_nchar (a2) values (' '); +-- exceed +insert into unit_nchar (a2) values ('123 '); +-- exceed +insert into unit_nchar (a2) values (''); +insert into unit_nchar (a3) values ('12 '); +insert into unit_nchar (a3) values (''); +insert into unit_nchar (a4) values ('2 '); +-- exceed +insert into unit_nchar (a4) values ('23 '); +-- exceed +insert into unit_nchar (a4) values ('223 '); +insert into unit_nchar (a5) values ('2 '); +-- exceed +insert into unit_nchar (a5) values ('23 '); +-- exceed +insert into unit_nchar (a5) values ('223 '); + +-- exceed +insert into unit_nvarchar2 (a1) values ('1111111111123մ11111111111111111111111111111111մʵʵ1'); +insert into unit_nvarchar2 (a1) values (' '); +insert into unit_nvarchar2 (a2) values (' '); +-- exceed +insert into unit_nvarchar2 (a2) values ('123 '); +-- exceed +insert into unit_nvarchar2 (a2) values (''); +insert into unit_nvarchar2 (a3) values ('12 '); +insert into unit_nvarchar2 (a3) values (''); +insert into unit_nvarchar2 (a4) values ('2 '); +insert into unit_nvarchar2 (a5) values ('2 '); +-- exceed +insert into unit_nvarchar2 (a5) values ('23 '); +-- exceed +insert into unit_nvarchar2 (a5) values ('223 '); + + + +create table test_char(col char(20 char)); +insert into test_char values ('һ'), ('asdһbsd'), ('һ߰˾ʮһ߰˾ʮ '), ('һ2 '); +select col, length(col), lengthb(col) from test_char; + +create table test_varchar(col varchar(20 char)); +insert into test_varchar values ('һ'), ('asdһbsd'), ('һ߰˾ʮһ߰˾ʮ '), ('һ2 '); +select col, length(col), lengthb(col) from test_varchar; + +create table test_charb(col char(20)); +insert into test_charb values ('һ'), ('asdһbs '), ('һ '), ('һ2 '); +select col, length(col), lengthb(col) from test_charb; + +create table test_varcharb(col varchar(20)); +insert into test_varcharb values ('һ'), ('asdһbs '), ('һ '), ('һ2 '); +select col, length(col), lengthb(col) from test_varcharb; \ No newline at end of file diff --git 
a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_002.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_002.sql new file mode 100644 index 0000000000..53940da925 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_002.sql @@ -0,0 +1,2011 @@ +-- +--FOR BLACKLIST FEATURE: REFERENCES/INHERITS/WITH OIDS/RULE/CREATE TYPE/DOMAIN is not supported. +-- + +-- +-- ALTER_TABLE +-- add attribute +-- + +CREATE TABLE atmp1 (initial int4); + +COMMENT ON TABLE tmp_wrong IS 'table comment'; +COMMENT ON TABLE atmp1 IS 'table comment'; +COMMENT ON TABLE atmp1 IS NULL; + +ALTER TABLE atmp1 ADD COLUMN xmin integer; -- fails + +ALTER TABLE atmp1 ADD COLUMN a int4 default 3; + +ALTER TABLE atmp1 ADD COLUMN b name; + +ALTER TABLE atmp1 ADD COLUMN c text; + +ALTER TABLE atmp1 ADD COLUMN d float8; + +ALTER TABLE atmp1 ADD COLUMN e float4; + +ALTER TABLE atmp1 ADD COLUMN f int2; + +ALTER TABLE atmp1 ADD COLUMN g polygon; + +ALTER TABLE atmp1 ADD COLUMN h abstime; + +ALTER TABLE atmp1 ADD COLUMN i char; + +ALTER TABLE atmp1 ADD COLUMN j abstime[]; + +ALTER TABLE atmp1 ADD COLUMN k int4; + +ALTER TABLE atmp1 ADD COLUMN l tid; + +ALTER TABLE atmp1 ADD COLUMN m xid; + +ALTER TABLE atmp1 ADD COLUMN n oidvector; + +--ALTER TABLE atmp1 ADD COLUMN o lock; +ALTER TABLE atmp1 ADD COLUMN p smgr; + +ALTER TABLE atmp1 ADD COLUMN q point; + +ALTER TABLE atmp1 ADD COLUMN r lseg; + +ALTER TABLE atmp1 ADD COLUMN s path; + +ALTER TABLE atmp1 ADD COLUMN t box; + +ALTER TABLE atmp1 ADD COLUMN u tinterval; + +ALTER TABLE atmp1 ADD COLUMN v timestamp; + +ALTER TABLE atmp1 ADD COLUMN w interval; + +ALTER TABLE atmp1 ADD COLUMN x float8[]; + +ALTER TABLE atmp1 ADD COLUMN y float4[]; + +ALTER TABLE atmp1 ADD COLUMN z int2[]; + +INSERT INTO atmp1 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 
00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp1; + +----drop table tmp; + +-- the wolf bug - schema mods caused inconsistent row descriptors +CREATE TABLE atmp2 ( + initial int4 +); + +ALTER TABLE atmp2 ADD COLUMN a int4; + +ALTER TABLE atmp2 ADD COLUMN b name; + +ALTER TABLE atmp2 ADD COLUMN c text; + +ALTER TABLE atmp2 ADD COLUMN d float8; + +ALTER TABLE atmp2 ADD COLUMN e float4; + +ALTER TABLE atmp2 ADD COLUMN f int2; + +ALTER TABLE atmp2 ADD COLUMN g polygon; + +ALTER TABLE atmp2 ADD COLUMN h abstime; + +ALTER TABLE atmp2 ADD COLUMN i char; + +ALTER TABLE atmp2 ADD COLUMN j abstime[]; + +ALTER TABLE atmp2 ADD COLUMN k int4; + +ALTER TABLE atmp2 ADD COLUMN l tid; + +ALTER TABLE atmp2 ADD COLUMN m xid; + +ALTER TABLE atmp2 ADD COLUMN n oidvector; + +--ALTER TABLE atmp2 ADD COLUMN o lock; +ALTER TABLE atmp2 ADD COLUMN p smgr; + +ALTER TABLE atmp2 ADD COLUMN q point; + +ALTER TABLE atmp2 ADD COLUMN r lseg; + +ALTER TABLE atmp2 ADD COLUMN s path; + +ALTER TABLE atmp2 ADD COLUMN t box; + +ALTER TABLE atmp2 ADD COLUMN u tinterval; + +ALTER TABLE atmp2 ADD COLUMN v timestamp; + +ALTER TABLE atmp2 ADD COLUMN w interval; + +ALTER TABLE atmp2 ADD COLUMN x float8[]; + +ALTER TABLE atmp2 ADD COLUMN y float4[]; + +ALTER TABLE atmp2 ADD COLUMN z int2[]; + +INSERT INTO atmp2 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', 
'{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp2; + +----drop table tmp; + + +-- +-- rename - check on both non-temp and temp tables +-- +CREATE TABLE atmp3 (regtable int); +-- Enforce use of COMMIT instead of 2PC for temporary objects +\set VERBOSITY verbose +-- CREATE TEMP TABLE tmp (tmptable int); + +ALTER TABLE atmp3 RENAME TO tmp_new; + +-- SELECT * FROM tmp; +-- SELECT * FROM tmp_new; + +-- ALTER TABLE tmp RENAME TO tmp_new2; + +SELECT * FROM tmp_new; +-- SELECT * FROM tmp_new2; + +----drop table tmp_new; +-- ----drop table tmp_new2; +CREATE TABLE atmp4 (ch1 character(1)); +insert into atmp4 values ('asdv'); +----drop table tmp; +\set VERBOSITY default + + +CREATE TABLE onek ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); +CREATE INDEX onek_unique1 ON onek USING btree(unique1 int4_ops); + +CREATE TABLE tenk1 ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); + +CREATE TABLE stud_emp ( + name text, + age int4, + location point, + salary int4, + manager name, + gpa float8, + percent int4 +) with(autovacuum_enabled = off); + +-- ALTER TABLE ... 
RENAME on non-table relations +-- renaming indexes (FIXME: this should probably test the index's functionality) +ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1; + +ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1; + +-- renaming views +CREATE VIEW tmp_view (unique1) AS SELECT unique1 FROM tenk1; +ALTER TABLE tmp_view RENAME TO tmp_view_new; + +-- hack to ensure we get an indexscan here +ANALYZE tenk1; +set enable_seqscan to off; +set enable_bitmapscan to off; +-- 5 values, sorted +SELECT unique1 FROM tenk1 WHERE unique1 < 5 ORDER BY unique1; +reset enable_seqscan; +reset enable_bitmapscan; + +DROP VIEW tmp_view_new; +-- toast-like relation name +alter table stud_emp rename to pg_toast_stud_emp; +alter table pg_toast_stud_emp rename to stud_emp; + +-- renaming index should rename constraint as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraint +ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0); +ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo; + +-- renaming constraint should rename index as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +DROP INDEX onek_unique1_constraint; -- to see whether it's there +ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo; +DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraints vs. 
inheritance +CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int); +\d constraint_rename_test +CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test); +create table constraint_rename_test2 (like constraint_rename_test ); +\d constraint_rename_test2 +ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail +ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok +\d constraint_rename_test +\d constraint_rename_test2 +ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT; +ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok +\d constraint_rename_test +\d constraint_rename_test2 +ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a); +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok +\d constraint_rename_test +\d constraint_rename_test2 +----drop table constraint_rename_test2; +----drop table constraint_rename_test; +ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a); + +-- FOREIGN KEY CONSTRAINT adding TEST + +CREATE TABLE tmp2 (a int primary key); + +CREATE TABLE tmp3 (a int, b int); + +CREATE TABLE tmp4 (a int, b int, unique(a,b)); + +CREATE TABLE tmp5 (a int, b int); + +-- Insert rows into tmp2 (pktable) +INSERT INTO tmp2 values (1); +INSERT INTO tmp2 values (2); +INSERT INTO tmp2 values (3); +INSERT INTO tmp2 values (4); + +-- Insert rows into tmp3 +INSERT INTO tmp3 values (1,10); +INSERT INTO tmp3 values (1,20); +INSERT INTO tmp3 values (5,50); + +-- Try (and fail) to add constraint due to invalid source columns +ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full; + +-- Try (and fail) to add constraint due to 
invalide destination columns explicitly given +ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full; + +-- Try (and fail) to add constraint due to invalid data +ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; + +-- Delete failing row +alter table tmp3 replica identity full; +DELETE FROM tmp3 where a=5; + +-- Try (and succeed) +ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; +ALTER TABLE tmp3 drop constraint tmpconstr; + +INSERT INTO tmp3 values (5,50); + +-- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate +ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full NOT VALID; +ALTER TABLE tmp3 validate constraint tmpconstr; + +-- Delete failing row +DELETE FROM tmp3 where a=5; + +-- Try (and succeed) and repeat to show it works on already valid constraint +ALTER TABLE tmp3 validate constraint tmpconstr; +ALTER TABLE tmp3 validate constraint tmpconstr; + +-- Try a non-verified CHECK constraint +ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail +ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds +ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails +DELETE FROM tmp3 WHERE NOT b > 10; +ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds + +-- Test inherited NOT VALID CHECK constraints +select * from tmp3; + +-- Try (and fail) to create constraint from tmp5(a) to tmp4(a) - unique constraint on +-- tmp4 is a,b + +ALTER TABLE tmp5 add constraint tmpconstr foreign key(a) references tmp4(a) match full; + +----drop table tmp5; + +----drop table tmp4; + +----drop table tmp3; + +----drop table tmp2; + +-- NOT VALID with plan invalidation -- ensure we don't use a constraint for +-- exclusion until validated +set constraint_exclusion TO 
'partition'; +create table nv_parent (d date); +create table nv_child_2010 () inherits (nv_parent); +create table nv_child_2010 (like nv_parent); +create table nv_child_2011 () inherits (nv_parent); +create table nv_child_2011 (like nv_parent including all); +alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid; +alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid; +explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31'; +create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent); +explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date; +explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; +-- after validation, the constraint should be used +alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check; +explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; + + +-- Foreign key adding test with mixed types + +-- Note: these tables are TEMP to avoid name conflicts when this test +-- is run in parallel with foreign_key.sql. 
+ +CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY); +INSERT INTO PKTABLE VALUES(42); +CREATE TABLE FKTABLE (ftest1 inet); +-- This next should fail, because int=inet does not exist +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; +-- This should also fail for the same reason, but here we +-- give the column name +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1); +----drop table FKTABLE; +-- This should succeed, even though they are different types, +-- because int=int8 exists and is a member of the integer opfamily +CREATE TABLE FKTABLE1 (ftest1 int8); +ALTER TABLE FKTABLE1 ADD FOREIGN KEY(ftest1) references pktable; +-- Check it actually works +INSERT INTO FKTABLE1 VALUES(42); -- should succeed +INSERT INTO FKTABLE1 VALUES(43); -- should fail +----drop table FKTABLE; +-- This should fail, because we'd have to cast numeric to int which is +-- not an implicit coercion (or use numeric=numeric, but that's not part +-- of the integer opfamily) +CREATE TABLE FKTABLE2 (ftest1 numeric); +ALTER TABLE FKTABLE2 ADD FOREIGN KEY(ftest1) references pktable; +----drop table FKTABLE; +----drop table PKTABLE; +-- On the other hand, this should work because int implicitly promotes to +-- numeric, and we allow promotion on the FK side +CREATE TABLE PKTABLE1 (ptest1 numeric PRIMARY KEY); +INSERT INTO PKTABLE1 VALUES(42); +CREATE TABLE FKTABLE3 (ftest1 int); +ALTER TABLE FKTABLE3 ADD FOREIGN KEY(ftest1) references pktable1; +-- Check it actually works +INSERT INTO FKTABLE3 VALUES(42); -- should succeed +INSERT INTO FKTABLE3 VALUES(43); -- should fail +----drop table FKTABLE; +----drop table PKTABLE; + +CREATE TABLE PKTABLE2 (ptest1 int, ptest2 inet, + PRIMARY KEY(ptest1, ptest2)); +-- This should fail, because we just chose really odd types +CREATE TABLE FKTABLE4 (ftest1 cidr, ftest2 timestamp); +ALTER TABLE FKTABLE4 ADD FOREIGN KEY(ftest1, ftest2) references pktable2; +----drop table FKTABLE; +-- Again, so should this... 
+CREATE TABLE FKTABLE5 (ftest1 cidr, ftest2 timestamp); +ALTER TABLE FKTABLE5 ADD FOREIGN KEY(ftest1, ftest2) + references pktable2(ptest1, ptest2); +----drop table FKTABLE; +-- This fails because we mixed up the column ordering +CREATE TABLE FKTABLE6 (ftest1 int, ftest2 inet); +ALTER TABLE FKTABLE6 ADD FOREIGN KEY(ftest1, ftest2) + references pktable2(ptest2, ptest1); +-- As does this... +ALTER TABLE FKTABLE6 ADD FOREIGN KEY(ftest2, ftest1) + references pktable2(ptest1, ptest2); + +-- temp tables should go away by themselves, need not drop them. + +-- test check constraint adding + +create table at1acc1 ( test int ); +-- add a check constraint +alter table at1acc1 add constraint at1acc_test1 check (test>3); +-- should fail +insert into at1acc1 (test) values (2); +-- should succeed +insert into at1acc1 (test) values (4); +----drop table atacc1; + +-- let's do one where the check fails when added +create table at2acc1 ( test int ); +-- insert a soon to be failing row +insert into at2acc1 (test) values (2); +-- add a check constraint (fails) +alter table at2acc1 add constraint at2acc_test1 check (test>3); +insert into at2acc1 (test) values (4); +----drop table atacc1; + +-- let's do one where the check fails because the column doesn't exist +create table at3acc1 ( test int ); +-- add a check constraint (fails) +alter table at3acc1 add constraint at3acc_test1 check (test1>3); +----drop table atacc1; + +-- something a little more complicated +create table at4acc1 ( test int, test2 int, test3 int); +-- add a check constraint (fails) +alter table at4acc1 add constraint at4acc_test1 check (test+test23), test2 int); +alter table at5acc1 add check (test2>test); +-- should fail for $2 +insert into at5acc1 (test2, test) values (3, 4); +----drop table atacc1; + +-- inheritance related tests +create table at6acc1 (test int); +create table at6acc2 (test2 int); +create table at6acc3 (test3 int) inherits (at6acc1, at6acc2); +alter table at6acc2 add constraint foo check (test2>0); 
+-- fail and then succeed on atacc2 +insert into at6acc2 (test2) values (-3); +insert into at6acc2 (test2) values (3); +-- fail and then succeed on atacc3 +insert into at6acc3 (test2) values (-3); +insert into at6acc3 (test2) values (3); +----drop table atacc3; +----drop table atacc2; +----drop table atacc1; + +-- same things with one created with INHERIT +create table at7acc1 (test int); +create table at7acc2 (test2 int); +create table at7acc3 (test3 int) inherits (at7acc1, at7acc2); +alter table at7acc3 no inherit at7acc2; +-- fail +alter table at7acc3 no inherit at7acc2; +-- make sure it really isn't a child +insert into at7acc3 (test2) values (3); +select test2 from atacc2; +-- fail due to missing constraint +alter table at7acc2 add constraint foo check (test2>0); +alter table at7acc3 inherit atacc2; +-- fail due to missing column +alter table at7acc3 rename test2 to testx; +alter table at7acc3 inherit atacc2; +-- fail due to mismatched data type +alter table at7acc3 add test2 bool; +alter table at7acc3 inherit atacc2; +alter table at7acc3 drop test2; +-- succeed +alter table at7acc3 add test2 int; +alter table at7acc3 replica identity full; +update at7acc3 set test2 = 4 where test2 is null; +alter table at7acc3 add constraint foo check (test2>0); +alter table at7acc3 inherit at7acc2; +-- fail due to duplicates and circular inheritance +alter table at7acc3 inherit at7acc2; +alter table at7acc2 inherit at7acc3; +alter table at7acc2 inherit at7acc2; +-- test that we really are a child now (should see 4 not 3 and cascade should go through) +select test2 from at7acc2; +----drop table atacc2 cascade; +----drop table atacc1; + +-- adding only to a parent is allowed as of 9.2 + +create table at8acc1 (test int); +create table at8acc2 (test2 int) inherits (at8acc1); +-- ok: +alter table at8acc1 add constraint foo check (test>0) no inherit; +-- check constraint is not there on child +insert into at8acc2 (test) values (-3); +-- check constraint is there on parent +insert 
into at8acc1 (test) values (-3); +insert into at8acc1 (test) values (3); +-- fail, violating row: +alter table at8acc2 add constraint foo check (test>0) no inherit; +----drop table atacc2; +----drop table atacc1; + +-- test unique constraint adding + +create table at9acc1 ( test int ) with oids; +-- add a unique constraint +alter table at9acc1 add constraint at9acc_test1 unique (test); +-- insert first value +insert into at9acc1 (test) values (2); +-- should fail +insert into at9acc1 (test) values (2); +-- should succeed +insert into at9acc1 (test) values (4); +-- try adding a unique oid constraint +alter table at9acc1 add constraint atacc_oid1 unique(oid); +-- try to create duplicates via alter table using - should fail +alter table at9acc1 alter column test type integer using 0; +----drop table atacc1; + +-- let's do one where the unique constraint fails when added +create table a1tacc1 ( test int ); +-- insert soon to be failing rows +insert into a1tacc1 (test) values (2); +insert into a1tacc1 (test) values (2); +-- add a unique constraint (fails) +alter table a1tacc1 add constraint a1tacc_test1 unique (test); +insert into a1tacc1 (test) values (3); +--drop table atacc1; + +-- let's do one where the unique constraint fails +-- because the column doesn't exist +create table a2tacc1 ( test int ); +-- add a unique constraint (fails) +alter table a2tacc1 add constraint a2tacc_test1 unique (test1); +--drop table atacc1; + +-- something a little more complicated +create table a2tacc1 ( test int, test2 int); +-- add a unique constraint +alter table a2tacc1 add constraint a2tacc_test1 unique (test, test2); +-- insert initial value +insert into a2tacc1 (test,test2) values (4,4); +-- should fail +insert into a2tacc1 (test,test2) values (4,4); +-- should all succeed +insert into a2tacc1 (test,test2) values (4,5); +insert into a2tacc1 (test,test2) values (5,4); +insert into a2tacc1 (test,test2) values (5,5); +--drop table atacc1; + +-- lets do some naming tests +create 
table a3tacc1 (test int, test2 int, unique(test)); +alter table a3tacc1 add unique (test2); +-- should fail for @@ second one @@ +insert into a3tacc1 (test2, test) values (3, 3); +insert into a3tacc1 (test2, test) values (2, 3); +--drop table atacc1; + +-- test primary key constraint adding + +create table a4tacc1 ( test int ) with oids; +-- add a primary key constraint +alter table a4tacc1 add constraint a4tacc_test1 primary key (test); +-- insert first value +insert into a4tacc1 (test) values (2); +-- should fail +insert into a4tacc1 (test) values (2); +-- should succeed +insert into a4tacc1 (test) values (4); +-- inserting NULL should fail +insert into a4tacc1 (test) values(NULL); +-- try adding a second primary key (should fail) +alter table a4tacc1 add constraint atacc_oid1 primary key(oid); +-- drop first primary key constraint +alter table a4tacc1 drop constraint a4tacc_test1 restrict; +-- try adding a primary key on oid (should succeed) +alter table a4tacc1 add constraint atacc_oid1 primary key(oid); +--drop table a4tacc1; + +-- let's do one where the primary key constraint fails when added +create table a5tacc1 ( test int ); +-- insert soon to be failing rows +insert into a5tacc1 (test) values (2); +insert into a5tacc1 (test) values (2); +-- add a primary key (fails) +alter table a5tacc1 add constraint a5tacc_test1 primary key (test); +insert into a5tacc1 (test) values (3); +--drop table a5tacc1; + +-- let's do another one where the primary key constraint fails when added +create table a6tacc1 ( test int ); +-- insert soon to be failing row +insert into a6tacc1 (test) values (NULL); +-- add a primary key (fails) +alter table a6tacc1 add constraint a6tacc_test1 primary key (test); +insert into a6tacc1 (test) values (3); +--drop table atacc1; + +-- let's do one where the primary key constraint fails +-- because the column doesn't exist +create table a7tacc1 ( test int ); +-- add a primary key constraint (fails) +alter table a7tacc1 add constraint 
a7tacc_test1 primary key (test1); +--drop table atacc1; + +-- adding a new column as primary key to a non-empty table. +-- should fail unless the column has a non-null default value. +create table a8tacc1 ( test int ); +insert into a8tacc1 (test) values (0); +-- add a primary key column without a default (fails). +alter table a8tacc1 add column test2 int primary key; +-- now add a primary key column with a default (succeeds). +alter table a8tacc1 add column test2 int default 0 primary key; +--drop table atacc1; + +-- something a little more complicated +create table a9tacc1 ( test int, test2 int); +-- add a primary key constraint +alter table a9tacc1 add constraint a9tacc_test1 primary key (test, test2); +-- try adding a second primary key - should fail +alter table a9tacc1 add constraint atacc_test2 primary key (test); +-- insert initial value +insert into a9tacc1 (test,test2) values (4,4); +-- should fail +insert into a9tacc1 (test,test2) values (4,4); +insert into a9tacc1 (test,test2) values (NULL,3); +insert into a9tacc1 (test,test2) values (3, NULL); +insert into a9tacc1 (test,test2) values (NULL,NULL); +-- should all succeed +insert into a9tacc1 (test,test2) values (4,5); +insert into a9tacc1 (test,test2) values (5,4); +insert into a9tacc1 (test,test2) values (5,5); +--drop table atacc1; + +-- lets do some naming tests +create table at10acc1 (test int, test2 int, primary key(test)); +-- only first should succeed +insert into at10acc1 (test2, test) values (3, 3); +insert into at10acc1 (test2, test) values (2, 3); +insert into at10acc1 (test2, test) values (1, NULL); +--drop table atacc1; + +-- alter table modify not null +-- try altering syscatlog should fail +alter table pg_class modify (relname not null enable); +alter table pg_class modify relname not null enable; +-- try altering non-existent table should fail +alter table non_existent modify (bar not null enable); +-- test alter table +create table test_modify (a int, b int); +alter table test_modify 
replica identity full; +alter table test_modify modify (b not null enable); +insert into test_modify(b) values (null); +insert into test_modify values (1, null); +alter table test_modify modify(b null); +insert into test_modify values (1, null); +alter table test_modify modify (b not null enable); +alter table test_modify replica identity full; +delete from test_modify; +alter table test_modify modify (a not null, b not null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (a null, b null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (b constraint ak not null); +delete from test_modify; +alter table test_modify modify (b constraint ak not null); +insert into test_modify values(1,1); +insert into test_modify values(1,null); +alter table test_modify modify (b constraint ak null); +insert into test_modify values(1,null); +alter table test_modify modify (a null, a not null); +-- try alter view should fail +create view test_modify_view as select * from test_modify; +alter table test_modify_view modify (a not null enable); +drop view test_modify_view; +--drop table test_modify; + +-- alter table / alter column [set/drop] not null tests +-- try altering system catalogs, should fail +alter table pg_class alter column relname drop not null; +alter table pg_class alter relname set not null; + +-- try altering non-existent table, should fail +alter table non_existent alter column bar set not null; +alter table non_existent alter column bar drop not null; + +-- test setting columns to null and not null and vice versa +-- test checking for null values and primary key +create table at11acc1 (test int not null) with oids; +alter table at11acc1 add constraint "atacc1_pkey" primary key (test); +alter table at11acc1 alter column test drop not null; +alter table at11acc1 drop constraint "atacc1_pkey"; +alter table at11acc1 alter column test drop 
not null; +insert into at11acc1 values (null); +alter table at11acc1 alter test set not null; +atler table at11acc1 replica identity full; +delete from at11acc1; +alter table at11acc1 alter test set not null; + +-- try altering a non-existent column, should fail +alter table at11acc1 alter bar set not null; +alter table at11acc1 alter bar drop not null; + +-- try altering the oid column, should fail +alter table at11acc1 alter oid set not null; +alter table at11acc1 alter oid drop not null; + +-- try creating a view and altering that, should fail +create view myview as select * from at11acc1; +alter table myview alter column test drop not null; +alter table myview alter column test set not null; +drop view myview; + +--drop table atacc1; + +-- test inheritance +create table parent (a int); +create table child1 (b varchar(255)) inherits (parent); +create table child1 (like parent); +alter table child1 add column (b varchar(255)); + +alter table parent alter a set not null; +insert into parent values (NULL); +insert into child1 (a, b) values (NULL, 'foo'); +alter table parent alter a drop not null; +insert into parent values (NULL); +insert into child1 (a, b) values (NULL, 'foo'); +alter table only parent alter a set not null; +alter table child1 alter a set not null; +alter table parent replica identity full; +alter table child1 replica identity full; +delete from parent; +alter table only parent alter a set not null; +insert into parent values (NULL); +alter table child1 alter a set not null; +insert into child1 (a, b) values (NULL, 'foo'); +delete from child1; +alter table child1 alter a set not null; +insert into child1 (a, b) values (NULL, 'foo'); +--drop table child; +--drop table parent; + +-- test setting and removing default values +create table def_test ( + c1 int4 default 5, + c2 text default 'initial_default' +); +insert into def_test default values; +alter table def_test alter column c1 drop default; +insert into def_test default values; +alter table 
def_test alter column c2 drop default; +insert into def_test default values; +alter table def_test alter column c1 set default 10; +alter table def_test alter column c2 set default 'new_default'; +insert into def_test default values; +select * from def_test order by 1, 2; + +-- set defaults to an incorrect type: this should fail +alter table def_test alter column c1 set default 'wrong_datatype'; +alter table def_test alter column c2 set default 20; + +-- set defaults on a non-existent column: this should fail +alter table def_test alter column c3 set default 30; + +-- set defaults on views: we need to create a view, add a rule +-- to allow insertions into it, and then alter the view to add +-- a default +create view def_view_test as select * from def_test; +create rule def_view_test_ins as + on insert to def_view_test + do instead insert into def_test select new.*; +insert into def_view_test default values; +alter table def_view_test alter column c1 set default 45; +insert into def_view_test default values; +alter table def_view_test alter column c2 set default 'view_default'; +insert into def_view_test default values; +select * from def_view_test order by 1, 2; + +drop rule def_view_test_ins on def_view_test; +drop view def_view_test; +--drop table def_test; + +-- alter table / drop column tests +-- try altering system catalogs, should fail +alter table pg_class drop column relname; + +-- try altering non-existent table, should fail +alter table nosuchtable drop column bar; + +-- test dropping columns +create table at12acc1 (a int4 not null, b int4, c int4 not null, d int4) with oids; +insert into at12acc1 values (1, 2, 3, 4); +alter table at12acc1 drop a; +alter table at12acc1 drop a; + +-- SELECTs +select * from at12acc1; +select * from at12acc1 order by a; +select * from at12acc1 order by "........pg.dropped.1........"; +select * from at12acc1 group by a; +select * from at12acc1 group by "........pg.dropped.1........"; +select at12acc1.* from at12acc1; +select 
a from at12acc1; +select at12acc1.a from at12acc1; +select b,c,d from at12acc1; +select a,b,c,d from at12acc1; +select * from at12acc1 where a = 1; +select "........pg.dropped.1........" from at12acc1; +select at12acc1."........pg.dropped.1........" from at12acc1; +select "........pg.dropped.1........",b,c,d from at12acc1; +select * from at12acc1 where "........pg.dropped.1........" = 1; +alter table at12acc1 replica identity full; +-- UPDATEs +update at12acc1 set a = 3; +update at12acc1 set b = 2 where a = 3; +update at12acc1 set "........pg.dropped.1........" = 3; +update at12acc1 set b = 2 where "........pg.dropped.1........" = 3; + +-- INSERTs +insert into at12acc1 values (10, 11, 12, 13); +insert into at12acc1 values (default, 11, 12, 13); +insert into at12acc1 values (11, 12, 13); +insert into at12acc1 (a) values (10); +insert into at12acc1 (a) values (default); +insert into at12acc1 (a,b,c,d) values (10,11,12,13); +insert into at12acc1 (a,b,c,d) values (default,11,12,13); +insert into at12acc1 (b,c,d) values (11,12,13); +insert into at12acc1 ("........pg.dropped.1........") values (10); +insert into at12acc1 ("........pg.dropped.1........") values (default); +insert into at12acc1 ("........pg.dropped.1........",b,c,d) values (10,11,12,13); +insert into at12acc1 ("........pg.dropped.1........",b,c,d) values (default,11,12,13); + +-- DELETEs +alter table at12acc1 replica identity full; +delete from at12acc1 where a = 3; +delete from at12acc1 where "........pg.dropped.1........" 
= 3; +delete from at12acc1; + +-- try dropping a non-existent column, should fail +alter table at12acc1 drop bar; + +-- try dropping the oid column, should succeed +alter table at12acc1 drop oid; + +-- try dropping the xmin column, should fail +alter table at12acc1 drop xmin; + +-- try creating a view and altering that, should fail +create view myview as select * from at12acc1; +select * from myview; +alter table myview drop d; +drop view myview; + +-- test some commands to make sure they fail on the dropped column +analyze at12acc1(a); +analyze at12acc1("........pg.dropped.1........"); +vacuum analyze at12acc1(a); +vacuum analyze at12acc1("........pg.dropped.1........"); +comment on column at12acc1.a is 'testing'; +comment on column at12acc1."........pg.dropped.1........" is 'testing'; +alter table at12acc1 alter a set storage plain; +alter table at12acc1 alter "........pg.dropped.1........" set storage plain; +alter table at12acc1 alter a set statistics 0; +alter table at12acc1 alter "........pg.dropped.1........" set statistics 0; +alter table at12acc1 alter a set default 3; +alter table at12acc1 alter "........pg.dropped.1........" set default 3; +alter table at12acc1 alter a drop default; +alter table at12acc1 alter "........pg.dropped.1........" drop default; +alter table at12acc1 alter a set not null; +alter table at12acc1 alter "........pg.dropped.1........" set not null; +alter table at12acc1 alter a drop not null; +alter table at12acc1 alter "........pg.dropped.1........" drop not null; +alter table at12acc1 rename a to x; +alter table at12acc1 rename "........pg.dropped.1........" to x; +alter table at12acc1 add primary key(a); +alter table at12acc1 add primary key("........pg.dropped.1........"); +alter table at12acc1 add unique(a); +alter table at12acc1 add unique("........pg.dropped.1........"); +alter table at12acc1 add check (a > 3); +alter table at12acc1 add check ("........pg.dropped.1........" 
> 3); +create table atacc2 (id int4 unique); +alter table at12acc1 add foreign key (a) references atacc2(id); +alter table at12acc1 add foreign key ("........pg.dropped.1........") references atacc2(id); +alter table atacc2 add foreign key (id) references at12acc1(a); +alter table atacc2 add foreign key (id) references at12acc1("........pg.dropped.1........"); +--drop table atacc2; +create index "testing_idx" on at12acc1(a); +create index "testing_idx" on at12acc1("........pg.dropped.1........"); + +-- test create as and select into +insert into at12acc1 values (21, 22, 23); +create table test1 as select * from at12acc1; +select * from test1; +--drop table test1; +select * into test2 from at12acc1; +select * from test2; +--drop table test2; + +-- try dropping all columns +alter table at12acc1 drop c; +alter table at12acc1 drop d; +alter table at12acc1 drop b; +select * from at12acc1; + +--drop table atacc1; +-- test constraint error reporting in presence of dropped columns +create table at13acc1 (id serial primary key, value int check (value < 10)); +insert into at13acc1(value) values (100); +alter table at13acc1 drop column value; +alter table at13acc1 add column value int check (value < 10); +insert into at13acc1(value) values (100); +insert into at13acc1(id, value) values (null, 0); +alter table at13acc1 alter column id set default 10; +drop sequence at13acc1_id_seq; + +-- test inheritance +create table parent (a int, b int, c int); +insert into parent values (1, 2, 3); +alter table parent drop a; +create table child (d varchar(255)) inherits (parent); +create table child2 as select * from parent; +alter table child2 add column d varchar(255); +insert into child2 values (12, 13, 'testing'); + +select * from parent order by b; +select * from child2; +alter table parent drop c; +select * from parent order by b; +select * from child2; + +--drop table child; +--drop table parent; + +-- test copy in/out +create table test (a int4, b int4, c int4); +insert into test 
values (1,2,3); +alter table test drop a; +copy test to stdout; +copy test(a) to stdout; +copy test("........pg.dropped.1........") to stdout; +copy test from stdin; +10 11 12 +\. +select * from test order by b; +copy test from stdin; +21 22 +\. +select * from test order by b; +copy test(a) from stdin; +copy test("........pg.dropped.1........") from stdin; +copy test(b,c) from stdin; +31 32 +\. +select * from test order by b; +--drop table test; + +-- test inheritance + +create table dropColumn (a int, b int, e int); +create table dropColumnChild (c int) inherits (dropColumn); +select * into dropColumnChild from dropColumn; +alter table dropColumnChild add column c int; +create table dropColumnAnother (d int) inherits (dropColumnChild); +select * into dropColumnAnother from dropColumnChild; +alter table dropColumnAnother add column d int; +-- these two should fail +alter table dropColumnchild drop column a; +alter table only dropColumnChild drop column b; + + + +-- these three should work +alter table only dropColumn drop column e; +alter table dropColumnChild drop column c; +alter table dropColumn drop column a; + +create table renameColumn (a int); +create table renameColumnChild (b int) inherits (renameColumn); +create table renameColumnChild as select * from renameColumn; +create table renameColumnAnother (c int) inherits (renameColumnChild); +select * into renameColumnAnother from renameColumnChild; +alter table renameColumnAnother add column b int; + +-- these three should fail +alter table renameColumnChild rename column a to d; +alter table only renameColumnChild rename column a to d; +alter table only renameColumn rename column a to d; + +-- these should work +alter table renameColumn rename column a to d; +alter table renameColumnChild rename column b to a; + +-- these should work +alter table if exists doesnt_exist_tab rename column a to d; +alter table if exists doesnt_exist_tab rename column b to a; + +-- this should work +alter table renameColumn add 
column w int; + +-- this should fail +alter table only renameColumn add column x int; + + +-- Test corner cases in dropping of inherited columns + +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); +create table c1 (like p1); +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +-- should work +alter table p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +select f1 from c1; +alter table c1 drop column f1; +select f1 from c1; + +--drop table p1 cascade; + +create table p11 (f1 int, f2 int); +create table c11 () inherits(p11); +create table c11 (like p11); +-- should be rejected since c1.f1 is inherited +alter table c11 drop column f1; +alter table p11 drop column f1; +-- c1.f1 is dropped now, since there is no local definition for it +select f1 from c11; + +--drop table p1 cascade; + +create table p12 (f1 int, f2 int); +create table c12 () inherits(p12); +create table c12 as select * from p12; +-- should be rejected since c1.f1 is inherited +alter table c12 drop column f1; +alter table only p1 drop column f1; +-- c1.f1 is NOT dropped, but must now be considered non-inherited +alter table c12 drop column f1; + +--drop table p1 cascade; + +create table p13 (f1 int, f2 int); +create table c13 (f1 int not null) inherits(p1); +create table c13 as select * from p13; +-- should be rejected since c1.f1 is inherited +alter table c13 drop column f1; +alter table only p13 drop column f1; +-- c1.f1 is still there, but no longer inherited +alter table c13 drop column f1; + +--drop table p1 cascade; + +create table p14(id int, name text); +create table p24(id2 int, name text, height int); +create table c14(age int) inherits(p1,p2); +create table c14 as select * from p1,p2; +alter table c14 add column age int; +create table gc1() inherits (c14); +select * into gc1 from c14; + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) 
+where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +-- should work +alter table only p14 drop column name; +-- should work. Now c1.name is local and inhcount is 0. +alter table p24 drop column name; +-- should be rejected since its inherited +alter table gc1 drop column name; +-- should work, and drop gc1.name along +alter table c14 drop column name; +-- should fail: column does not exist +alter table gc1 drop column name; +-- should work and drop the attribute in all tables +alter table p24 drop column height; + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +--drop table p1, p2 cascade; + +-- +-- Test the ALTER TABLE SET WITH/WITHOUT OIDS command +-- +create table altstartwith (col integer) with oids; + +insert into altstartwith values (1); + +select oid > 0, * from altstartwith; + +alter table altstartwith set without oids; + +select oid > 0, * from altstartwith; -- fails +select * from altstartwith; + +alter table altstartwith set with oids; + +select oid > 0, * from altstartwith; + +--drop table altstartwith; + +-- Check inheritance cases +create table altwithoid (col integer) with oids; + +-- Inherits parents oid column anyway +create table altinhoid () inherits (altwithoid) without oids; + +insert into altinhoid values (1); + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +alter table altwithoid set without oids; + +select oid > 0, * from altwithoid; -- fails +select oid > 0, * from altinhoid; -- fails +select * from altwithoid; +select * from altinhoid; + +alter table altwithoid set with oids; + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +--drop table altwithoid cascade; + +create table altwithoid1 (col integer) without oids; + +-- child can have local oid column 
+create table altinhoid1 () inherits (altwithoid1) with oids; + +insert into altinhoid1 values (1); + +select oid > 0, * from altwithoid1; -- fails +select oid > 0, * from altinhoid1; + +alter table altwithoid1 set with oids; + +select oid > 0, * from altwithoid1; +select oid > 0, * from altinhoid1; + +-- the child's local definition should remain +alter table altwithoid1 set without oids; + +select oid > 0, * from altwithoid1; -- fails +select oid > 0, * from altinhoid1; + +--drop table altwithoid cascade; + +-- test renumbering of child-table columns in inherited operations + +create table p15 (f1 int); +create table c15 (f2 text, f3 int) inherits (p1); +create table c15 as select * from p15; +alter table c15 add column f2 text, add column f3 int; +alter table p15 add column a1 int check (a1 > 0); +alter table p15 add column f2 text; + +insert into p15 values (1,2,'abc'); +insert into c15 values(11,'xyz',33,0); -- should fail +insert into c15 values(11,'xyz',33,22); + +select * from p15 order by f1; +alter table p15 replica identity full; +update p15 set a1 = a1 + 1, f2 = upper(f2); +select * from p15 order by f1; + +--drop table p1 cascade; + +-- test that operations with a dropped column do not try to reference +-- its datatype + +-- create domain mytype as text; +create type mytype as (a text); +create table foo (f1 text, f2 mytype, f3 text); + +insert into foo values('bb','cc','dd'); +select * from foo order by f1; + +-- drop domain mytype cascade; + +select * from foo order by f1; +insert into foo values('qq','rr'); +select * from foo order by f1; +alter table foo replica identity full; +update foo set f3 = 'zz'; +select * from foo order by f1; +select f3,max(f1) from foo group by f3; + +-- Simple tests for alter table column type +alter table foo replica identity full; +delete from foo where f1 = 'qq'; +alter table foo alter f1 TYPE integer; -- fails +alter table foo alter f1 TYPE varchar(10); +--drop table foo; + +create table anothertab (atcol1 serial8, 
atcol2 boolean, + constraint anothertab_chk check (atcol1 <= 3));; + +insert into anothertab (atcol1, atcol2) values (default, true); +insert into anothertab (atcol1, atcol2) values (default, false); +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol1 type boolean; -- we could support this cast +alter table anothertab alter column atcol1 type integer; + +select * from anothertab order by atcol1, atcol2; + +insert into anothertab (atcol1, atcol2) values (45, null); -- fails +insert into anothertab (atcol1, atcol2) values (default, null); + +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol2 type text + using case when atcol2 is true then 'IT WAS TRUE' + when atcol2 is false then 'IT WAS FALSE' + else 'IT WAS NULL!' end; + +select * from anothertab order by atcol1, atcol2; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab alter column atcol1 drop default; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab drop constraint anothertab_chk; +alter table anothertab drop constraint anothertab_chk; -- fails +alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds + +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; + +select * from anothertab order by atcol1, atcol2; + +--drop table anothertab; +-- alter table anothertab alter column atcol1 default false; +drop sequence anothertab_atcol1_seq; + +create table another (f1 int, f2 text);; + +insert into another values(1, 'one'); +insert into another values(2, 'two'); +insert into another values(3, 'three'); + +select * from another order by f1, f2; + +alter table another + alter f1 type text using f2 || ' more', + alter f2 type bigint using f1 * 10; + +select * 
from another order by f1, f2; + +--drop table another; + +-- table's row type +create table tab1 (a int, b text); +create table tab2 (x int, y tab1); +alter table tab1 alter column b type varchar; -- fails + +-- disallow recursive containment of row types +-- create table recur1 (f1 int); +-- alter table recur1 add column f2 recur1; -- fails +-- alter table recur1 add column f2 recur1[]; -- fails +-- create domain array_of_recur1 as recur1[]; +-- alter table recur1 add column f2 array_of_recur1; -- fails +-- create table recur2 (f1 int, f2 recur1); +-- alter table recur1 add column f2 recur2; -- fails +-- alter table recur1 add column f2 int; +-- alter table recur1 alter column f2 type recur2; -- fails + +-- SET STORAGE may need to add a TOAST table +create table test_storage (a text); +alter table test_storage alter a set storage plain; +alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table +alter table test_storage alter a set storage extended; -- re-add TOAST table + +select reltoastrelid <> 0 as has_toast_table +from pg_class +where oid = 'test_storage'::regclass; + +-- ALTER TYPE with a check constraint and a child table (bug before Nov 2012) +CREATE TABLE test_inh_check (a float check (a > 10.2)); +CREATE TABLE test_inh_check_child() INHERITS(test_inh_check); +ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric; +\d test_inh_check +\d test_inh_check_child + +-- +-- lock levels +-- +drop type lockmodes; +create type lockmodes as enum ( + 'AccessShareLock' +,'RowShareLock' +,'RowExclusiveLock' +,'ShareUpdateExclusiveLock' +,'ShareLock' +,'ShareRowExclusiveLock' +,'ExclusiveLock' +,'AccessExclusiveLock' +); + +drop view my_locks; +create or replace view my_locks as +select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode +from pg_locks l join pg_class c on l.relation = c.oid +where virtualtransaction = ( + select virtualtransaction + from pg_locks + where 
transactionid = txid_current()::integer) +and locktype = 'relation' +and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') +and c.relname != 'my_locks' +group by c.relname; + +create table alterlock (f1 int primary key, f2 text); + +start transaction; alter table alterlock alter column f2 set statistics 150; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock cluster on alterlock_pkey; +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set without cluster; +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (fillfactor = 100); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock reset (fillfactor); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (toast.autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock alter column f2 set (n_distinct = 1); +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set storage extended; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set default 'x'; +select * from my_locks order by 1; +rollback; + +-- cleanup +--drop table alterlock; +drop view my_locks; +-- drop type lockmodes; + +-- +-- --alter function +-- +--create function test_strict(text) returns text as + 'select coalesce($1, ''got passed a null'');' + language sql returns null on null input; +select test_strict(NULL); +--alter function test_strict(text) called on null input; +select test_strict(NULL); + +--create function non_strict(text) returns text as + 'select coalesce($1, ''got passed a null'');' + language sql called on null input; +select non_strict(NULL); 
+--alter function non_strict(text) returns null on null input; +select non_strict(NULL); + +-- +-- alter object set schema +-- + +create schema alter1; +create schema alter2; + +-- cannot move table into system built-in schema +create table test1(a int); +alter table test1 set schema dbms_random; +alter table test1 set schema utl_file; + +create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0)); + +create view alter1.v1 as select * from alter1.t1; + +-- --create function alter1.plus1(int) returns int as 'select $1+1' language sql; + +-- create domain alter1.posint integer check (value > 0); + +create type alter1.ctype as (f1 int, f2 text); + +--create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql +as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2'; + +--create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype); + +--create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as + -- operator 1 alter1.=(alter1.ctype, alter1.ctype); + +-- create conversion alter1.ascii_to_utf8 for 'sql_ascii' to 'utf8' from ascii_to_utf8; + +--create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +--create text search configuration alter1.cfg(parser = alter1.prs); +--create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize); +--create text search dictionary alter1.dict(template = alter1.tmpl); + +insert into alter1.t1(f2) values(11); +insert into alter1.t1(f2) values(12); + +alter table alter1.t1 set schema alter2; +alter table alter1.v1 set schema alter2; +--alter function alter1.plus1(int) set schema alter2; +-- alter domain alter1.posint set schema alter2; +--alter operator class alter1.ctype_hash_ops using hash set schema alter2; +--alter operator family alter1.ctype_hash_ops using hash set schema alter2; +--alter operator 
alter1.=(alter1.ctype, alter1.ctype) set schema alter2; +--alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2; +alter type alter1.ctype set schema alter2; +--alter conversion alter1.ascii_to_utf8 set schema alter2; +--alter text search parser alter1.prs set schema alter2; +--alter text search configuration alter1.cfg set schema alter2; +--alter text search template alter1.tmpl set schema alter2; +--alter text search dictionary alter1.dict set schema alter2; + +-- this should succeed because nothing is left in alter1 +-- drop schema alter1; + +insert into alter2.t1(f2) values(13); +insert into alter2.t1(f2) values(14); + +select * from alter2.t1 order by f1, f2; + +alter table alter1.t1 alter column f1 drop default; +drop sequence alter1.t1_f1_seq; + +select * from alter2.v1 order by f1, f2; +drop view alter2.v1; + +select alter2.plus1(41); + +-- clean up +-- drop schema alter2 cascade; +-- drop schema alter1 cascade; + +-- +-- composite types +-- + +CREATE TYPE test_type AS (a int); +\d test_type + +ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ADD ATTRIBUTE b text; +\d test_type + +ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar; +\d test_type + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE b; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE c; -- fails + +ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c; + +ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean; +\d test_type + +ALTER TYPE test_type RENAME ATTRIBUTE a TO aa; +ALTER TYPE test_type RENAME ATTRIBUTE d TO dd; +\d test_type + +-- DROP TYPE test_type; + +CREATE TYPE test_type1 AS (a int, b text); +CREATE TABLE test_tbl1 (x int, y test_type1); +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails + +CREATE TYPE test_type2 AS (a int, b text); +-- CREATE TABLE test_tbl2 OF test_type2; +CREATE 
TABLE test_tbl2_subclass () INHERITS (test_tbl2); +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails +ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails +ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; +\d test_type2 +\d test_tbl2 +\d test_tbl2_subclass + +--drop table test_tbl2_subclass; + +-- This test isn't that interesting on its own, but the purpose is to leave +-- behind a table to test pg_upgrade with. The table has a composite type +-- column in it, and the composite type has a dropped attribute. +CREATE TYPE test_type3 AS (a int); +CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3; +ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int; + +CREATE TYPE test_type_empty AS (); + +-- +-- typed tables: OF / NOT OF +-- + +CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2)); +ALTER TYPE tt_t0 DROP ATTRIBUTE z; +CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK +CREATE TABLE tt1 (x int, y bigint); -- wrong base type +CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod +CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order +CREATE TABLE tt4 (x int); -- too few columns +CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns +CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent +CREATE TABLE tt7 (x int, q text, y numeric(8,2)) WITH OIDS; +ALTER TABLE tt7 DROP q; -- OK + +ALTER TABLE tt0 OF tt_t0; +ALTER TABLE tt1 OF tt_t0; +ALTER TABLE tt2 OF tt_t0; +ALTER TABLE tt3 OF tt_t0; +ALTER TABLE tt4 OF tt_t0; +ALTER TABLE tt5 OF tt_t0; +ALTER TABLE tt6 OF tt_t0; +ALTER TABLE tt7 OF tt_t0; + +CREATE TYPE tt_t1 
AS (x int, y numeric(8,2)); +ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table +ALTER TABLE tt7 NOT OF; +\d tt7 +drop table tt0; +-- make sure we can drop a constraint on the parent but it remains on the child +CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL)); +CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent); +ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check"; +-- should fail +INSERT INTO test_drop_constr_child (c) VALUES (NULL); +--drop table test_drop_constr_parent CASCADE; + +-- +-- IF EXISTS test +-- +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +CREATE TABLE tt8(a int); +CREATE SCHEMA alter2; + +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +\d alter2.tt8 + +--drop table alter2.tt8; +DROP SCHEMA alter2; +--custom script +--create table +CREATE TABLE TBL_DOMAIN +( + IDOMAINID NUMBER(10) NOT NULL, + SDOMAINNAME VARCHAR2(30) NOT NULL +); +--create/recreate primary, unique and foreign key constraints +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) + USING INDEX ; + +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) + USING INDEX ; +\d+ TBL_DOMAIN +--drop table TBL_DOMAIN; + +--create table +CREATE TABLE TBL_CM_MAXTSENDTOHOST +( + I_MODULETYPE NUMBER(38) NOT NULL, + I_MODULENO NUMBER(38) NOT NULL, + I_PLAMODULENO NUMBER(38) NOT NULL, + I_TABLEID NUMBER(38) NOT NULL, + 
I_OLDMAXTUPLE NUMBER(38) NOT NULL, + I_NEWMAXTUPLE NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '' +) + PCTFREE 10 + INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_CM_MAXTSENDTOHOST + ADD PRIMARY KEY (I_PLAMODULENO, I_TABLEID) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); + \d+ TBL_CM_MAXTSENDTOHOST + --drop table TBL_CM_MAXTSENDTOHOST; + +--create table +CREATE TABLE TBL_LICCTRLDESC_DEFAULT +( + I_INDEX NUMBER(38) NOT NULL, + SV_FEATURENAME VARCHAR2(64) NOT NULL, + SV_ITEMNAME VARCHAR2(64) NOT NULL, + I_ITEMTYPE NUMBER(38) NOT NULL, + I_ITEMVALUEMIN NUMBER(38) NOT NULL, + I_ITEMVALUEMAX NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '', + I_STATUS NUMBER(38) NOT NULL +) + PCTFREE 10 + INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_LICCTRLDESC_DEFAULT + ADD PRIMARY KEY (I_INDEX) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +--add unique index +CREATE UNIQUE INDEX IDX_TBL_LICCTRL_DEF ON TBL_LICCTRLDESC_DEFAULT (I_INDEX DESC, I_STATUS) + PCTFREE 10 + INITRANS 
2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +\d+ TBL_LICCTRLDESC_DEFAULT + --drop table TBL_LICCTRLDESC_DEFAULT; +--using index clause +CREATE TABLE STUDENTS +( + ID INT, + NAME VARCHAR2(20), + AGE INT, + ADDRESS VARCHAR(30) +); + --alter table to add unique index or primary key +ALTER TABLE STUDENTS ADD UNIQUE (ID) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD CONSTRAINT ZHANGYG UNIQUE (AGE, ADDRESS) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD PRIMARY KEY (AGE) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); +\d+ STUDENTS +--drop table STUDENTS; +--simulate A db's ALTER TABLE gram +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +ALTER TABLE MODIFY_TABLE_A ADD (mychar CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint1 INT, mychar1 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint2 INT, mychar2 CHAR, mychar3 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD a CHAR, ADD b CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A ADD mychar4 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I VARCHAR2(64); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I CHAR, MODIFY myint1 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(12)); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), mychar1 INT); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), myint1 INT); +--drop table MODIFY_TABLE_A; + +create table test_alter_type(a int,b text); +alter table test_alter_type alter column a type regclass; +--drop table test_alter_type; + +create table test_mod(a int,b text); +alter table test_mod alter column a type regclass; +alter table test_mod alter column a set default "d"; +alter table test_mod alter column 
a set default "d"::int; +alter table test_mod alter column a set default "d"::int + 1; +--drop table test_mod; + +--simulate A db and postgresql, ALTER TABLE IF EXISTS table_name ADD( { element_list_clause } [, ...] ) +--simulate A db and postgresql, ALTER TABLE IF EXISTS table_name MODIFY( { element_list_clause } [, ...] ) +create schema columnar_storage; +create table columnar_storage.create_columnar_add_common_008 (c_tinyint tinyint,c_smallint smallint,c_int integer,c_bigint bigint,c_money money,c_numeric numeric,c_real real,c_double double precision,c_decimal decimal,c_varchar varchar,c_char char(30),c_nvarchar2 nvarchar2,c_text text,c_timestamp timestamp with time zone,c_timestamptz timestamp without time zone,c_date date,c_time time without time zone,c_timetz time with time zone,c_interval interval,c_tinterval tinterval,c_smalldatetime smalldatetime,c_bytea bytea,c_boolean boolean,c_inet inet,c_cidr cidr,c_bit bit(10),c_varbit varbit(10),c_oid oid) with (orientation=column); +alter table if exists columnar_storage.create_columnar_add_common_007 modify (c_int varchar(20)); +alter table if exists columnar_storage.create_columnar_add_common_008 modify (c_int varchar(20), c_double varchar(20)); +select * from columnar_storage.create_columnar_add_common_008; +--drop table columnar_storage.create_columnar_add_common_008; +create table columnar_storage.create_columnar_add_common_008 (c_tinyint tinyint,c_smallint smallint,c_int integer,c_bigint bigint,c_money money,c_numeric numeric,c_real real,c_double double precision,c_decimal decimal,c_varchar varchar,c_char char(30),c_nvarchar2 nvarchar2,c_text text,c_timestamp timestamp with time zone,c_timestamptz timestamp without time zone,c_date date,c_time time without time zone,c_timetz time with time zone,c_interval interval,c_tinterval tinterval,c_smalldatetime smalldatetime,c_bytea bytea,c_boolean boolean,c_inet inet,c_cidr cidr,c_bit bit(10),c_varbit varbit(10),c_oid oid) with (orientation=column); +alter table if 
exists columnar_storage.create_columnar_add_common_007 add (c_time_008 time without time zone,c_timetz_008 time with time zone); +alter table if exists columnar_storage.create_columnar_add_common_008 add (c_time_008 time without time zone,c_timetz_008 time with time zone); +select * from columnar_storage.create_columnar_add_common_008; +--drop table columnar_storage.create_columnar_add_common_008; +drop schema columnar_storage cascade; + +create table test_drop_column_1 (a int, b int, c int); +create table test_drop_column_2 (a int, b int); +create table test_drop_column_3 (a int, b int); +alter table test_drop_column_1 drop column c; +explain (verbose true, costs false) insert into test_drop_column_1 select * from test_drop_column_2; +insert into test_drop_column_1 select * from test_drop_column_2; +explain (verbose true, costs false) insert into test_drop_column_1 select * from test_drop_column_2 order by 2; +insert into test_drop_column_1 select * from test_drop_column_2 order by 2; +explain (verbose true, costs false) insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.a; +insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.a; +explain (verbose true, costs false) insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b; +insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b; +explain (verbose true, costs false) insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b order 
by 1, 2; +insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b order by 1, 2; +alter table test_drop_column2 replica identity full; +explain (verbose true, costs false) update test_drop_column_1 set a=test_drop_column_2.a from test_drop_column_2; +update test_drop_column_1 set a=test_drop_column_2.a from test_drop_column_2; +explain (verbose true, costs false) delete from test_drop_column_1 where a in (select a from test_drop_column_2); +alter table test_drop_column_1 replica identity full; +delete from test_drop_column_1 where a in (select a from test_drop_column_2); + +create table test_drop_column_cstore_1 (a int, b int, c int) with (orientation = column); +create table test_drop_column_cstore_2 (a int, b int) with (orientation = column); +create table test_drop_column_cstore_3 (a int) with (orientation = column); +alter table test_drop_column_cstore_1 drop column c; +insert into test_drop_column_cstore_1 select * from test_drop_column_cstore_2; +insert into test_drop_column_cstore_1 select * from test_drop_column_cstore_2 order by 2; +insert into test_drop_column_cstore_1 select test_drop_column_cstore_2.a, test_drop_column_cstore_3.a from test_drop_column_cstore_2, test_drop_column_cstore_3 where test_drop_column_cstore_2.a = test_drop_column_cstore_3.a; + +drop table test_drop_column_1; +drop table test_drop_column_2; +drop table test_drop_column_3; +drop table test_drop_column_cstore_1; +drop table test_drop_column_cstore_2; +drop table test_drop_column_cstore_3; + +create table test_hash (a int, b int); +create sequence test_seq1; +alter table test_hash alter column a type serial; --fail +alter table test_hash alter column a set default nextval('test_seq1'); +insert into test_hash(b) values(generate_series(1,10)); +alter table test_hash add column c serial; --not supported +alter table test_hash add column d int default 
nextval('test_seq1'); --not supported +alter table test_hash add column e int default nextval('test_seq1')*10; --not supported +--drop table test_hash; +alter table test_hash alter column a drop default; +drop sequence test_seq1; + +-- check column addition within a view (bug #14876) +create table at_base_table(id int, stuff text); +insert into at_base_table values (23, 'skidoo'); +create view at_view_1 as select * from at_base_table bt; +create view at_view_2 as select *, v1 as j from at_view_1 v1; +\d+ at_view_1 +\d+ at_view_2 +explain (verbose, costs off) select * from at_view_2; +select * from at_view_2; + +create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt; +\d+ at_view_1 +\d+ at_view_2 +explain (verbose, costs off) select * from at_view_2; +select * from at_view_2; + +drop view at_view_2; +drop view at_view_1; +--drop table at_base_table; + +create table tt_row_rep_1(a int); +alter table tt_row_rep_1 drop column a; + +create table tt_row_rep_2(a int, b int); +alter table tt_row_rep_2 drop column b; +alter table tt_row_rep_2 drop column a; + +create table tt_col_rep_1(a int) with(orientation=column); +alter table tt_col_rep_1 drop column a; + +create table tt_col_rep_2(a int, b int) with(orientation=column); +alter table tt_col_rep_2 drop column b; +alter table tt_col_rep_2 drop column a; + +--drop table tt_row_rep_1; +--drop table tt_row_rep_2; +drop table tt_col_rep_1; +drop table tt_col_rep_2; + +-- renaming constraints with cache reset of target relation +CREATE TABLE constraint_rename_cache (a int, + CONSTRAINT chk_a CHECK (a > 0), + PRIMARY KEY (a)); +ALTER TABLE constraint_rename_cache + RENAME CONSTRAINT chk_a TO chk_a_new; +ALTER TABLE constraint_rename_cache + RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new; +CREATE TABLE like_constraint_rename_cache + (LIKE constraint_rename_cache INCLUDING ALL); +\d like_constraint_rename_cache +--drop table constraint_rename_cache; +--drop table 
like_constraint_rename_cache; + + + +create table t_alter_type(c0 int4range Unique, foreign key(c0) references t_alter_type(c0)); +alter table t_alter_type alter c0 set data type int4range; + +----drop table t_alter_type; + +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +\d MODIFY_TABLE_A +create table aaa(a integer); +\d aaa +create table bbb(B integer); +\d bbb +create table CCC(c integer); +\d CCC +create table DDD(D integer); +\d DDD +create table EEE("E" integer); +\d EEE +create table FFF("FF" integer); +\d FFF +create table HHH("HH" integer); + +alter table aaa rename a to AA; +\d aaa +create table GGG("GdGG" integer); +alter table CCC rename c to "CC"; +alter table FFF rename FF to ff; -- differnt in b compatibility +alter table HHH rename "HH" to gg; + +rename table public.HHH to public.hhh; +rename table public.hhh to public.hhh1; + +create table aaaaa (b int generated by default as identity,c int); +\dS aaaaa_b_seq +insert into aaaaa(c) values(213); +insert into aaaaa(c) values(21); +insert into aaaaa values(3,121); +insert into aaaaa(c) values(111); +insert into aaaaa values(null,212); +alter table aaaaa alter column b drop default; +drop sequence aaaaa_b_seq; diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.setup b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.setup new file mode 100644 index 0000000000..db2469581c --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.setup @@ -0,0 +1,11 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $pub_node1_port "create schema fastcheck;set search_path=fastcheck;create table t1_full (a int, b text);insert into t1_full values (1, 'a'), (2, 'b'), (3, 'c');alter table t1_full replica identity full;" +exec_sql_with_user $case_use_db $sub_node1_port "create schema fastcheck;set search_path=fastcheck;create table t1_full (a int, b text, 
myc int); insert into t1_full values (101, 'a', 1), (102, 'b', 2);" + +exec_sql_with_user $case_use_db $pub_node1_port "set search_path=fastcheck;create table tkey1 (a int primary key, b text);insert into tkey1 values (1, 'a'), (2, 'b'), (3, 'c');alter table tkey1 replica identity default;" +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;create table tkey1 (a int primary key, b text, myc int); insert into tkey1 values (101, '101a', 1), (102, '102b', 2);" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.sql new file mode 100644 index 0000000000..32fe5b2706 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.sql @@ -0,0 +1,692 @@ +-- +--FOR BLACKLIST FEATURE: REFERENCES/WITH OIDS/RULE/CREATE TYPE/DOMAIN is not supported. +-- + +-- +-- ALTER_TABLE +-- add attribute +-- +set search_path=fastcheck; +CREATE TABLE atmp1 (initial int4); + +COMMENT ON TABLE tmp_wrong IS 'table comment'; +COMMENT ON TABLE atmp1 IS 'table comment'; +COMMENT ON TABLE atmp1 IS NULL; + +ALTER TABLE atmp1 ADD COLUMN xmin integer; -- fails + +ALTER TABLE atmp1 ADD COLUMN a int4 default 3; + +ALTER TABLE atmp1 ADD COLUMN b name; + +ALTER TABLE atmp1 ADD COLUMN c text; + +ALTER TABLE atmp1 ADD COLUMN d float8; + +ALTER TABLE atmp1 ADD COLUMN e float4; + +ALTER TABLE atmp1 ADD COLUMN f int2; + +ALTER TABLE atmp1 ADD COLUMN g polygon; + +ALTER TABLE atmp1 ADD COLUMN h abstime; + +ALTER TABLE atmp1 ADD COLUMN i char; + +ALTER TABLE atmp1 ADD COLUMN j abstime[]; + +ALTER TABLE atmp1 ADD COLUMN k int4; + +ALTER TABLE atmp1 ADD COLUMN l tid; + +ALTER TABLE atmp1 ADD COLUMN m xid; + +ALTER TABLE atmp1 ADD COLUMN n oidvector; + +--ALTER TABLE atmp1 ADD COLUMN o lock; +ALTER TABLE atmp1 ADD COLUMN p smgr; + +ALTER TABLE atmp1 ADD COLUMN q point; + +ALTER TABLE atmp1 ADD COLUMN r 
lseg; + +ALTER TABLE atmp1 ADD COLUMN s path; + +ALTER TABLE atmp1 ADD COLUMN t box; + +ALTER TABLE atmp1 ADD COLUMN u tinterval; + +ALTER TABLE atmp1 ADD COLUMN v timestamp; + +ALTER TABLE atmp1 ADD COLUMN w interval; + +ALTER TABLE atmp1 ADD COLUMN x float8[]; + +ALTER TABLE atmp1 ADD COLUMN y float4[]; + +ALTER TABLE atmp1 ADD COLUMN z int2[]; + +INSERT INTO atmp1 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp1; + +----drop table tmp; + +-- the wolf bug - schema mods caused inconsistent row descriptors +CREATE TABLE atmp2 ( + initial int4 +); + +ALTER TABLE atmp2 ADD COLUMN a int4; + +ALTER TABLE atmp2 ADD COLUMN b name; + +ALTER TABLE atmp2 ADD COLUMN c text; + +ALTER TABLE atmp2 ADD COLUMN d float8; + +ALTER TABLE atmp2 ADD COLUMN e float4; + +ALTER TABLE atmp2 ADD COLUMN f int2; + +ALTER TABLE atmp2 ADD COLUMN g polygon; + +ALTER TABLE atmp2 ADD COLUMN h abstime; + +ALTER TABLE atmp2 ADD COLUMN i char; + +ALTER TABLE atmp2 ADD COLUMN j abstime[]; + +ALTER TABLE atmp2 ADD COLUMN k int4; + +ALTER TABLE atmp2 ADD COLUMN l tid; + +ALTER TABLE atmp2 ADD COLUMN m xid; + +ALTER TABLE atmp2 ADD COLUMN n oidvector; + +--ALTER TABLE atmp2 ADD COLUMN o lock; +ALTER TABLE atmp2 ADD COLUMN p smgr; + +ALTER TABLE atmp2 ADD COLUMN q point; + +ALTER TABLE atmp2 ADD COLUMN r lseg; + +ALTER TABLE atmp2 ADD COLUMN s path; + +ALTER TABLE atmp2 ADD COLUMN t box; + +ALTER TABLE atmp2 ADD COLUMN u tinterval; + +ALTER TABLE atmp2 ADD COLUMN v timestamp; + +ALTER TABLE atmp2 ADD COLUMN w interval; + +ALTER TABLE atmp2 ADD COLUMN 
x float8[]; + +ALTER TABLE atmp2 ADD COLUMN y float4[]; + +ALTER TABLE atmp2 ADD COLUMN z int2[]; + +INSERT INTO atmp2 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp2; + +----drop table tmp; + + +-- +-- rename - check on both non-temp and temp tables +-- +CREATE TABLE atmp3 (regtable int); +-- Enforce use of COMMIT instead of 2PC for temporary objects + + +CREATE TABLE onek ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); +CREATE INDEX onek_unique1 ON onek USING btree(unique1 int4_ops); + +CREATE TABLE tenk1 ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); + +CREATE TABLE stud_emp ( + name text, + age int4, + location point, + salary int4, + manager name, + gpa float8, + percent int4 +) with(autovacuum_enabled = off); + +-- ALTER TABLE ... 
RENAME on non-table relations +-- renaming indexes (FIXME: this should probably test the index's functionality) +ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1; + +ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1; + +-- renaming views +CREATE VIEW tmp_view (unique1) AS SELECT unique1 FROM tenk1; +ALTER TABLE tmp_view RENAME TO tmp_view_new; + +DROP VIEW tmp_view_new; +-- toast-like relation name +alter table stud_emp rename to pg_toast_stud_emp; +alter table pg_toast_stud_emp rename to stud_emp; + +-- renaming index should rename constraint as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraint +ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0); +ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo; + +-- renaming constraint should rename index as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +DROP INDEX onek_unique1_constraint; -- to see whether it's there +ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo; +DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraints vs. 
inheritance +CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int); +\d constraint_rename_test + +create table test_modify (a int, b int); +alter table test_modify replica identity full; +alter table test_modify modify (b not null enable); +insert into test_modify(b) values (null); +insert into test_modify values (1, null); +alter table test_modify modify(b null); +insert into test_modify values (1, null); +alter table test_modify modify (b not null enable); +alter table test_modify replica identity full; +delete from test_modify; +alter table test_modify modify (a not null, b not null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (a null, b null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (b constraint ak not null); +delete from test_modify; +alter table test_modify modify (b constraint ak not null); +insert into test_modify values(1,1); +insert into test_modify values(1,null); +alter table test_modify modify (b constraint ak null); +insert into test_modify values(1,null); +alter table test_modify modify (a null, a not null); +-- try alter view should fail +create view test_modify_view as select * from test_modify; +alter table test_modify_view modify (a not null enable); +drop view test_modify_view; +--drop table test_modify; + + +-- test setting and removing default values +create table def_test ( + c1 int4 default 5, + c2 text default 'initial_default' +); +insert into def_test default values; +alter table def_test alter column c1 drop default; +insert into def_test default values; +alter table def_test alter column c2 drop default; +insert into def_test default values; +alter table def_test alter column c1 set default 10; +alter table def_test alter column c2 set default 'new_default'; +insert into def_test default values; +select * from def_test order by 1, 2; + +-- set defaults to 
an incorrect type: this should fail +alter table def_test alter column c1 set default 'wrong_datatype'; +alter table def_test alter column c2 set default 20; + +-- set defaults on a non-existent column: this should fail +alter table def_test alter column c3 set default 30; + +create type mytype as (a text); +create table foo (f1 text, f2 mytype, f3 text); + +insert into foo values('bb','cc','dd'); +select * from foo order by f1; + +-- drop domain mytype cascade; + +select * from foo order by f1; +insert into foo values('qq','rr'); +select * from foo order by f1; +alter table foo replica identity full; +update foo set f3 = 'zz'; +select * from foo order by f1; +select f3,max(f1) from foo group by f3; + +-- Simple tests for alter table column type +alter table foo replica identity full; +delete from foo where f1 = 'qq'; +alter table foo alter f1 TYPE integer; -- fails +alter table foo alter f1 TYPE varchar(10); +--drop table foo; + + +CREATE TABLE TBL_DOMAIN +( + IDOMAINID NUMBER(10) NOT NULL, + SDOMAINNAME VARCHAR2(30) NOT NULL +); +--create/recreate primary, unique and foreign key constraints +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) + USING INDEX ; + +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) + USING INDEX ; +\d+ TBL_DOMAIN +--drop table TBL_DOMAIN; + +--create table +CREATE TABLE TBL_CM_MAXTSENDTOHOST +( + I_MODULETYPE NUMBER(38) NOT NULL, + I_MODULENO NUMBER(38) NOT NULL, + I_PLAMODULENO NUMBER(38) NOT NULL, + I_TABLEID NUMBER(38) NOT NULL, + I_OLDMAXTUPLE NUMBER(38) NOT NULL, + I_NEWMAXTUPLE NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '' +) + PCTFREE 10 
+ INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_CM_MAXTSENDTOHOST + ADD PRIMARY KEY (I_PLAMODULENO, I_TABLEID) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); + \d+ TBL_CM_MAXTSENDTOHOST + --drop table TBL_CM_MAXTSENDTOHOST; + +--create table +CREATE TABLE TBL_LICCTRLDESC_DEFAULT +( + I_INDEX NUMBER(38) NOT NULL, + SV_FEATURENAME VARCHAR2(64) NOT NULL, + SV_ITEMNAME VARCHAR2(64) NOT NULL, + I_ITEMTYPE NUMBER(38) NOT NULL, + I_ITEMVALUEMIN NUMBER(38) NOT NULL, + I_ITEMVALUEMAX NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '', + I_STATUS NUMBER(38) NOT NULL +) + PCTFREE 10 + INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_LICCTRLDESC_DEFAULT + ADD PRIMARY KEY (I_INDEX) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +--add unique index +CREATE UNIQUE INDEX IDX_TBL_LICCTRL_DEF ON TBL_LICCTRLDESC_DEFAULT (I_INDEX DESC, I_STATUS) + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +\d+ TBL_LICCTRLDESC_DEFAULT + --drop table TBL_LICCTRLDESC_DEFAULT; +--using index clause +CREATE TABLE STUDENTS +( + ID INT, + NAME VARCHAR2(20), + AGE INT, + ADDRESS VARCHAR(30) +); + --alter table to add unique index or primary key +ALTER TABLE STUDENTS ADD UNIQUE (ID) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + 
MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD CONSTRAINT ZHANGYG UNIQUE (AGE, ADDRESS) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD PRIMARY KEY (AGE) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); +\d+ STUDENTS +--drop table STUDENTS; +--simulate A db's ALTER TABLE gram +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +ALTER TABLE MODIFY_TABLE_A ADD (mychar CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint1 INT, mychar1 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint2 INT, mychar2 CHAR, mychar3 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD a CHAR, ADD b CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A ADD mychar4 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I VARCHAR2(64); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I CHAR, MODIFY myint1 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(12)); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), mychar1 INT); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), myint1 INT); +--drop table MODIFY_TABLE_A; + + +CREATE SCHEMA test_sche; +CREATE TABLE test_sche.logical_TB1( +c1 integer, +c2 date, +c3 text) +partition by system +( +partition p1, +partition p2, +partition p3); + +insert into test_sche.logical_TB1 partition(p1) values(1,'2022-01-01','p1'); +insert into test_sche.logical_TB1 partition(p2) values(2,'2022-02-01','p2'); +insert into test_sche.logical_TB1 partition(p2) values(3,'2022-02-01','p3'); +truncate test_sche.logical_TB1; +--drop table test_sche.logical_TB1; + +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +\d MODIFY_TABLE_A +create table aaa(a integer); +\d aaa +create table bbb(B integer); +\d bbb +create table CCC(c integer); +\d CCC +create table DDD(D integer); +\d DDD +create table EEE("E" integer); +\d EEE +create table FFF("FF" integer); +\d FFF 
+create table HHH("HH" integer); + +alter table aaa rename a to AA; +\d aaa +create table GGG("GdGG" integer); +alter table CCC rename c to "CC"; +alter table FFF rename FF to ff; -- differnt in b compatibility +alter table HHH rename "HH" to gg; + +rename table public.HHH to public.hhh; +rename table public.hhh to public.hhh1; + +insert into t1_full values (4,'d'); +insert into t1_full values (5, 'e'); +create type mytyp as (a int, b text); +alter table t1_full add column c timestamp default now() not null first; +alter table t1_full add column d timestamp on update current_timestamp; + +alter table t1_full add column e int auto_increment unique; +alter table t1_full alter column b set data type timestamp using now(); +alter table t1_full add column ff mytyp default(1, now()::text); +alter table t1_full add column ff33 mytyp default(1, current_timestamp(3)::text); + +alter table t1_full rename to t1_repl_index; +alter table t1_repl_index add constraint t1_pkey_a primary key (a); +alter table t1_repl_index replica identity default; +alter table t1_repl_index add column f int auto_increment unique; +alter table t1_repl_index add column f int auto_increment null unique; +alter table t1_repl_index alter column b set data type timestamp using now(); +alter table t1_repl_index add column e timestamp default now() not null; +alter table t1_repl_index alter column e set data type float using random(); +alter table t1_repl_index add column h int default random(); +alter table t1_repl_index add column h int; +alter table t1_repl_index alter column h set data type float; +update t1_repl_index set h=random(); +alter table t1_repl_index add column g timestamp generated always as (b + '1 year'); +insert into t1_repl_index (a) values (200), (201), (202); +alter table t1_repl_index modify column f int; + +insert into tkey1 values (10), (12); +alter table tkey1 modify column b float4 auto_increment unique; +alter table tkey1 modify column b int auto_increment null unique; +alter 
table tkey1 modify column b int; + +create table blobtbl (id int primary key, a blob, b raw, c clob, d bytea); +alter table blobtbl replica identity default; +insert into blobtbl values (1, utl_raw.cast_to_raw('this is blob'), utl_raw.cast_to_raw('this is raw'), 'this is clob', decode('this is bytea', 'escape')); +insert into blobtbl values (2, utl_raw.cast_to_raw('this is blob2'), utl_raw.cast_to_raw('this is raw2'), 'this is clob2', decode('this is bytea2', 'escape')); +insert into blobtbl values (3, utl_raw.cast_to_raw('this is blob3'), utl_raw.cast_to_raw('this is raw3'), 'this is clob3', decode('this is bytea3', 'escape')); + +update blobtbl set a=utl_raw.cast_to_raw('this is blob after update'), b=utl_raw.cast_to_raw('this is raw after update'), c='this is clob after update', d=decode('this is bytea after i[date]', 'escape') where id=2; +delete from blobtbl where id=3; + +select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col into blobtbl_1 from blobtbl; + +create table blobtbl_2 as (select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col from blobtbl); + +create schema testb; +set search_path='testb'; +create table t1 (a int, b timestamp without time zone); +alter table t1 alter column b set default now(); +alter table t1 modify column b timestamp on update current_timestamp; +insert into t1 (a,b) values (1,default), (2,default),(3,'1900-01-01 1:00:00'); +alter table t1 replica identity full; +create type typ1 as (a int, b text); + +alter table t1 add column c typ1 default(1, now()::text); +alter type typ1 add attribute c timestamp; +alter table t1 add constraint t1_pkey primary key (a); +alter table t1 replica identity default; +alter table t1 alter column b set data type timestamp using now() - a; +create type typ2; +create type typ2 as (a int, b int); +alter type 
typ2 drop attribute a; +drop type typ2; + +create table tab1_1163900(id int not null,a1 text) partition by range(id); +create table tab2_1163900(id int not null,a1 text) partition by list(id); +create table tab3_1163900(id int not null,a1 text) partition by hash(id); + +create table t1_1163900(id int not null,a1 text); +create table t2_1163900(id int not null,a1 text); +create table t3_1163900(id int not null,a1 text); +--insert; +insert into t1_1163900(id,a1) select generate_series(1,100),'a'; +--t3_1163900; +insert into t3_1163900(id,a1) select generate_series(1,100),'a'; +--t2_1163900; +do $$ +declare +begin +for i in 1..100 loop +insert into t2_1163900 values(20,'a'); +end loop; +end $$; + +--attach; +alter table tab1_1163900 attach partition t1_1163900 for values from (1) to (1000); +alter table tab2_1163900 attach partition t2_1163900 for values in(20); +alter table tab3_1163900 attach partition t3_1163900 for values with(modulus 1,remainder 0); + +create table aaaaa1 (b int generated by default as identity (cycle increment by 10),c int); +-- \dS aaaaa_b_seq +-- insert into aaaaa(c) values(213); +-- insert into aaaaa(c) values(21); +-- insert into aaaaa values(3,121); +-- insert into aaaaa(c) values(111); +-- insert into aaaaa values(null,212); +-- alter table aaaaa alter column b drop default; +-- drop sequence aaaaa_b_seq; + +create table bbbb (a int not null); +alter table bbbb alter column a add generated by default as identity; + +create table genalways(id bigint generated always as identity (start 68 cycle maxvalue 70),name varchar(40)); + +create table genalways2(id smallint generated always as identity (start 68 cycle maxvalue 70),name varchar(40)); + +drop table if exists gentest; +create table gentest(id integer PRIMARY KEY, name varchar(40)); +/* AT_AddIdentity */ +ALTER TABLE gentest ALTER id ADD GENERATED ALWAYS AS IDENTITY (start 12 maxvalue 322); +/* AT_SetIdentity in pg compatibility */ +ALTER TABLE gentest ALTER id SET GENERATED ALWAYS; 
+ALTER TABLE gentest ALTER id DROP IDENTITY; +ALTER TABLE gentest ALTER id ADD GENERATED BY DEFAULT AS IDENTITY (start 99 maxvalue 1000); +ALTER TABLE gentest ALTER id DROP IDENTITY IF EXISTS; +ALTER TABLE gentest ALTER id ADD GENERATED ALWAYS AS IDENTITY (start 33 maxvalue 333); +ALTER TABLE gentest ALTER id SET GENERATED BY DEFAULT; +ALTER TABLE gentest ALTER id RESTART WITH 123; +ALTER TABLE gentest ALTER id RESTART; + +create table test_tab1(id int, c1 text, c2 text); +alter table test_tab1 alter column c1 SET STATISTICS 50; +alter table test_tab1 alter column c2 SET STATISTICS PERCENT 50; +alter table test_tab1 SET COMPRESS; +alter table test_tab1 SET NOCOMPRESS; +alter table test_tab1 COMMENT = 'test_tab1 COMMENT'; +drop table if exists test_tab1; + +create table t1_z (col1 int primary key auto_increment , col2 text,col3 bigint); +insert into t1_z(col1,col2) values(3, 'aaa'); +alter table t1_z auto_increment = 3; + +alter table t1_z OWNER TO regtest_unpriv_user; + +CREATE RULE test_at_modify_rule AS ON INSERT TO t1_z WHERE (col2 is null) DO INSTEAD UPDATE t1_z SET col2='dsgs'; +CREATE RULE test_at_modify_rule AS ON UPDATE TO t1_z DO ALSO NOTIFY regtest_unpriv_user; +alter table t1_z DISABLE RULE test_at_modify_rule; +alter table t1_z ENABLE RULE test_at_modify_rule; +alter table t1_z DISABLE RULE test_at_modify_rule; +alter table t1_z ENABLE ALWAYS RULE test_at_modify_rule; +drop table if exists t1_z; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.teardown b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.teardown new file mode 100644 index 0000000000..7b0d5e6eb7 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_fastcheck.teardown @@ -0,0 +1,19 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;alter table 
t1_repl_index drop column myc; alter table tkey1 drop column myc" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;delete from t1_repl_index where a in (101,102); delete from tkey1 where a in (101,102);" + +exec_sql_with_user $case_use_db $pub_node1_port "set search_path=fastcheck;select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col into blobtbl_pub_1 from blobtbl;" + +exec_sql_with_user $case_use_db $pub_node1_port "set search_path=fastcheck;create table blobtbl_pub_2 as (select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col from blobtbl);" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;drop table blobtbl_pub_1, blobtbl_pub_2" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col into blobtbl_pub_1 from blobtbl;" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;create table blobtbl_pub_2 as (select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col from blobtbl);" diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.setup b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.setup new file mode 100644 index 0000000000..61d578e9a1 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.setup @@ -0,0 +1,9 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $pub_node1_port "create schema rewrite; set search_path=rewrite;create table t1_full (a int, b 
text);insert into t1_full values (1, 'a'), (2, 'b'), (3, 'c');alter table t1_full replica identity full;" + +exec_sql $case_use_db $sub_node1_port "create schema rewrite; set search_path=rewrite;create table t1_full (a int, b text, myc int); insert into t1_full values (101, 'a', 1), (102, 'b', 2);" diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.sql new file mode 100644 index 0000000000..d91d9d0bf2 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.sql @@ -0,0 +1,26 @@ +set search_path=rewrite; +insert into t1_full values (4,'d'); +alter table t1_full add column c timestamp default now() not null first; +alter table t1_full add column d timestamp; + +alter table t1_full add column e int unique; +alter table t1_full alter column b set data type timestamp using now(); + +alter table t1_full rename to t1_repl_index; +alter table t1_repl_index add constraint t1_pkey_a primary key (a); +alter table t1_repl_index replica identity default; +alter table t1_repl_index add column f int auto_increment unique; +alter table t1_repl_index alter column b set data type timestamp using now(); +alter table t1_repl_index add column e timestamp default now() not null; +alter table t1_repl_index alter column e set data type float using random(); +alter table t1_repl_index add column h int default random(); +alter table t1_repl_index alter column h set data type float; +update t1_repl_index set h=random(); +alter table t1_repl_index add column g timestamp generated always as (b + '1 year'); + +create table t1 (a int, b timestamp without time zone); +alter table t1 alter column b set default now(); +alter table t1 modify column b timestamp on update current_timestamp; +insert into t1 (a,b) values (1,default), (2,default),(3,'1900-01-01 1:00:00'); +alter table t1 replica identity full; +alter table t1 alter column b set 
data type timestamp using now() - a; diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.teardown b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.teardown new file mode 100644 index 0000000000..efb719a1f6 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_rewrite.teardown @@ -0,0 +1,8 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $sub_node1_port "set search_path=rewrite;alter table t1_repl_index drop column myc" +exec_sql $case_use_db $sub_node1_port "set search_path=rewrite;delete from t1_repl_index where a in (101,102)" diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.setup b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.setup new file mode 100644 index 0000000000..853b24cd5a --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.setup @@ -0,0 +1,24 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $pub_node1_port "create tablespace ts_subpart_hash_1 relative location 'test/ts_subpart_hash_1'"; + +exec_sql $case_use_db $pub_node1_port "create tablespace ts_subpart_hash_2 relative location 'test/ts_subpart_hash_2'"; +exec_sql $case_use_db $pub_node1_port "create tablespace ts_subpart_hash_test_user relative location 'test/ts_subpart_hash_test_user';" +exec_sql $case_use_db $pub_node1_port "create user user_subpart_hash password 'Test@123';" + +exec_sql $case_use_db $pub_node1_port "grant CREATE, USAGE on schema schema_vastbase_subpartition_hash to user_subpart_hash"; +exec_sql $case_use_db $pub_node1_port "grant CREATE on tablespace ts_subpart_hash_test_user to user_subpart_hash;" + +exec_sql $case_use_db $sub_node1_port "create tablespace ts_subpart_hash_1 relative location 'test/ts_subpart_hash_1'"; + +exec_sql 
$case_use_db $sub_node1_port "create tablespace ts_subpart_hash_2 relative location 'test/ts_subpart_hash_2'"; +exec_sql $case_use_db $sub_node1_port "create tablespace ts_subpart_hash_test_user relative location 'test/ts_subpart_hash_test_user';" +exec_sql $case_use_db $sub_node1_port "create user user_subpart_hash password 'Test@123';" + +exec_sql $case_use_db $sub_node1_port "grant CREATE, USAGE on schema schema_vastbase_subpartition_hash to user_subpart_hash"; +exec_sql $case_use_db $sub_node1_port "grant CREATE on tablespace ts_subpart_hash_test_user to user_subpart_hash;" + diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.sql new file mode 100644 index 0000000000..779a267457 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_alter_table_subpartition.sql @@ -0,0 +1,2156 @@ +CREATE schema schema_vastbase_subpartition_hash; +set search_path to schema_vastbase_subpartition_hash; +-- init +set datestyle = 'ISO, MDY'; +set behavior_compat_options = ''; + +create table t_subpart_normal_table_hash(id int); +create table t_subpart_part_table_hash(id int) +partition by hash(id) +( + partition p1 +); + + + +---------------------------- +-- Hash subpartition syntax +---------------------------- +create table t_subpart_range_hash_1 (id integer, age integer, name varchar(30), sale integer) +partition by range(age) +subpartition by hash(age) +( +partition p1 values less than (10), +partition p2 values less than (100) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values less than (200) +); + +create table t_subpart_list_hash_1 (id integer, age integer, name varchar(30), sale integer) +partition by list(age) +subpartition by hash(age) +( +partition p1 values (1, 2, 3, 4, 5), +partition p2 values (10, 20, 30, 40, 50) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (111, 222, 333) +); 
+ +create table t_subpart_hash_hash_1 (id integer, age integer, name varchar(30), sale integer) +partition by hash(age) +subpartition by hash(age) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 +); + +create table t_subpart_range_hash_2 (id integer, age numeric, name varchar(30), bd date) +partition by range(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (10), +partition p2 values less than (100) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values less than (MAXVALUE) + ( + subpartition sp3, + subpartition sp4 + ) +); + +create table t_subpart_list_hash_2 (id integer, age numeric, name varchar(30), bd date) +partition by list(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (1, 2, 3, 4, 5), +partition p2 values (10, 20, 30, 40, 50) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (100, 200) + ( + subpartition sp3, + subpartition sp4 + ) +); + +create table t_subpart_hash_hash_2 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 + ( + subpartition sp3, + subpartition sp4 + ) +); + +create table t_subpart_range_hash_3 (id integer, age numeric, name text, bd timestamp) +partition by range(id, age) +subpartition by hash(id) + subpartitions 2 +( +partition p1 values less than (10, 10.6789), +partition p2 values less than (100, 12345.6789) + subpartitions 3, +partition p3 values less than (MAXVALUE, MAXVALUE) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_list_hash_3 (id integer, age numeric, name text, bd timestamp) +partition by list(age) +subpartition by hash(id) + subpartitions 2 
+( +partition p1 values (10, 10.6789), +partition p2 values (100, 12345.6789) + subpartitions 3, +partition p3 values (DEFAULT) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_hash_hash_3 (id integer, age numeric, name text, bd timestamp) +partition by hash(age) +subpartition by hash(id) + subpartitions 2 +( +partition p1, +partition p2 + subpartitions 3, +partition p3 + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_hash_hash_4 (id integer, age numeric, name text, bd timestamp) +partition by hash(age) +subpartition by hash(id) + subpartitions 2 +partitions 3; + +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, p1.subpartstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename like 't_subpart_range_hash_%' + or p1.tablename like 't_subpart_list_hash_%' + or p1.tablename like 't_subpart_hash_hash_%'; + +select p1.tablename, p1.subparttemplate +from schema_subpartition.v_subpartition p1 +where p1.subparttemplate is not null + and (p1.tablename like 't_subpart_range_hash_%' + or p1.tablename like 't_subpart_list_hash_%' + or p1.tablename like 't_subpart_hash_hash_%'); + +select get_subpart_template('t_subpart_range_hash_1'::regclass, 0) is null; +select pg_get_tabledef('t_subpart_range_hash_1'); +select get_subpart_template('t_subpart_range_hash_2'::regclass, 0); +select get_subpart_template('t_subpart_list_hash_3'::regclass, 2); +select get_subpart_template('t_subpart_hash_hash_4'::regclass, 4); +select pg_get_tabledef('t_subpart_range_hash_2'); +select pg_get_tabledef('t_subpart_list_hash_3'); +select pg_get_tabledef('t_subpart_hash_hash_4'); + +create table t_subpart_range_hash_float4 (col1 float4) +partition by range(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (-34.84), +partition p2 values less than (0), +partition p3 
values less than (1004.3) + ( + subpartition sp1, + subpartition sp2 + ), +partition p4 values less than (1.2345678901234e+20) +); + +create table t_subpart_list_hash_float4 (col1 float4) +partition by list(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (-3.1, -3.14, -3.141, -3.1415, -3.14159, -3.141592, -3.1415926), +partition p2 values (0, 10, 100, 1000, 10000) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (1.2345678901234e-20, 1.2345678901234e-10, 1.2345678901234e+10, 1.2345678901234e+20) +); + +create table t_subpart_hash_hash_float4 (col1 float4) +partition by hash(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 +); + +create table t_subpart_range_hash_float8 (col1 float8) +partition by range(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (-34.84), +partition p2 values less than (0), +partition p3 values less than (1004.3) + ( + subpartition sp1, + subpartition sp2 + ), +partition p4 values less than (1.2345678901234e+200) +); + +create table t_subpart_list_hash_float8 (col1 float8) +partition by list(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (-3.1, -3.14, -3.141, -3.1415, -3.14159, -3.141592, -3.1415926), +partition p2 values (0, 10, 100, 1000, 10000) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (1.2345678901234e-200, 1.2345678901234e-100, 1.2345678901234e+100, 1.2345678901234e+200) +); + +create table t_subpart_hash_hash_float8 (col1 float8) +partition by hash(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + 
subpartition sp1, + subpartition sp2 + ), +partition p3 +); + +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, p1.subpartstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename like 't_subpart_range_hash_float%' + or p1.tablename like 't_subpart_list_hash_float%' + or p1.tablename like 't_subpart_hash_hash_float%'; + +select p1.tablename, p1.subparttemplate +from schema_subpartition.v_subpartition p1 +where p1.subparttemplate is not null + and (p1.tablename like 't_subpart_range_hash_float%' + or p1.tablename like 't_subpart_list_hash_float%' + or p1.tablename like 't_subpart_hash_hash_float%'); + +create table t_subpart_range_hash_7 (id integer primary key, age numeric, name varchar(30), bd date) +partition by range(id) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (100), +partition p2 values less than (500) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_list_hash_7 (id integer primary key, age numeric, name varchar(30), bd date) +partition by list(id) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (100), +partition p2 values (500) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_hash_hash_7 (id integer primary key, age numeric, name varchar(30), bd date) +partition by hash(id) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ) +); + + +create table t_subpart_range_hash_8 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_range_hash_8 PRIMARY KEY (id, age, name)) +partition by range(age, name) +subpartition by hash(id) +( +partition p1 values less than (20, 'AAA') +); + +create table t_subpart_list_hash_8 (id 
integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_list_hash_8 PRIMARY KEY (id, age, name)) +partition by list(age) +subpartition by hash(id) +( +partition p1 values (20) +); + +create table t_subpart_hash_hash_8 (id integer, age integer, name char(30), bd date, + CONSTRAINT i_t_subpart_hash_hash_8 PRIMARY KEY (id, age, name)) +partition by hash(age) +subpartition by hash(id) +( +partition p1 +); + +create table t_subpart_range_hash_9 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_range_hash_9 PRIMARY KEY (age, name)) +partition by range(age, name) +subpartition by hash(id) +( +partition p1 values less than (100, 'AAA') +); + +create table t_subpart_list_hash_9 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_list_hash_9 PRIMARY KEY (id, name)) +partition by list(age) +subpartition by hash(id) +( +partition p1 values (100) +); + +create table t_subpart_hash_hash_9 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_hash_hash_9 PRIMARY KEY (bd, name)) +partition by hash(age) +subpartition by hash(id) +( +partition p1 +); + +create unique index i_t_subpart_range_hash_8_1 on t_subpart_range_hash_8 (id, bd); +create unique index i_t_subpart_list_hash_8_1 on t_subpart_list_hash_8 (id, bd); +create unique index i_t_subpart_hash_hash_8_1 on t_subpart_hash_hash_8 (id, bd); + + +create table t_subpart_range_hash_10 (id integer, age numeric, name char(30), bd date) +partition by range(age, name) +subpartition by hash(id) +( +partition p1 values less than (10, 'AAA') + ( + subpartition sp1 + ), +partition p2 values less than (100, 'MAXVALUE') + ( + subpartition sp2, + subpartition sp3 + ) +); + +create table t_subpart_list_hash_10 (id integer, age numeric, name char(30), bd date) +partition by list(age) +subpartition by hash(id) +( +partition p1 values (10) + ( + subpartition sp1 + ), +partition p2 values (100) + ( + subpartition sp2, + subpartition sp3 + ) +); + +create table 
t_subpart_hash_hash_10 (id integer, age integer, name char(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition p2 + ( + subpartition sp2, + subpartition sp3 + ) +); + +create unique index i_t_subpart_range_hash_10_1 on t_subpart_range_hash_10 (id) local; -- error +create unique index i_t_subpart_range_hash_10_1 on t_subpart_range_hash_10 (name, age) local; -- error +create unique index i_t_subpart_range_hash_10_1 on t_subpart_range_hash_10 (age, name, id) local; +create index i_t_subpart_range_hash_10_2 on t_subpart_range_hash_10 (name, age) local; + +create unique index i_t_subpart_list_hash_10_1 on t_subpart_list_hash_10 (age) local; -- error +create unique index i_t_subpart_list_hash_10_1 on t_subpart_list_hash_10 (name, bd) local; -- error +create unique index i_t_subpart_list_hash_10_1 on t_subpart_list_hash_10 (age, id) local; +create index i_t_subpart_list_hash_10_2 on t_subpart_list_hash_10 (name, age) local; + +create unique index i_t_subpart_hash_hash_10_1 on t_subpart_hash_hash_10 (bd) local; -- error +create unique index i_t_subpart_hash_hash_10_1 on t_subpart_hash_hash_10 (name, bd) local; -- error +create unique index i_t_subpart_hash_hash_10_1 on t_subpart_hash_hash_10 (age, id, bd) local; +create index i_t_subpart_hash_hash_10_2 on t_subpart_hash_hash_10 (age, bd) local; + +create index i_t_subpart_range_hash_10_3 on t_subpart_range_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_index_local + ), +partition p2_idx + ( + subpartition subp2_index_local + ) +); -- error +create index i_t_subpart_range_hash_10_3 on t_subpart_range_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_bd_idx_local + ), +partition p2_idx + ( + subpartition subp2_bd_idx_local, + subpartition subp3_bd_idx_local + ) +); + +create index i_t_subpart_list_hash_10_3 on t_subpart_list_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_index_local + ), +partition p2_idx + 
( + subpartition subp2_index_local + ) +); -- error +create index i_t_subpart_list_hash_10_3 on t_subpart_list_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_bd_idx_local + ), +partition p2_idx + ( + subpartition subp2_bd_idx_local, + subpartition subp3_bd_idx_local + ) +); + +create index i_t_subpart_hash_hash_10_3 on t_subpart_hash_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_index_local + ), +partition p2_idx + ( + subpartition subp2_index_local + ) +); -- error +create index i_t_subpart_hash_hash_10_3 on t_subpart_hash_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_bd_idx_local + ), +partition p2_idx + ( + subpartition subp2_bd_idx_local, + subpartition subp3_bd_idx_local + ) +); + +create unique index i_t_subpart_range_hash_10_4 on t_subpart_range_hash_10 (name, age) global; -- error +create unique index i_t_subpart_range_hash_10_4 on t_subpart_range_hash_10 (age, bd) global; +drop index i_t_subpart_range_hash_10_2; +create unique index i_t_subpart_range_hash_10_5 on t_subpart_range_hash_10 (name, age) global; + +create unique index i_t_subpart_list_hash_10_4 on t_subpart_list_hash_10 (name, age) global; -- error +create unique index i_t_subpart_list_hash_10_4 on t_subpart_list_hash_10 (name, bd) global; +drop index i_t_subpart_list_hash_10_2; +create unique index i_t_subpart_list_hash_10_5 on t_subpart_list_hash_10 (name, age) global; + +create unique index i_t_subpart_hash_hash_10_4 on t_subpart_hash_hash_10 (bd, age) global; -- error +create unique index i_t_subpart_hash_hash_10_4 on t_subpart_hash_hash_10 (name, id) global; +drop index i_t_subpart_hash_hash_10_2; +create unique index i_t_subpart_hash_hash_10_5 on t_subpart_hash_hash_10 (bd, age) global; + + +select p1.tablename, p1.relname, p1.reltoastidxid, p1.indextblid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_range_hash_7', 't_subpart_range_hash_8', 't_subpart_range_hash_10'); + +select p1.tablename, 
p1.relname, p1.reltoastidxid, p1.indextblid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_list_hash_7', 't_subpart_list_hash_8', 't_subpart_list_hash_10'); + +select p1.tablename, p1.relname, p1.reltoastidxid, p1.indextblid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_hash_hash_7', 't_subpart_hash_hash_8', 't_subpart_hash_hash_10'); + + +select * from pg_indexes where tablename like 't_subpart_range_hash_%' order by tablename, indexname; +select * from pg_indexes where tablename like 't_subpart_list_hash_%' order by tablename, indexname; +select * from pg_indexes where tablename like 't_subpart_hash_hash_%' order by tablename, indexname; + + +-- \d +\d t_subpart_range_hash_8 +\d t_subpart_list_hash_8 +\d t_subpart_hash_hash_8 +\d t_subpart_range_hash_10 +\d t_subpart_list_hash_10 +\d t_subpart_hash_hash_10 + + + +create table t_subpart_range_hash_11 (id integer, age numeric, name varchar(30), bd date) +partition by range(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1 values less than (10), +partition p2 values less than (100) tablespace ts_subpart_hash_2, +partition p3 values less than (1000) + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 values less than (MAXVALUE) tablespace ts_subpart_hash_2 + ( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +create table t_subpart_list_hash_11 (id integer, age numeric, name varchar(30), bd date) +partition by list(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1 values (10), +partition p2 values (20) tablespace ts_subpart_hash_2, +partition p3 values (30) + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 values (DEFAULT) tablespace ts_subpart_hash_2 + 
( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +create table t_subpart_hash_hash_11 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1, +partition p2 tablespace ts_subpart_hash_2, +partition p3 + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 tablespace ts_subpart_hash_2 + ( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +create table t_subpart_hash_hash_11_2 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartitions 3 store in (ts_subpart_hash_1, ts_subpart_hash_2) +partitions 5 store in (ts_subpart_hash_2, ts_subpart_hash_1); + +alter table t_subpart_hash_hash_11_2 add partition p6; +alter table t_subpart_hash_hash_11_2 modify partition p6 add subpartition p6_sp3; + +select p1.tablename, p1.relname, p1.parttype, p2.spcname tablespace_name +from schema_subpartition.v_subpartition p1 left join pg_tablespace p2 on p1.reltablespace = p2.oid +where p1.tablename in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11', 't_subpart_hash_hash_11_2') +order by p1.parentid, p1.oid; + +select p1.tablename, p1.subparttemplate +from schema_subpartition.v_subpartition p1 +where p1.subparttemplate is not null + and p1.tablename in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11', 't_subpart_hash_hash_11_2'); + + +SET SESSION AUTHORIZATION user_subpart_hash PASSWORD 'Test@123'; +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_test_user, + subpartition sp2 tablespace ts_subpart_hash_1 + ) +( +partition p1 +); + +create table 
t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 tablespace ts_subpart_hash_test_user, +partition p2 tablespace ts_subpart_hash_1 +); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 tablespace ts_subpart_hash_test_user, + subpartition sp2 tablespace ts_subpart_hash_1 + ) +); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartitions 2 store in (ts_subpart_hash_test_user, ts_subpart_hash_1) +( +partition p1 +); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +partitions 2 store in (ts_subpart_hash_test_user, ts_subpart_hash_1); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 + subpartitions 2 store in (ts_subpart_hash_test_user, ts_subpart_hash_1) +); + +RESET SESSION AUTHORIZATION; + + + +---------------------------- +-- syntax error +---------------------------- +create table t_subpart_error (id integer, name varchar(30)) +partition by range(id) +( +partition p1 values less than (10) + ( + subpartition sp1 + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by list(id) +( +partition p1 values (10) + ( + subpartition sp1 + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +( +partition p1 + ( + subpartition sp1 + ) +); + + +create table t_subpart_error (id integer, name varchar(30)) +partition by range(name) +subpartition by hash(id) +( +partition p1 values less than ('a') + ( + subpartition sp1 + ), +partition p2 values less than ('A') + ( + subpartition sp1 + ) +); + 
+create table t_subpart_error (id integer, name varchar(30)) +partition by list(name) +subpartition by hash(id) +( +partition p1 values ('a') + ( + subpartition sp1 + ), +partition p2 values ('A') + ( + subpartition sp1 + ) +); + +create table t_subpart_error (id integer, name int8) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition p2 + ( + subpartition sp1 + ) +); + + +create table t_subpart_error (id integer, name varchar(30)) +partition by range(name) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp1 + ) +( +partition p1 values less than ('a') +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by list(name) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp1 + ) +( +partition p1 values ('a') +); + +create table t_subpart_error (id integer, name int2) +partition by hash(name) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp1 + ) +( +partition p1 +); + + + +create table t_subpart_error (id integer, name varchar(30)) +partition by range(name) +subpartition by hash(id) +( +partition p1 values less than ('10') + ( + subpartition sp1 + ), +partition p2 values less than ('100') + ( + subpartition p1 + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by list(id) +subpartition by hash(id) +( +partition p1 values ('10') + ( + subpartition sp1 + ), +partition p2 values ('100') + ( + subpartition p1 + ) +); + +create table t_subpart_error (id integer, name int4) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition p2 + ( + subpartition p1 + ) +); +create table t_subpart_error (id integer, name int4) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition sp1 + ( + subpartition sp2 + ) +); + + + +create table t_subpart_error (id 
integer, name text) +partition by range(name) +subpartition by hash(id) +( +partition p1 values less than ('10') + ( + subpartition p2_subpartdefault1 + ), +partition p2 values less than ('100') +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name text) +partition by list(name) +subpartition by hash(id) +( +partition p1 values ('10') + ( + subpartition p2_subpartdefault1 + ), +partition p2 values ('100') +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name integer) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition p2_subpartdefault1 + ), +partition p2 +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(id) +( +partition p1, +partition p2 + ( + subpartition p1_subpartdefault1 + ) +); +drop table t_subpart_error; + + +create table t_subpart_error (id integer, name varchar(30), age int, bd varchar(30), addr varchar(30)) +partition by hash(id) +subpartition by hash(id, name, age, bd, addr) +( +partition p1 +); + + +create table t_subpart_error (id integer, name varchar(30), m money) +partition by hash(id) +subpartition by hash(m) +( +partition p1 +); +create table t_subpart_error (id integer, name varchar(30), m money) -- now is ok +partition by hash(id) +subpartition by hash(name) +( +partition p1 +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name varchar(30), bd date) -- now is ok +partition by hash(id) +subpartition by hash(bd) +( +partition p1 +); +drop table t_subpart_error; + + +create table t_subpart_error (id integer, name varchar(30), age int) +partition by hash(id) +subpartition by hash(age) +( +partition p1 + ( + subpartition sp1 values less than (1) + ) +); +create table t_subpart_error (id integer, name varchar(30), age int) +partition by hash(id) +subpartition by hash(age) +( +partition p1 + ( + subpartition sp1 end (1) + ) +); + +create table 
t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) +( +partition p1 + ( + subpartition sp1 values ('a', 'b') + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values less than (1) + ) +( +partition p1 +); +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 end (1) + ) +( +partition p1 +); +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values ('a') + ) +( +partition p1 +); + + +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 + ( + subpartition ssp1 values (DEFAULT) + ) + ) +( +partition p1 +); + + +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values (DEFAULT) + ) +( +partition p1 +); +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values (DEFAULT), + subpartition sp2 + ) +( +partition p1 +); +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) +( +partition p1 + ( + subpartition sp1 values (DEFAULT) + ) +); +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) +( +partition p1 + ( + subpartition sp1 values (DEFAULT), + subpartition sp2 + ) +); + + + +alter table t_subpart_hash_hash_2 drop column age; +alter table t_subpart_hash_hash_2 drop column id; +alter table t_subpart_hash_hash_2 modify (age numeric(6,1)); +alter table t_subpart_hash_hash_2 modify (id text); + 
+ +alter table t_subpart_range_hash_1 add partition p4 values less than (300); +alter table t_subpart_range_hash_1 add partition p5 start (300) end (400) +( + subpartition sp3, + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_range_hash_1 add partition p6 values less than (500) +( + subpartition sp6, + subpartition sys_subp4294967295 +); + +alter table t_subpart_list_hash_1 add partition p4 values (300); +alter table t_subpart_list_hash_1 add partition p5 values (400) +( + subpartition sp3, + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_list_hash_1 add partition p6 values (500) +( + subpartition sp6, + subpartition sys_subp4294967295 +); + +alter table t_subpart_hash_hash_1 add partition p4; +alter table t_subpart_hash_hash_1 add partition p5 +( + subpartition sp3, + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_hash_hash_1 add partition p6 +( + subpartition sp6, + subpartition sys_subp4294967295 +); + + +alter table t_subpart_range_hash_7 add partition p3 end (1000); +alter table t_subpart_range_hash_7 add partition p4 values less than (2000) +( + subpartition sp3, + subpartition p5_sp2 +); +alter table t_subpart_range_hash_10 add partition p3 values less than (MAXVALUE, MAXVALUE) +( + subpartition sp4, + subpartition sp5 +); + +alter table t_subpart_list_hash_7 add partition p3 values (1000); +alter table t_subpart_list_hash_7 add partition p4 values (2000) +( + subpartition sp3, + subpartition p5_sp2 +); +alter table t_subpart_list_hash_10 add partition p3 values (DEFAULT) +( + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_hash_hash_7 add partition p3; +alter table t_subpart_hash_hash_7 add partition p4 +( + subpartition sp3, + subpartition p5_sp2 +); +alter table t_subpart_hash_hash_10 add partition p3 +( + subpartition sp4, + subpartition sp5 +); + + + +alter table t_subpart_normal_table_hash add partition p1; +alter table t_subpart_normal_table_hash add partition p2 +( + subpartition sp1, 
+ subpartition sp2 +); + +alter table t_subpart_part_table_hash add partition p2 +( + subpartition sp1, + subpartition sp2 +); + + +alter table t_subpart_range_hash_1 add partition p_error values (500); +alter table t_subpart_range_hash_1 add partition p_error; +alter table t_subpart_range_hash_1 add partition p7 end (500) +( + subpartition sp_error values less than (100) +); +alter table t_subpart_range_hash_1 add partition p7 end (500) +( + subpartition sp_error start (100) +); +alter table t_subpart_range_hash_1 add partition p7 end (500) +( + subpartition sp_error values (0) +); + +alter table t_subpart_list_hash_1 add partition p_error values less than (500); +alter table t_subpart_list_hash_1 add partition p_error; +alter table t_subpart_list_hash_1 add partition p7 values (700) +( + subpartition sp_error end (100) +); +alter table t_subpart_list_hash_1 add partition p7 values (700) +( + subpartition sp_error values (0) +); + +alter table t_subpart_hash_hash_1 add partition p_error values less than (500); +alter table t_subpart_hash_hash_1 add partition p_error end (500); +alter table t_subpart_hash_hash_1 add partition p_error values (0); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error values less than (100) +); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error end (100) +); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error values (1) +); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error +); +alter table t_subpart_hash_hash_1 add partitions 1; +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartitions 2 +); + + + +alter table t_subpart_hash_hash_1 add partition p_error +( + subpartition sp3, + subpartition sp3 +); +alter table t_subpart_hash_hash_1 add partition p_error +( + subpartition sp3, + subpartition p_error +); +alter table t_subpart_hash_hash_1 add partition p_error +( + subpartition sp1, + subpartition sp22 +); +alter 
table t_subpart_hash_hash_1 add partition p_error +( + subpartition p1 +); +alter table t_subpart_hash_hash_7 add partition p3; +alter table t_subpart_hash_hash_7 add partition sp1; + + +alter table t_subpart_range_hash_7 add partition p5 values less than (MAXVALUE); +alter table t_subpart_list_hash_7 add partition p5 values (DEFAULT); +alter table t_subpart_hash_hash_7 add partition p5; + + + +alter table t_subpart_range_hash_10 add partition p_error values less than (9999, 9999); +alter table t_subpart_list_hash_10 add partition p_error values (9999); +alter table t_subpart_hash_hash_10 add partition p_error; + + + + +alter table t_subpart_range_hash_1 modify partition p2 add subpartition p2_sp20; +alter table t_subpart_range_hash_10 modify partition p1 add subpartition p1_sp22; + +alter table t_subpart_list_hash_1 modify partition p2 add subpartition p2_sp20; +alter table t_subpart_list_hash_10 modify partition p1 add subpartition p1_sp22; + +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition p2_sp20; +alter table t_subpart_hash_hash_10 modify partition p1 add subpartition p1_sp22; + + +alter table t_subpart_normal_table_hash modify partition p1 add subpartition sp1; +alter table t_subpart_normal_table_hash modify partition p1 add subpartition sp1 +( + subpartition sp3, + subpartition sp4 +); + +alter table t_subpart_part_table_hash modify partition p1 add subpartition sp1; +alter table t_subpart_part_table_hash modify partition p1 add subpartition sp1 +( + subpartition sp3, + subpartition sp4 +); + + +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp_error values less than (10); +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp_error end (10); +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp_error values (1000); + + +alter table t_subpart_range_hash_1 modify partition p_error add subpartition sp_error; +alter table t_subpart_range_hash_1 modify partition for (999) add 
subpartition sp_error; +alter table t_subpart_list_hash_1 modify partition p_error add subpartition sp_error; +alter table t_subpart_list_hash_1 modify partition for (999) add subpartition sp_error; +alter table t_subpart_hash_hash_1 modify partition p_error add subpartition sp21; +alter table t_subpart_hash_hash_1 modify partition for (999) add subpartition sp_error; + + +alter table t_subpart_range_hash_1 modify partition p2 add subpartition sp1; +alter table t_subpart_range_hash_1 modify partition p2 add subpartition sp3; +alter table t_subpart_range_hash_1 modify partition p2 add subpartition p1; + +alter table t_subpart_list_hash_1 modify partition p2 add subpartition sp1; +alter table t_subpart_list_hash_1 modify partition p2 add subpartition sp3; +alter table t_subpart_list_hash_1 modify partition p2 add subpartition p1; + +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp1; +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp3; +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition p1; + +alter table t_subpart_hash_hash_1 modify partition p2 add subpartitions 1; + + +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_range_hash_1', 't_subpart_range_hash_7', 't_subpart_range_hash_10', + 't_subpart_list_hash_1', 't_subpart_list_hash_7', 't_subpart_list_hash_10', + 't_subpart_hash_hash_1', 't_subpart_hash_hash_7', 't_subpart_hash_hash_10'); + + + + +create table t_subpart_hash_hash_13 (id integer, age int) +partition by hash(id) +subpartition by hash(age) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1, +partition p2 tablespace ts_subpart_hash_2, +partition p3 + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 tablespace ts_subpart_hash_2 + 
( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +alter table t_subpart_hash_hash_13 add partition p5; +alter table t_subpart_hash_hash_13 add partition p6 tablespace ts_subpart_hash_2; +alter table t_subpart_hash_hash_13 add partition p7 tablespace ts_subpart_hash_2 +( + subpartition sp5, + subpartition sp6 tablespace ts_subpart_hash_1 +); +alter table t_subpart_hash_hash_13 add partition p8 +( + subpartition sp7, + subpartition sp8 tablespace ts_subpart_hash_1 +); + +alter table t_subpart_hash_hash_13 modify partition p1 add subpartition p1_sp20; +alter table t_subpart_hash_hash_13 modify partition p1 add subpartition p1_sp21 tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_13 modify partition p2 add subpartition p2_sp22; +alter table t_subpart_hash_hash_13 modify partition p2 add subpartition p2_sp23 tablespace ts_subpart_hash_1; + + +SET SESSION AUTHORIZATION user_subpart_hash PASSWORD 'Test@123'; +create table t_subpart_hash_hash_14 (id integer, age integer) +partition by hash (id) +subpartition by hash (age) +( +partition p1 + ( + subpartition sp1 + ) +); + +alter table t_subpart_hash_hash_14 add partition p2 tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_14 add partition p2 +( + subpartition sp2, + subpartition sp3 tablespace ts_subpart_hash_1 +); +alter table t_subpart_hash_hash_14 modify partition p1 add subpartition p1_sp2 tablespace ts_subpart_hash_1; + +drop table t_subpart_hash_hash_14; +RESET SESSION AUTHORIZATION; + +select p1.tablename, p1.relname, p1.parttype, p2.spcname tablespace_name +from schema_subpartition.v_subpartition p1 left join pg_tablespace p2 on p1.reltablespace = p2.oid +where p1.tablename = 't_subpart_hash_hash_13' +order by p1.parentid, p1.oid; + + + +alter table t_subpart_range_hash_1 drop partition p6; +alter table t_subpart_range_hash_1 drop partition for (350); -- drop p5 +alter table t_subpart_range_hash_7 drop partition p3; +alter table t_subpart_range_hash_10 drop 
partition for (1, 'A'); -- drop p1 + +alter table t_subpart_list_hash_1 drop partition p6; +alter table t_subpart_list_hash_1 drop partition for (400); -- drop p5 +alter table t_subpart_list_hash_7 drop partition p3; +alter table t_subpart_list_hash_10 drop partition for (10); -- drop p1 + +alter table t_subpart_hash_hash_1 drop partition p6; +alter table t_subpart_hash_hash_1 drop partition for (4); -- drop p5 +alter table t_subpart_hash_hash_7 drop partition p3; +alter table t_subpart_hash_hash_10 drop partition for (10); -- drop p1 +alter table t_subpart_hash_hash_13 drop partition p4; + + + +alter table t_subpart_range_hash_1 drop partition p_error; +alter table t_subpart_range_hash_7 drop partition for (9999); + +alter table t_subpart_list_hash_1 drop partition p_error; +alter table t_subpart_list_hash_7 drop partition for (9999); + +alter table t_subpart_hash_hash_1 drop partition p_error; +alter table t_subpart_hash_hash_7 drop partition for (9999); + + + +alter table t_subpart_list_hash_10 drop partition p2; + + + +alter table t_subpart_range_hash_10 drop partition p3; -- ok +alter table t_subpart_range_hash_10 drop partition p2; -- error + +alter table t_subpart_list_hash_10 drop partition p3; -- ok +alter table t_subpart_list_hash_10 drop partition p2; -- error + +alter table t_subpart_hash_hash_10 drop partition p3; -- error +alter table t_subpart_hash_hash_10 drop partition p2; + + + +alter table t_subpart_range_hash_1 drop subpartition sp1; +alter table t_subpart_range_hash_7 drop subpartition for (100, 101); -- drop sp2 + +alter table t_subpart_list_hash_1 drop subpartition sp1; +alter table t_subpart_list_hash_7 drop subpartition for (500, 101); -- drop sp2 + +alter table t_subpart_hash_hash_1 drop subpartition sp1; +alter table t_subpart_hash_hash_7 drop subpartition for (1, 9); -- drop sp2 +alter table t_subpart_hash_hash_13 drop subpartition sp2; +alter table t_subpart_hash_hash_13 drop subpartition for (4, 100); -- drop p5_sp1 + +alter table 
t_subpart_range_hash_1 drop subpartition sp_error; +alter table t_subpart_range_hash_7 drop subpartition for (100, 1); + +alter table t_subpart_list_hash_1 drop subpartition sp_error; +alter table t_subpart_list_hash_7 drop subpartition for (500, 1); + +alter table t_subpart_hash_hash_1 drop subpartition sp_error; +alter table t_subpart_hash_hash_7 drop subpartition for (501, 1); + + +alter table t_subpart_range_hash_7 drop subpartition sp1; +alter table t_subpart_list_hash_7 drop subpartition sp1; +alter table t_subpart_hash_hash_7 drop subpartition sp1; + + + +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_range_hash_1', 't_subpart_range_hash_7', 't_subpart_range_hash_10', + 't_subpart_list_hash_1', 't_subpart_list_hash_7', 't_subpart_list_hash_10', + 't_subpart_hash_hash_1', 't_subpart_hash_hash_7', 't_subpart_hash_hash_10'); + + +select p1.tablename, p1.relname, p1.parttype, p2.spcname tablespace_name +from schema_subpartition.v_subpartition p1 left join pg_tablespace p2 on p1.reltablespace = p2.oid +where p1.tablename = 't_subpart_hash_hash_13' +order by p1.parentid, p1.oid; + + + + +select * from t_subpart_range_hash_1 partition (p2); +select * from t_subpart_range_hash_1 partition for (10); +select * from t_subpart_range_hash_1 subpartition (sp2); +select * from t_subpart_range_hash_1 subpartition for (50, 51); + +select * from t_subpart_list_hash_1 partition (p2); +select * from t_subpart_list_hash_1 partition for (10); +select * from t_subpart_list_hash_1 subpartition (sp2); +select * from t_subpart_list_hash_1 subpartition for (50, 51); + +select * from t_subpart_hash_hash_1 partition (p2); +select * from t_subpart_hash_hash_1 partition for (1); +select * from t_subpart_hash_hash_1 subpartition (sp2); +select * from t_subpart_hash_hash_1 subpartition for (51, 51); + + + +update t_subpart_range_hash_1 
partition (p2) set id = id + 10; +update t_subpart_range_hash_1 partition for (10) set id = id + 10; +update t_subpart_range_hash_1 subpartition (sp2) set id = id + 10; +update t_subpart_range_hash_1 subpartition for (50, 51) set id = id + 10; + +update t_subpart_list_hash_1 partition (p2) set id = id + 10; +update t_subpart_list_hash_1 partition for (10) set id = id + 10; +update t_subpart_list_hash_1 subpartition (sp2) set id = id + 10; +update t_subpart_list_hash_1 subpartition for (50, 51) set id = id + 10; + +update t_subpart_hash_hash_1 partition (p2) set id = id + 10; +update t_subpart_hash_hash_1 partition for (1) set id = id + 10; +update t_subpart_hash_hash_1 subpartition (sp2) set id = id + 10; +update t_subpart_hash_hash_1 subpartition for (51, 51) set id = id + 10; + + + + +delete from t_subpart_range_hash_1 partition (p2); +delete from t_subpart_range_hash_1 partition for (10); +delete from t_subpart_range_hash_1 subpartition (sp2); +delete from t_subpart_range_hash_1 subpartition for (50, 51); + +delete from t_subpart_list_hash_1 partition (p2); +delete from t_subpart_list_hash_1 partition for (10); +delete from t_subpart_list_hash_1 subpartition (sp2); +delete from t_subpart_list_hash_1 subpartition for (50, 51); + +delete from t_subpart_hash_hash_1 partition (p2); +delete from t_subpart_hash_hash_1 partition for (1); +delete from t_subpart_hash_hash_1 subpartition (sp2); +delete from t_subpart_hash_hash_1 subpartition for (51, 51); + + + +-- range-hash +create table t_subpart_range_hash_20 (id integer, name text) +partition by range(name) +subpartition by hash(name) +( +partition p1 values less than ('e'), +partition p2 values less than ('k') + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values less than (MAXVALUE) + ( + subpartition sp3, + subpartition sp4 + ) +); +insert into t_subpart_range_hash_20 values (1,'a'); +insert into t_subpart_range_hash_20 values (2,'e'); +insert into t_subpart_range_hash_20 values (3,'g'); +insert 
into t_subpart_range_hash_20 values (4,'m'); +insert into t_subpart_range_hash_20 values (5,'r'); +insert into t_subpart_range_hash_20 values (6,NULL); + +explain(costs off) select * from t_subpart_range_hash_20; + +explain(costs off) select * from t_subpart_range_hash_20 where name is null; +select * from t_subpart_range_hash_20 where name is null; +explain(costs off) select * from t_subpart_range_hash_20 where name is not null; +select * from t_subpart_range_hash_20 where name is not null; + +explain(costs off) select * from t_subpart_range_hash_20 where name = 'e'; +select * from t_subpart_range_hash_20 where name = 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name > 'e'; +select * from t_subpart_range_hash_20 where name > 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name >= 'e'; +select * from t_subpart_range_hash_20 where name >= 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name < 'e'; +select * from t_subpart_range_hash_20 where name < 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name <= 'e'; +select * from t_subpart_range_hash_20 where name <= 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name <> 'e'; +select * from t_subpart_range_hash_20 where name <> 'e'; + +explain(costs off) select * from t_subpart_range_hash_20 where name = 'e' and name is null; +select * from t_subpart_range_hash_20 where name = 'e' and name is null; +explain(costs off) select * from t_subpart_range_hash_20 where name = 'e' or name is null; +select * from t_subpart_range_hash_20 where name = 'e' or name is null; + +explain(costs off) select * from t_subpart_range_hash_20 where name in ('r', NULL); +select * from t_subpart_range_hash_20 where name in ('r', NULL); +explain(costs off) select * from t_subpart_range_hash_20 where name = any(array['e', 'g']) or name in ('r', NULL); +select * from t_subpart_range_hash_20 where name = any(array['e', 'g']) or name in ('r', 
NULL); + + +-- list-hash +create table t_subpart_list_hash_20 (id integer, age integer, name text) +partition by list(age) +subpartition by hash(name) +( +partition p1 values (1, 2, 3), +partition p2 values (10, 20, 50, 60) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (DEFAULT) + ( + subpartition sp3, + subpartition sp4 + ) +); +insert into t_subpart_list_hash_20 values (1, 1, NULL); +insert into t_subpart_list_hash_20 values (2, 20, 'b'); +insert into t_subpart_list_hash_20 values (3, 50, 'f'); +insert into t_subpart_list_hash_20 values (4, 100, NULL); +insert into t_subpart_list_hash_20 values (5, NULL, 'g'); +insert into t_subpart_list_hash_20 values (6, NULL, NULL); + +explain(costs off) select * from t_subpart_list_hash_20; + +explain(costs off) select * from t_subpart_list_hash_20 where age is null; +select * from t_subpart_list_hash_20 where age is null; +explain(costs off) select * from t_subpart_list_hash_20 where age is not null; +select * from t_subpart_list_hash_20 where age is not null; +explain(costs off) select * from t_subpart_list_hash_20 where name is null; +select * from t_subpart_list_hash_20 where name is null; +explain(costs off) select * from t_subpart_list_hash_20 where name is not null; +select * from t_subpart_list_hash_20 where name is not null; + +explain(costs off) select * from t_subpart_list_hash_20 where age is null and name is null; +select * from t_subpart_list_hash_20 where age is null and name is null; +explain(costs off) select * from t_subpart_list_hash_20 where age is null or name is null; +select * from t_subpart_list_hash_20 where age is null or name is null; + +explain(costs off) select * from t_subpart_list_hash_20 where age = 20; +select * from t_subpart_list_hash_20 where age = 20; +explain(costs off) select * from t_subpart_list_hash_20 where name = 'b'; +select * from t_subpart_list_hash_20 where name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 and name = 'b'; 
+select * from t_subpart_list_hash_20 where age = 20 and name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 or name = 'b'; +select * from t_subpart_list_hash_20 where age = 20 or name = 'b'; + +explain(costs off) select * from t_subpart_list_hash_20 where age is null and name = 'b'; +select * from t_subpart_list_hash_20 where age is null and name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age is null or name = 'b'; +select * from t_subpart_list_hash_20 where age is null or name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 and name is null; +select * from t_subpart_list_hash_20 where age = 20 and name is null; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 or name is null; +select * from t_subpart_list_hash_20 where age = 20 or name is null; + +explain(costs off) select * from t_subpart_list_hash_20 where name = any(array['g', NULL]); +select * from t_subpart_list_hash_20 where name = any(array['g', NULL]); +explain(costs off) select * from t_subpart_list_hash_20 where age in (20, 200) and name = any(array['g', NULL]); +select * from t_subpart_list_hash_20 where age in (20, 200) and name = any(array['g', NULL]); + + +-- hash-hash +create table t_subpart_hash_hash_20 (id integer, name text, bd time) +partition by hash(name) +subpartition by hash(bd) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 + ( + subpartition sp3, + subpartition sp4 + ) +); +insert into t_subpart_hash_hash_20 values (1, 'a', '1:2:3'); +insert into t_subpart_hash_hash_20 values (2, 'g', NULL); +insert into t_subpart_hash_hash_20 values (3, 'h', '11:2:3'); +insert into t_subpart_hash_hash_20 values (4, 'o', NULL); +insert into t_subpart_hash_hash_20 values (5, 't', '21:0:0'); +insert into t_subpart_hash_hash_20 values (6, NULL, NULL); + +explain(costs off) select * from t_subpart_hash_hash_20; + +explain(costs off) select * from 
t_subpart_hash_hash_20 where name is null; +select * from t_subpart_hash_hash_20 where name is null; +explain(costs off) select * from t_subpart_hash_hash_20 where name is not null; +select * from t_subpart_hash_hash_20 where name is not null; +explain(costs off) select * from t_subpart_hash_hash_20 where bd is null; +select * from t_subpart_hash_hash_20 where bd is null; +explain(costs off) select * from t_subpart_hash_hash_20 where bd is not null; +select * from t_subpart_hash_hash_20 where bd is not null; +explain(costs off) select * from t_subpart_hash_hash_20 where name is null and bd is null; +select * from t_subpart_hash_hash_20 where name is null and bd is null; + +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g'; +select * from t_subpart_hash_hash_20 where name = 'g'; +explain(costs off) select * from t_subpart_hash_hash_20 where bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' and bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name = 'g' and bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' or bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name = 'g' or bd = '11:2:3'; + +explain(costs off) select * from t_subpart_hash_hash_20 where name is null and bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name is null and bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name is null or bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name is null or bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' and bd is null; +select * from t_subpart_hash_hash_20 where name = 'g' and bd is null; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' or bd is null; +select * from t_subpart_hash_hash_20 where name = 'g' or bd is null; + +explain(costs off) select * from 
t_subpart_hash_hash_20 where bd = any(array['11:2:3'::time, '21:0:0'::time]); +select * from t_subpart_hash_hash_20 where bd = any(array['11:2:3'::time, '21:0:0'::time]); +explain(costs off) select * from t_subpart_hash_hash_20 where name in ('g','o') and bd = any(array['11:2:3'::time, '21:0:0'::time]); +select * from t_subpart_hash_hash_20 where name in ('g','o') and bd = any(array['11:2:3'::time, '21:0:0'::time]); + + + +---------------------------- +-- truncate partition & subpartition +---------------------------- +-- PARTITION [FOR] +alter table t_subpart_range_hash_1 truncate partition p1; +alter table t_subpart_range_hash_1 truncate partition for (10); +alter table t_subpart_range_hash_10 truncate partition p2; +alter table t_subpart_range_hash_10 truncate partition for (10, 'MAXVALUE'); + +alter table t_subpart_list_hash_1 truncate partition p1; +alter table t_subpart_list_hash_1 truncate partition for (10); +alter table t_subpart_list_hash_10 truncate partition p2; +alter table t_subpart_list_hash_10 truncate partition for (100); + +alter table t_subpart_hash_hash_1 truncate partition p1; +alter table t_subpart_hash_hash_1 truncate partition for (0); +alter table t_subpart_hash_hash_7 truncate partition p1; +alter table t_subpart_hash_hash_7 truncate partition for (100); +alter table t_subpart_hash_hash_10 truncate partition p2; +alter table t_subpart_hash_hash_10 truncate partition for (1); + +-- SUBPARTITION [FOR] +alter table t_subpart_range_hash_1 truncate subpartition p1_subpartdefault1; +alter table t_subpart_range_hash_1 truncate subpartition for (100, 51); +alter table t_subpart_range_hash_10 truncate subpartition sp2; +alter table t_subpart_range_hash_10 truncate subpartition for (10, 'MAXVALUE', 9); + +alter table t_subpart_list_hash_1 truncate subpartition p1_subpartdefault1; +alter table t_subpart_list_hash_1 truncate subpartition for (10, 51); +alter table t_subpart_list_hash_10 truncate subpartition sp2; +alter table t_subpart_list_hash_10 
truncate subpartition for (100, 9); + +alter table t_subpart_hash_hash_1 truncate subpartition p1_subpartdefault1; +alter table t_subpart_hash_hash_1 truncate subpartition for (11, 51); +alter table t_subpart_hash_hash_7 truncate subpartition sp1; +alter table t_subpart_hash_hash_7 truncate subpartition for (101, 10); +alter table t_subpart_hash_hash_10 truncate subpartition sp2; +alter table t_subpart_hash_hash_10 truncate subpartition for (1, 7); + +alter table t_subpart_range_hash_1 truncate partition p_error; +alter table t_subpart_range_hash_1 truncate partition for (300); +alter table t_subpart_range_hash_1 truncate subpartition sp_error; +alter table t_subpart_range_hash_1 truncate subpartition for (10, 4); -- ok + +alter table t_subpart_list_hash_1 truncate partition p_error; +alter table t_subpart_list_hash_1 truncate partition for (999); +alter table t_subpart_list_hash_1 truncate subpartition sp_error; +alter table t_subpart_list_hash_1 truncate subpartition for (10, 4); -- ok + +alter table t_subpart_hash_hash_1 truncate partition p_error; +alter table t_subpart_hash_hash_1 truncate partition for (4); -- ok +alter table t_subpart_hash_hash_1 truncate subpartition sp_error; +alter table t_subpart_hash_hash_1 truncate subpartition for (11, 4); -- ok + + + + +alter table t_subpart_range_hash_1 set subpartition template +( + subpartition sp1, + subpartition sp2 +); + +alter table t_subpart_list_hash_1 set subpartition template +( + subpartition sp1, + subpartition sp2 +); + +alter table t_subpart_hash_hash_1 set subpartition template +( + subpartition sp1, + subpartition sp2 +); + + + +---------------------------- +-- TODO SPLIT [SUB]PARTITION [FOR] +---------------------------- +-- TODO SPLIT RANGE PARTITION [FOR] +alter table t_subpart_range_hash_2 split partition p1 at (5) into (partition p1_1, partition p1_2); +alter table t_subpart_range_hash_2 split partition for (50) at (50) into (partition p2_1, partition p2_2); + +alter table t_subpart_range_hash_2 
split partition p3 into (partition p3_1 end (200), partition p3_2 end (300), partition p3_3 end (400), partition p3_4 end (500), partition p3_5 end (MAXVALUE)); +alter table t_subpart_range_hash_2 split partition for (50) into (partition p2_2_1 values less than (60), partition p2_2_2 values less than (100)); + + +alter table t_subpart_range_hash_2 split partition p1_1 values (5) into (partition p1_1_1, partition p1_1_2); + + +-- TODO SPLIT LIST PARTITION [FOR] +alter table t_subpart_list_hash_2 split partition p1 values (5) into (partition p1_1, partition p1_2); +alter table t_subpart_list_hash_2 split partition for (100) values (200) into (partition p1_1, partition p1_2); +alter table t_subpart_list_hash_2 split partition p2 into (partition p3_1 values (10), partition p3_2 values (20), partition p3_3 values (30), partition p3_4 values (40), partition p3_5 values (50)); + +-- error, LIST partition not support AT ... INTO +alter table t_subpart_list_hash_2 split partition p1 at (3) into (partition p1_1, partition p1_2); +alter table t_subpart_list_hash_2 split partition for (3) at (3) into (partition p1_1 values (10), partition p1_2 values (20.6789)); + + +-- HASH partition not support SPLIT +alter table t_subpart_hash_hash_2 split partition p1 at (5) into (partition p1_1, partition p1_2); +alter table t_subpart_hash_hash_2 split partition for (0) at (5) into (partition p1_1, partition p1_2); +alter table t_subpart_hash_hash_2 split partition p2 values (5) into (partition p1_1, partition p1_2); +alter table t_subpart_hash_hash_2 split partition p3 into (partition p3_1 values less than (100), partition p3_2 values less than (200)); + +-- HASH subpartition not support SPLIT +alter table t_subpart_hash_hash_3 split subpartition sp1 at ('a') into (subpartition sp1_1, subpartition sp1_2); +alter table t_subpart_hash_hash_3 split subpartition for (100, '1') at ('a') into (subpartition sp2_1, subpartition sp2_2 ); +alter table t_subpart_hash_hash_3 split subpartition sp1 
values ('a') into (subpartition sp1_1, subpartition sp1_2); +alter table t_subpart_hash_hash_3 split subpartition for (100, '1') values ('1', '2') into (subpartition sp2_1, subpartition sp2_2 ); +alter table t_subpart_hash_hash_3 split subpartition sp3 into (subpartition sp3_1 values ('A'), subpartition sp3_2 values ('B'), subpartition sp3_3 values ('C'), subpartition sp3_4 values ('D', 'E')); +alter table t_subpart_hash_hash_3 split subpartition for (300, '1') into (subpartition sp5_1 values ('1', '2', '3', '4', '5'), subpartition sp5_2 values ('A', 'B', 'C', 'D', 'E'), subpartition sp5_3 values (DEFAULT)); + + + + + +-- TODO MERGE RANGE PARTITIONS [FOR] +alter table t_subpart_range_hash_1 merge partitions p1,p2 into partition p12; +alter table t_subpart_range_hash_1 merge partitions for (1), for (10), for (100), for (200) into partition p1234; +alter table t_subpart_range_hash_1 merge partitions p1 to p4 into partition p1234; + +-- TODO MERGE LIST PARTITION [FOR] +alter table t_subpart_list_hash_1 merge partitions p1,p2 into partition p12; +alter table t_subpart_list_hash_1 merge partitions for (1), for (10), for (70), for (222) into partition p1234; + +alter table t_subpart_list_hash_1 merge partitions p1 to p4 into partition p1234; -- error + + +alter table t_subpart_hash_hash_1 merge partitions p1,p1 into partition p12; +alter table t_subpart_hash_hash_1 merge partitions for (0), for (1) into partition p12; +alter table t_subpart_hash_hash_1 merge partitions p1 to p3 into partition p123; + +alter table t_subpart_hash_hash_1 merge subpartitions sp1,sp1 into subpartition sp12; +alter table t_subpart_hash_hash_1 merge subpartitions for (1, 0), for (1, 1) into subpartition p12; +alter table t_subpart_hash_hash_1 merge subpartitions sp1 to sp2 into subpartition sp12; + + + + + +---------------------------- +-- TODO EXCHANGE PARTITION [FOR] +---------------------------- +create table t_subpart_range_hash_8_exchange (like t_subpart_range_hash_8); +alter table 
t_subpart_range_hash_8_exchange add primary key (id, age, name); +create table t_subpart_list_hash_8_exchange (like t_subpart_list_hash_8); +alter table t_subpart_list_hash_8_exchange add primary key (id, age, name); +create table t_subpart_hash_hash_8_exchange (like t_subpart_hash_hash_8); +alter table t_subpart_hash_hash_8_exchange add primary key (id, age, name); + +alter table t_subpart_range_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_range_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_range_hash_8_exchange; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION p1 with table t_subpart_range_hash_8_exchange WITH VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION for (20, 'A') with table t_subpart_range_hash_8_exchange VERBOSE; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION for (19, 'BBB') with table t_subpart_range_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_list_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_list_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_list_hash_8_exchange; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION p1 with table t_subpart_list_hash_8_exchange WITH VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION for (20) with table t_subpart_list_hash_8_exchange VERBOSE; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION for (20)with table t_subpart_list_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_hash_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_hash_hash_8_exchange; +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION p1 with table t_subpart_hash_hash_8_exchange WITH VALIDATION; +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION for (10) with table 
t_subpart_hash_hash_8_exchange VERBOSE; +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION for (10) with table t_subpart_hash_hash_8_exchange WITH VALIDATION VERBOSE; + + + +---------------------------- +-- EXCHANGE SUBPARTITION [FOR] +---------------------------- +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_range_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_range_hash_8_exchange; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_range_hash_8_exchange WITH VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION for (20, 'A', '10') with table t_subpart_range_hash_8_exchange VERBOSE; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION for (19, 'BBB', '10') with table t_subpart_range_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_list_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_list_hash_8_exchange; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_list_hash_8_exchange WITH VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION for (20, '20') with table t_subpart_list_hash_8_exchange VERBOSE; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION for (20, '20') with table t_subpart_list_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_hash_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_hash_hash_8_exchange; +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_hash_hash_8_exchange WITH VALIDATION; 
+alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION for (10, '20') with table t_subpart_hash_hash_8_exchange VERBOSE; +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION for (10, '20') with table t_subpart_hash_hash_8_exchange WITH VALIDATION VERBOSE; + + +drop table t_subpart_range_hash_8_exchange; +drop table t_subpart_list_hash_8_exchange; +drop table t_subpart_hash_hash_8_exchange; + + + +-- TODO List partition MODIFY ADD/DROP VALUES (...) +-- alter table xxx modify partition p1 add values (); +-- alter table xxx modify partition p1 drop values (); +-- alter table xxx modify subpartition p1 add values (); +-- alter table xxx modify subpartition p1 addropd values (); + + + +---------------------------- +-- TODO MOVE [SUB]PARTITION [FOR] +---------------------------- +alter table t_subpart_range_hash_10 move partition p1 tablespace ts_subpart_hash_1; +alter table t_subpart_range_hash_10 move partition for (10, 'MAXVALUE') tablespace ts_subpart_hash_1; +alter table t_subpart_range_hash_10 move subpartition sp2 tablespace ts_subpart_hash_2; +alter table t_subpart_range_hash_10 move subpartition for (10, 'MAXVALUE', '1') tablespace ts_subpart_hash_2; + +alter table t_subpart_list_hash_10 move partition p1 tablespace ts_subpart_hash_1; +alter table t_subpart_list_hash_10 move partition for (100) tablespace ts_subpart_hash_1; +alter table t_subpart_list_hash_10 move subpartition sp2 tablespace ts_subpart_hash_2; +alter table t_subpart_list_hash_10 move subpartition for (100, '1') tablespace ts_subpart_hash_2; + +alter table t_subpart_hash_hash_10 move partition p1 tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_10 move partition for (1) tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_10 move subpartition sp2 tablespace ts_subpart_hash_2; +alter table t_subpart_hash_hash_10 move subpartition for (1, '1') tablespace ts_subpart_hash_2; + + + +---------------------------- +-- TODO ROW MOVEMENT +---------------------------- +alter 
table t_subpart_range_hash_10 enable row movement; +alter table t_subpart_range_hash_10 disable row movement; + +alter table t_subpart_list_hash_10 enable row movement; +alter table t_subpart_list_hash_10 disable row movement; + +alter table t_subpart_hash_hash_10 enable row movement; +alter table t_subpart_hash_hash_10 disable row movement; + + + +---------------------------- +-- ALTER INDEX ... UNUSABLE +-- ALTER INDEX ... REBUILD +-- ALTER INDEX ... MODIFY [SUB]PARTITION name UNUSABLE +-- ALTER INDEX ... REBUILD [SUB]PARTITION name +---------------------------- +alter index i_t_subpart_hash_hash_10_3 UNUSABLE; + +alter index i_t_subpart_hash_hash_10_3 REBUILD partition subp1_bd_idx_local; +alter index i_t_subpart_hash_hash_10_3 REBUILD subpartition subp3_bd_idx_local; + +alter index i_t_subpart_hash_hash_10_4 UNUSABLE; +alter index i_t_subpart_hash_hash_10_4 REBUILD partition subp1_index_local; -- error +alter index i_t_subpart_hash_hash_10_4 REBUILD subpartition subp3_index_local; -- error + +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; +select relname, relkind, parttype, indisusable from pg_class left join pg_index on pg_class.oid=indexrelid where pg_class.oid in ('i_t_subpart_hash_hash_10_3'::regclass, 'i_t_subpart_hash_hash_10_4'::regclass) order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +-- alter index i_t_subpart_hash_hash_10_3 REBUILD; +-- alter index i_t_subpart_hash_hash_10_4 REBUILD; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter index i_t_subpart_hash_hash_10_3 modify partition subp1_bd_idx_local unusable; +alter index i_t_subpart_hash_hash_10_3 modify subpartition subp3_bd_idx_local unusable; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +select relname, parttype, indisusable from pg_partition where 
parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; +select relname, relkind, parttype, indisusable from pg_class left join pg_index on pg_class.oid=indexrelid where pg_class.oid in ('i_t_subpart_hash_hash_10_3'::regclass, 'i_t_subpart_hash_hash_10_4'::regclass) order by relname; + + + +---------------------------- +-- ALTER TABLE ... MODIFY [SUB]PARTITION [FOR] ... [REBUILD] UNUSABLE LOCAL INDEXES +---------------------------- +alter table t_subpart_hash_hash_10 modify partition p1 unusable local indexes; +alter table t_subpart_hash_hash_10 modify partition for (3) unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter table t_subpart_hash_hash_10 modify partition p1 REBUILD unusable local indexes; +alter table t_subpart_hash_hash_10 modify partition for (3) REBUILD unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter table t_subpart_hash_hash_10 modify subpartition sp1 unusable local indexes; +alter table t_subpart_hash_hash_10 modify subpartition for (3, NULL) unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter table t_subpart_hash_hash_10 modify subpartition sp1 REBUILD unusable local indexes; +alter table t_subpart_hash_hash_10 modify subpartition for (3, NULL) REBUILD unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from 
t_subpart_hash_hash_10 where bd = '2999-01-01'; + + + +---------------------------- +-- TODO RENAME +---------------------------- +alter table t_subpart_hash_hash_10 rename partition p2 to p0; +alter table t_subpart_hash_hash_10 rename partition p0 to p2; + +alter table t_subpart_hash_hash_10 rename subpartition sp2 to sp0; +alter table t_subpart_hash_hash_10 rename subpartition sp0 to sp2; + + + + +select table_name,partitioning_type,subpartitioning_type,partition_count, +def_subpartition_count,partitioning_key_count,subpartitioning_key_count +from all_part_tables where lower(table_name) in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11') order by table_name; + +select (table_owner is not null) as has_owner,table_name,partition_name,subpartition_name from all_tab_subpartitions where lower(table_name) in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11') order by table_name,partition_name,subpartition_name; + +select (table_owner is not null) as has_owner,table_name,partition_name,subpartition_count from all_tab_partitions where lower(table_name) in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11') order by table_name,partition_name; + + + + +CREATE TABLE t_subpart_cstore_hh (id integer, name varchar(30), db date) +with ( orientation = column ) +partition by hash(id) +subpartition by hash(db) +( +partition p1 +); + + + +---------------------------- +-- ERROR +---------------------------- + +create table t_subpart_error (id integer, name varchar(30)) +partition by VALUES(id) +subpartition by hash(id); + +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by VALUES(name) +( +partition p1 +); + +create table t_subpart_interval (id integer, name varchar(30), db date) +partition by range(db) +INTERVAL ('1 day') +subpartition by hash(id) +( +partition p1 values less than ('2000-01-01') +); + +create table t_subpart_error (id integer, 
name varchar(30), db date) +partition by hash(id) +subpartition by range(db) +INTERVAL ('1 day') +( +partition p1 +); + + + + +select oid,relname from pg_class +where (relkind = 'r' and parttype != 'n' and oid not in (select distinct parentid from pg_partition where parttype='r')) + or (relkind = 'i' and parttype != 'n' and oid not in (select distinct parentid from pg_partition where parttype='x')); + +select p1.relname, p1.parttype, p1.parentid, p1.boundaries +from pg_partition p1 +where (p1.parttype = 'r' and p1.parentid not in (select oid from pg_class where relkind = 'r' and parttype != 'n')) + or (p1.parttype = 'r' and not exists (select oid from pg_partition where parttype='p' and parentid=p1.parentid)) + or (p1.parttype = 'p' and not exists (select oid from pg_partition where parttype='r' and parentid=p1.parentid)) + or (p1.parttype = 'p' and exists (select oid from pg_class where parttype='s' and oid=p1.parentid) and not exists (select oid from pg_partition where parttype='s' and parentid=p1.oid)) + or (p1.parttype = 's' and not exists (select oid from pg_partition where parttype='p' and oid=p1.parentid)) + or (p1.parttype = 'x' and p1.parentid not in (select oid from pg_class where relkind = 'i' and parttype != 'n')) + or (p1.indextblid != 0 and p1.indextblid not in (select oid from pg_partition where parttype != 'r')); + +drop index i_t_subpart_hash_hash_10_3, i_t_subpart_hash_hash_10_4; + + +-- drop table t_subpart_normal_table_hash, t_subpart_part_table_hash; +-- drop schema schema_vastbase_subpartition_hash cascade; +-- drop tablespace ts_subpart_hash_1; +-- drop tablespace ts_subpart_hash_2; +-- drop tablespace ts_subpart_hash_test_user; +-- drop user user_subpart_hash; diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_trigger.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_trigger.sql new file mode 100644 index 0000000000..09babf23aa --- /dev/null +++ 
b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_trigger.sql @@ -0,0 +1,100 @@ +create table employees(id int,salary int); + +create or replace trigger t +before insert or update of salary,id +or delete on employees +begin +case +when inserting then +dbms_output.put_line('inserting'); +when updating ('salary') then +dbms_output.put_line('updating salary'); +when updating ('id') then +dbms_output.put_line('updating id'); +when deleting then +dbms_output.put_line('deleting'); +end case; +end; +/ + +create table oldtab(id int,c1 char(8)); +create table newtab(id int,c1 int); + +create or replace trigger tri1 +after insert on oldtab +for each statement +begin +insert into newtab values(1,1),(2,2),(3,3); +end; +/ + +create or replace trigger tri2 +after update on oldtab +for each statement +begin +update newtab set c1=4 where id=2; +end; +/ + +create or replace trigger tri4 +after truncate on oldtab +for each statement +begin +insert into newtab values(4,4); +end; +/ + +create table oldtab2(id int,c1 char(8)); +create table newtab2(id int,c1 int); + +CREATE OR REPLACE FUNCTION func_tri21() +RETURNS TRIGGER AS $$ +BEGIN +insert into newtab2 values(1,1),(2,2),(3,3); +RETURN OLD; +END; +$$ +LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION func_tri22() +RETURNS TRIGGER AS $$ +BEGIN +update newtab2 set c1=4 where id=2; +RETURN OLD; +END; +$$ +LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION func_tri24() +RETURNS TRIGGER AS $$ +BEGIN +insert into newtab2 values(4,4); +RETURN OLD; +END; +$$ +LANGUAGE plpgsql; + +create trigger tri21 +after insert on oldtab2 +for each statement +execute procedure func_tri21(); + +create trigger tri22 +after update on oldtab2 +for each statement +execute procedure func_tri22(); + +create trigger tri24 +after truncate on oldtab2 +for each statement +execute procedure func_tri24(); + +create table t_trig_when(f1 boolean primary key, f2 text, f3 int, f4 date); +create or replace function dummy_update_func() returns trigger as $$ 
+begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; + +create trigger f1_trig_update after update of f1 on t_trig_when for each row when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_type.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_type.sql new file mode 100644 index 0000000000..5e52956f82 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_type.sql @@ -0,0 +1,5 @@ +create type atype as (id int, name text); +create type btype as object (id int, name text); +create type ctype as (id int, name text); +alter type ctype rename to dtype; +drop type dtype; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_drop_type.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_drop_type.sql new file mode 100644 index 0000000000..348f776851 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_drop_type.sql @@ -0,0 +1,13 @@ +create table atable (id int, age int); + +create type atype as (id int, name text); +drop type atype; + +create type btype as object (id int, name text); +drop type btype; + +drop type typ_not_exit; + +drop type public.typ_not_exit; + +drop type schema_not_exit.typ_not_exit; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.setup b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.setup new file mode 100644 index 0000000000..9a92a4a505 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.setup @@ -0,0 +1,20 @@ +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +tblspace="$subscription_dir/tmp_tblspace" +rm -rf $tblspace +mkdir -p 
$tblspace + + +tblspace_sub="$subscription_dir/tmp_tblspace_sub" +rm -rf $tblspace_sub +mkdir -p $tblspace_sub + +exec_sql $case_use_db $pub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts1 LOCATION '$tblspace/hw_subpartition_tablespace_ts1';" +exec_sql $case_use_db $pub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts2 LOCATION '$tblspace/hw_subpartition_tablespace_ts2';" +exec_sql $case_use_db $pub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts3 LOCATION '$tblspace/hw_subpartition_tablespace_ts3';" + +exec_sql $case_use_db $sub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts1 LOCATION '$tblspace_sub/hw_subpartition_tablespace_ts1';" +exec_sql $case_use_db $sub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts2 LOCATION '$tblspace_sub/hw_subpartition_tablespace_ts2';" +exec_sql $case_use_db $sub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts3 LOCATION '$tblspace_sub/hw_subpartition_tablespace_ts3';" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.sql new file mode 100644 index 0000000000..a2fc397512 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_subpartition_tablespace.sql @@ -0,0 +1,1013 @@ +--DROP SCHEMA hw_subpartition_tablespace CASCADE; +CREATE SCHEMA hw_subpartition_tablespace; +SET CURRENT_SCHEMA TO hw_subpartition_tablespace; + +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 
VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); +-- DROP TABLEt_range_range1; + +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + 
SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); +-- DROP TABLEt_range_range2; + +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS 
THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); +-- DROP TABLEt_range_list1; + +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES 
LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); +-- DROP TABLEt_range_list2; + +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); +-- DROP TABLEt_range_hash1; + +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) 
TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); +-- DROP TABLEt_range_hash2; + +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); +-- DROP 
TABLEt_list_range1; + +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); +-- DROP TABLEt_list_range2; + +--list-list +CREATE TABLE 
t_list_list1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); +-- DROP TABLEt_list_list1; + +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES 
( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); +-- DROP TABLEt_list_list2; + +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE 
hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); +-- DROP TABLEt_list_hash1; + +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); +-- DROP TABLEt_list_hash2; + +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION 
P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); +-- DROP TABLEt_hash_range1; + +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION 
P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); +-- DROP TABLEt_hash_range2; + +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); +-- DROP TABLEt_hash_list1; + +CREATE TABLE t_hash_list2(c1 int, c2 int, 
c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); +-- DROP TABLEt_hash_list2; + +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + 
SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_hash1'); +-- DROP TABLEt_hash_hash1; + +CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 TABLESPACE 
hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_hash2'); +-- DROP TABLEt_hash_hash2; + +-- +----test add partition with tablespace---- +-- +--since the add subpartition define use the same code, we only test different partition type: range/list +--range-list +CREATE TABLE t_range_list3(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ) +); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1; +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_range_list3'); +-- DROP TABLEt_range_list3; + + +CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION 
P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ) +); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30); +SELECT pg_get_tabledef('t_range_list4'); +-- DROP TABLEt_range_list4; + +--list-hash +CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION 
P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ) +); + +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1; +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25); +SELECT pg_get_tabledef('t_list_hash3'); +-- DROP TABLEt_list_hash3; + +CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ) +); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 
TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30); +SELECT pg_get_tabledef('t_list_hash4'); +-- DROP TABLEt_list_hash4; + +-- +----test add subpartition with tablespace---- +-- +--list-range +CREATE TABLE t_list_range3(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD 
SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20); +SELECT pg_get_tabledef('t_list_range3'); +-- DROP TABLEt_list_range3; + +CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES 
LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); +-- DROP TABLEt_list_range4; + +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE 
hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); +-- DROP TABLEt_hash_list3; + +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + 
PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); +-- DROP TABLEt_hash_list4; + +-- +----test create index with tablespace---- +-- +CREATE TABLE t_range_list(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (DEFAULT) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES 
(16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts2 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); + +CREATE INDEX t_range_list_idx ON t_range_list(c1,c2) LOCAL +( + PARTITION idx_p1( + SUBPARTITION idx_p1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION idx_p1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION idx_p1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION idx_p1_4 + ), + PARTITION idx_p2 TABLESPACE hw_subpartition_tablespace_ts2( + SUBPARTITION idx_p2_1, + SUBPARTITION idx_p2_2, + SUBPARTITION idx_p2_3 + ), + PARTITION idx_p3 TABLESPACE hw_subpartition_tablespace_ts2( + SUBPARTITION idx_p3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION idx_p3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION idx_p3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION idx_p3_4 + ), + PARTITION idx_p4( + SUBPARTITION idx_p4_1, + SUBPARTITION idx_p4_2 TABLESPACE hw_subpartition_tablespace_ts2 + ), + PARTITION idx_p5 TABLESPACE hw_subpartition_tablespace_ts3( + SUBPARTITION idx_p5_1 + ), + PARTITION idx_p6( + SUBPARTITION idx_p6_1 TABLESPACE hw_subpartition_tablespace_ts2 + ) +) TABLESPACE hw_subpartition_tablespace_ts1; + +ALTER TABLE t_range_list TRUNCATE SUBPARTITION P_RANGE1_1; + +SELECT p.relname, t.spcname FROM pg_partition p, pg_class c, pg_namespace n, pg_tablespace t +WHERE p.parentid = c.oid + AND c.relname='t_range_list_idx' + AND c.relnamespace=n.oid + AND n.nspname=CURRENT_SCHEMA + AND p.reltablespace = t.oid +ORDER BY p.relname; + +SELECT pg_get_indexdef('hw_subpartition_tablespace.t_range_list_idx'::regclass); + +CREATE TABLE range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + 
channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') +); +INSERT INTO range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_sales_idx ON range_sales(product_id) LOCAL; +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01') TABLESPACE hw_subpartition_tablespace_ts3; +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_end VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts3; + +create table test_index_lt (a int, b int, c int) +partition by list(a) +( + PARTITION p1 VALUES (3, 4, 5), + PARTITION p2 VALUES (1, 2) +); + +ALTER TABLE test_index_lt ADD PARTITION p3 VALUES (6) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE test_index_lt MOVE PARTITION FOR (5) TABLESPACE hw_subpartition_tablespace_ts3; + +ALTER TABLE test_index_lt MOVE PARTITION p3 TABLESPACE hw_subpartition_tablespace_ts3; + +create index test_index_lt_idx on test_index_lt(a) local; + +ALTER TABLE test_index_lt MODIFY PARTITION p1 UNUSABLE LOCAL INDEXES ; + +ALTER TABLE test_index_lt MODIFY PARTITION p1 rebuild UNUSABLE LOCAL INDEXES ; + +-- DROP TABLEt_range_list; + +--finish +drop tablespace hw_subpartition_tablespace_ts1; +drop tablespace hw_subpartition_tablespace_ts2; +drop tablespace hw_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts2' +\! 
rm -fr '@testtablespace@/hw_subpartition_tablespace_ts3' + +DROP SCHEMA hw_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_view_def.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_view_def.sql new file mode 100644 index 0000000000..1a5917bad8 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_view_def.sql @@ -0,0 +1,36 @@ +CREATE TABLE tb_class (id INT,class_name TEXT); +INSERT INTO tb_class (id,class_name) VALUES (1,'class_1'); +INSERT INTO tb_class (id,class_name) VALUES (2,'class_2'); + +CREATE TABLE tb_student (id INT,student_name TEXT,class_id INT); +INSERT INTO tb_student (id,student_name,class_id) VALUES (1,'li lei',1); +INSERT INTO tb_student (id,student_name,class_id) VALUES (2,'han meimei',1); +INSERT INTO tb_student (id,student_name,class_id) VALUES (3,'zhang xiaoming',2); +INSERT INTO tb_student (id,student_name,class_id) VALUES (4,'wang peng',2); + +CREATE VIEW vw_class AS SELECT * FROM tb_class; +CREATE VIEW vw_student AS SELECT * FROM tb_student; +CREATE VIEW vw_class_student AS SELECT c.class_name,s.student_name FROM tb_class c JOIN tb_student s ON c.id = s.class_id; +CREATE VIEW vw_class_1_student AS SELECT c.class_name,s.student_name FROM tb_class c JOIN tb_student s ON c.id = s.class_id WHERE c.id = 1; + +CREATE TABLE tb_order (id INT,order_product TEXT,order_time timestamptz); +INSERT INTO tb_order (id,order_product) VALUES (1,'football'); +INSERT INTO tb_order (id,order_product) VALUES (2,'baskball'); + +CREATE VIEW vw_order AS SELECT * FROM tb_order; +ALTER VIEW vw_order ALTER COLUMN order_time SET DEFAULT now(); + + +CREATE TABLE tb_address (id INT,address TEXT); +INSERT INTO tb_address (id,address) VALUES (1,'a_address'); +INSERT INTO tb_address (id,address) VALUES (2,'b_address'); + +CREATE VIEW vw_address AS SELECT * FROM tb_address; +ALTER VIEW vw_address RENAME TO vw_address_new; + +CREATE TABLE tb_book (id INT,book_name 
TEXT); +INSERT INTO tb_book (id,book_name) VALUES (1,'englisen'); +INSERT INTO tb_book (id,book_name) VALUES (2,'math'); + +CREATE VIEW vw_book AS SELECT * FROM tb_book; +DROP VIEW vw_book; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_002.diff b/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_002.diff new file mode 100644 index 0000000000..a36570ea85 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_002.diff @@ -0,0 +1,4 @@ +386c386 +< CONSTRAINT at4acc_test1 CHECK (((test OPERATOR(dolphin_catalog.+) test2) < (test3 OPERATOR(dolphin_catalog.*) 4))) +--- +> CONSTRAINT at4acc_test1 CHECK ((((test)::bigint + (test2)::bigint) < ((test3)::bigint * (4)::bigint))) diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_rewrite.diff b/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_rewrite.diff new file mode 100644 index 0000000000..109bd6b009 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_table_rewrite.diff @@ -0,0 +1,4 @@ +47c47 +< g timestamp(0) with time zone GENERATED ALWAYS AS (((b)::double precision + 1::double precision)) STORED, +--- +> g timestamp(0) with time zone GENERATED ALWAYS AS (((b)::double precision + (1)::double precision)) STORED, diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/create_table.sql b/src/test/subscription/testcase/ddl_replication_sql/B/create_table.sql index 60fb85d613..060850d2d1 100644 --- a/src/test/subscription/testcase/ddl_replication_sql/B/create_table.sql +++ b/src/test/subscription/testcase/ddl_replication_sql/B/create_table.sql @@ -91,4 +91,10 @@ CREATE TABLE tab_on_update12 ( col_name TEXT, upd_time TIME DEFAULT LOCALTIMESTAMP(2) ON UPDATE LOCALTIMESTAMP(2) COMMENT 'update record local timestamp(2)', PRIMARY KEY 
(col_id) -); \ No newline at end of file +); + +alter table tab_on_update12 MODIFY COLUMN col_name text CHARSET 'utf8' COLLATE utf8mb4_bin; +alter table tab_on_update12 DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_bin; +alter table tab_on_update12 CONVERT TO CHARACTER SET 'utf8' COLLATE utf8mb4_bin; + +alter table tab_on_update12 CHANGE COLUMN col_name col_name2 text CHARSET 'utf8' COLLATE utf8mb4_bin FIRST; diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.setup new file mode 100644 index 0000000000..234cbb4cd1 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.setup @@ -0,0 +1,9 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $pub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" + +exec_sql_with_user $case_use_db $sub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.sql new file mode 100644 index 0000000000..31dfb7d2ef --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.sql @@ -0,0 +1,67 @@ +-- +-- IMMUTABLE | STABLE | VOLATILE +-- +CREATE FUNCTION functest_B_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_B_2(int) RETURNS bool LANGUAGE 'sql' + IMMUTABLE AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_B_3(int) RETURNS bool LANGUAGE 'sql' + STABLE AS 'SELECT $1 = 0'; +CREATE FUNCTION functest_B_4(int) RETURNS bool LANGUAGE 'sql' + VOLATILE AS 'SELECT $1 < 0'; +ALTER FUNCTION functest_B_2(int) VOLATILE; + +-- +-- SECURITY DEFINER | INVOKER +-- +CREATE FUNCTION functest_C_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 0'; +CREATE FUNCTION 
functest_C_2(int) RETURNS bool LANGUAGE 'sql' + SECURITY DEFINER AS 'SELECT $1 = 0'; +CREATE FUNCTION functest_C_3(int) RETURNS bool LANGUAGE 'sql' + SECURITY INVOKER AS 'SELECT $1 < 0'; +ALTER FUNCTION functest_C_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_C_2(int) SECURITY INVOKER; +ALTER FUNCTION functest_C_3(int) SECURITY DEFINER; + +-- +-- LEAKPROOF +-- +CREATE FUNCTION functest_E_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 100'; +CREATE FUNCTION functest_E_2(int) RETURNS bool LANGUAGE 'sql' + LEAKPROOF AS 'SELECT $1 > 100'; +ALTER FUNCTION functest_E_1(int) LEAKPROOF; +ALTER FUNCTION functest_E_2(int) STABLE; -- unrelated change, no effect +ALTER FUNCTION functest_E_2(int) NOT LEAKPROOF; -- remove leakproog attribute +-- it takes superuser privilege to turn on leakproof, but not for turn off +--ALTER FUNCTION functest_E_1(int) OWNER TO regtest_unpriv_user; +--ALTER FUNCTION functest_E_2(int) OWNER TO regtest_unpriv_user; + + +-- +-- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT +-- +CREATE FUNCTION functest_F_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 50'; +CREATE FUNCTION functest_F_2(int) RETURNS bool LANGUAGE 'sql' + CALLED ON NULL INPUT AS 'SELECT $1 = 50'; +CREATE FUNCTION functest_F_3(int) RETURNS bool LANGUAGE 'sql' + RETURNS NULL ON NULL INPUT AS 'SELECT $1 < 50'; +CREATE FUNCTION functest_F_4(int) RETURNS bool LANGUAGE 'sql' + STRICT AS 'SELECT $1 = 50'; +ALTER FUNCTION functest_F_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_F_2(int) STRICT; +ALTER FUNCTION functest_F_3(int) CALLED ON NULL INPUT; + +--ALTER FUNCTION functest_F_3(int) COST 2; + +ALTER FUNCTION functest_F_3(int) set query_dop to 2; +ALTER FUNCTION functest_F_3(int) set work_mem to '2MB'; +ALTER FUNCTION functest_F_3(int) RESET work_mem; +ALTER FUNCTION functest_F_3(int) RESET ALL; + +CREATE FUNCTION select3 () RETURNS setof int LANGUAGE SQL +AS 'select generate_series(1, 10);'; +ALTER FUNCTION 
select3() ROWS 1; +ALTER FUNCTION select3() ROWS 33.3; diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.teardown b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.teardown new file mode 100644 index 0000000000..4e450e63b0 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_function.teardown @@ -0,0 +1,8 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $sub_node1_port "DROP USER regtest_unpriv_user" +exec_sql_with_user $case_use_db $pub_node1_port "DROP USER regtest_unpriv_user" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.setup new file mode 100644 index 0000000000..234cbb4cd1 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.setup @@ -0,0 +1,9 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $pub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" + +exec_sql_with_user $case_use_db $sub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.sql new file mode 100644 index 0000000000..ef95d030dd --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.sql @@ -0,0 +1,15 @@ +create SCHEMA test_sche; + +ALTER SCHEMA test_sche WITH BLOCKCHAIN; +ALTER SCHEMA test_sche WITHOUT BLOCKCHAIN; + +ALTER SCHEMA test_sche RENAME TO test_sche1; +ALTER SCHEMA test_sche1 OWNER TO regtest_unpriv_user; + +ALTER SCHEMA test_sche1 CHARACTER SET utf8mb4 COLLATE utf8mb4_bin; + +create SCHEMA test_sche2; + +create table t1(id int); +ALTER table 
t1 set SCHEMA test_sche2; + diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.teardown b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.teardown new file mode 100644 index 0000000000..4e450e63b0 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_schema.teardown @@ -0,0 +1,8 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $sub_node1_port "DROP USER regtest_unpriv_user" +exec_sql_with_user $case_use_db $pub_node1_port "DROP USER regtest_unpriv_user" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table.sql new file mode 100644 index 0000000000..2fc4e77b56 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table.sql @@ -0,0 +1,2358 @@ +-- +--FOR BLACKLIST FEATURE: REFERENCES/INHERITS/WITH OIDS/RULE/CREATE TYPE/DOMAIN is not supported. 
+-- +-- test inheritance + +create table dropColumn (a int, b int, e int); +create table dropColumnChild (c int) inherits (dropColumn); +create table dropColumnAnother (d int) inherits (dropColumnChild); + +-- these two should fail +alter table dropColumnchild drop column a; +alter table only dropColumnChild drop column b; + + + +-- these three should work +alter table only dropColumn drop column e; +alter table dropColumnChild drop column c; +alter table dropColumn drop column a; + +create table renameColumn (a int); +create table renameColumnChild (b int) inherits (renameColumn); +create table renameColumnAnother (c int) inherits (renameColumnChild); + +-- these three should fail +alter table renameColumnChild rename column a to d; +alter table only renameColumnChild rename column a to d; +alter table only renameColumn rename column a to d; + +-- these should work +alter table renameColumn rename column a to d; +alter table renameColumnChild rename column b to a; + +-- these should work +alter table if exists doesnt_exist_tab rename column a to d; +alter table if exists doesnt_exist_tab rename column b to a; + +-- this should work +alter table renameColumn add column w int; + +-- this should fail +alter table only renameColumn add column x int; + + +-- Test corner cases in dropping of inherited columns + +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +-- should work +alter table p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +select f1 from c1; +alter table c1 drop column f1; +select f1 from c1; + +drop table p1 cascade; + +create table p1 (f1 int, f2 int); +create table c1 () inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +alter table p1 drop column f1; +-- c1.f1 is dropped now, since there is no local definition for it +select f1 from c1; + +drop table p1 cascade; + +create 
table p1 (f1 int, f2 int); +create table c1 () inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +alter table only p1 drop column f1; +-- c1.f1 is NOT dropped, but must now be considered non-inherited +alter table c1 drop column f1; + +drop table p1 cascade; + +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); + +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +alter table only p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +alter table c1 drop column f1; + +drop table p1 cascade; + +create table p1(id int, name text); +create table p2(id2 int, name text, height int); +create table c1(age int) inherits(p1,p2); +create table gc1() inherits (c1); + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +-- should work +alter table only p1 drop column name; +-- should work. Now c1.name is local and inhcount is 0. 
+alter table p2 drop column name; +-- should be rejected since its inherited +alter table gc1 drop column name; +-- should work, and drop gc1.name along +alter table c1 drop column name; +-- should fail: column does not exist +alter table gc1 drop column name; +-- should work and drop the attribute in all tables +alter table p2 drop column height; + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +drop table p1, p2 cascade; + +-- +-- Test the ALTER TABLE SET WITH/WITHOUT OIDS command +-- +create table altstartwith (col integer) with oids; + +insert into altstartwith values (1); + +select oid > 0, * from altstartwith; + +alter table altstartwith set without oids; + +select oid > 0, * from altstartwith; -- fails +select * from altstartwith; + +alter table altstartwith set with oids; + +select oid > 0, * from altstartwith; + +drop table altstartwith; + +-- Check inheritance cases +create table altwithoid (col integer) with oids; + +-- Inherits parents oid column anyway +create table altinhoid () inherits (altwithoid) without oids; + +insert into altinhoid values (1); + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +alter table altwithoid set without oids; + +select oid > 0, * from altwithoid; -- fails +select oid > 0, * from altinhoid; -- fails +select * from altwithoid; +select * from altinhoid; + +alter table altwithoid set with oids; + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +drop table altwithoid cascade; + +create table altwithoid (col integer) without oids; + +-- child can have local oid column +create table altinhoid () inherits (altwithoid) with oids; + +insert into altinhoid values (1); + +select oid > 0, * from altwithoid; -- fails +select oid > 0, * from altinhoid; + +alter table altwithoid set with oids; + +select oid 
> 0, * from altwithoid; +select oid > 0, * from altinhoid; + +-- the child's local definition should remain +alter table altwithoid set without oids; + +select oid > 0, * from altwithoid; -- fails +select oid > 0, * from altinhoid; + +drop table altwithoid cascade; + +-- test renumbering of child-table columns in inherited operations + +create table p1 (f1 int); +create table c1 (f2 text, f3 int) inherits (p1); + +alter table p1 add column a1 int check (a1 > 0); +alter table p1 add column f2 text; + +insert into p1 values (1,2,'abc'); +insert into c1 values(11,'xyz',33,0); -- should fail +insert into c1 values(11,'xyz',33,22); + +select * from p1 order by f1; +update p1 set a1 = a1 + 1, f2 = upper(f2); +select * from p1 order by f1; + +drop table p1 cascade; + +-- test that operations with a dropped column do not try to reference +-- its datatype + +--create domain mytype as text; +--create table foo (f1 text, f2 mytype, f3 text);; + +insert into foo values('bb','cc','dd'); +select * from foo order by f1; + +--drop domain mytype cascade; + +--select * from foo order by f1; +--insert into foo values('qq','rr'); +--select * from foo order by f1; +--update foo set f3 = 'zz'; +--select * from foo order by f1; +--select f3,max(f1) from foo group by f3; + +-- Simple tests for alter table column type +--delete from foo where f1 = 'qq'; +--alter table foo alter f1 TYPE integer; -- fails +--alter table foo alter f1 TYPE varchar(10); +--drop table foo; + +create table anothertab (atcol1 serial8, atcol2 boolean, + constraint anothertab_chk check (atcol1 <= 3));; +alter table anothertab replica identity full; +insert into anothertab (atcol1, atcol2) values (1, true); +insert into anothertab (atcol1, atcol2) values (3, false); +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol1 type boolean; -- we cannot support this cast with numeric nextval +alter table anothertab alter column atcol1 type integer; + +select * from anothertab order 
by atcol1, atcol2; + +insert into anothertab (atcol1, atcol2) values (45, null); -- fails +--insert into anothertab (atcol1, atcol2) values (default, null); + +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol2 type text + using case when atcol2 is true then 'IT WAS TRUE' + when atcol2 is false then 'IT WAS FALSE' + else 'IT WAS NULL!' end; + +select * from anothertab order by atcol1, atcol2; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab alter column atcol1 drop default; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab drop constraint anothertab_chk; +alter table anothertab drop constraint anothertab_chk; -- fails +alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds + +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; + +select * from anothertab order by atcol1, atcol2; + +-- drop table anothertab; + +create table another (f1 int, f2 text);; +alter table another replica identity full; +insert into another values(1, 'one'); +insert into another values(2, 'two'); +insert into another values(3, 'three'); + +select * from another order by f1, f2; + +alter table another + alter f1 type text using f2 || ' more', + alter f2 type bigint using f1 * 10; + +select * from another order by f1, f2; + +-- drop table another; + +-- table's row type +create table tab1 (a int, b text); +create table tab2 (x int, y tab1); +alter table tab1 alter column b type varchar; -- fails + +-- disallow recursive containment of row types +create table recur1 (f1 int); +alter table recur1 add column f2 recur1; -- fails +alter table recur1 add column f2 recur1[]; -- fails +--create domain array_of_recur1 as recur1[]; +--alter table recur1 add column f2 
array_of_recur1; -- fails +create table recur2 (f1 int, f2 recur1); +alter table recur1 add column f2 recur2; -- fails +alter table recur1 add column f2 int; +alter table recur1 alter column f2 type recur2; -- fails + +-- SET STORAGE may need to add a TOAST table +create table test_storage (a text); +alter table test_storage alter a set storage plain; +alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table +alter table test_storage alter a set storage extended; -- re-add TOAST table + +select reltoastrelid <> 0 as has_toast_table +from pg_class +where oid = 'test_storage'::regclass; + +-- ALTER TYPE with a check constraint and a child table (bug before Nov 2012) +CREATE TABLE test_inh_check (a float check (a > 10.2)); +CREATE TABLE test_inh_check_child() INHERITS(test_inh_check); +ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric; +\d test_inh_check +\d test_inh_check_child + +-- +-- lock levels +-- +drop type lockmodes; +create type lockmodes as enum ( + 'AccessShareLock' +,'RowShareLock' +,'RowExclusiveLock' +,'ShareUpdateExclusiveLock' +,'ShareLock' +,'ShareRowExclusiveLock' +,'ExclusiveLock' +,'AccessExclusiveLock' +); + +drop view my_locks; +create view my_locks as +select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode +from pg_locks l join pg_class c on l.relation = c.oid +where virtualtransaction = ( + select virtualtransaction + from pg_locks + where transactionid = txid_current()::integer) +and locktype = 'relation' +and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') +and c.relname != 'my_locks' +group by c.relname; + +create table alterlock (f1 int primary key, f2 text); + +start transaction; alter table alterlock alter column f2 set statistics 150; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock cluster on alterlock_pkey; +select * from my_locks order by 1; +commit; + +start 
transaction; alter table alterlock set without cluster; +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (fillfactor = 100); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock reset (fillfactor); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (toast.autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock alter column f2 set (n_distinct = 1); +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set storage extended; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set default 'x'; +select * from my_locks order by 1; +rollback; + +-- cleanup +drop table alterlock; +drop view my_locks; +drop type lockmodes; + +-- +-- alter function +-- +-- create function test_strict(text) returns text as +-- 'select coalesce($1, ''got passed a null'');' +-- language sql returns null on null input; +-- select test_strict(NULL); +-- alter function test_strict(text) called on null input; +-- select test_strict(NULL); + +-- create function non_strict(text) returns text as +-- 'select coalesce($1, ''got passed a null'');' +-- language sql called on null input; +-- select non_strict(NULL); +-- alter function non_strict(text) returns null on null input; +-- select non_strict(NULL); + +-- +-- alter object set schema +-- + +create schema alter1; +create schema alter2; + +create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0)); + +create view alter1.v1 as select * from alter1.t1; + +create function alter1.plus1(int) returns int as 'select $1+1' language sql; + +--create domain alter1.posint integer check (value > 0); + +create type alter1.ctype as (f1 int, f2 text); + 
+create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql +as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2'; + +create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype); + +create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as + operator 1 alter1.=(alter1.ctype, alter1.ctype); + +create conversion alter1.ascii_to_utf8 for 'sql_ascii' to 'utf8' from ascii_to_utf8; + +create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +create text search configuration alter1.cfg(parser = alter1.prs); +create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize); +create text search dictionary alter1.dict(template = alter1.tmpl); + +insert into alter1.t1(f2) values(11); +insert into alter1.t1(f2) values(12); + +alter table alter1.t1 set schema alter2; +alter table alter1.v1 set schema alter2; +alter function alter1.plus1(int) set schema alter2; +--alter domain alter1.posint set schema alter2; +alter operator class alter1.ctype_hash_ops using hash set schema alter2; +alter operator family alter1.ctype_hash_ops using hash set schema alter2; +alter operator alter1.=(alter1.ctype, alter1.ctype) set schema alter2; +alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2; +alter type alter1.ctype set schema alter2; +alter conversion alter1.ascii_to_utf8 set schema alter2; +alter text search parser alter1.prs set schema alter2; +alter text search configuration alter1.cfg set schema alter2; +alter text search template alter1.tmpl set schema alter2; +alter text search dictionary alter1.dict set schema alter2; + +-- this should succeed because nothing is left in alter1 +drop schema alter1; + +insert into alter2.t1(f2) values(13); +insert into alter2.t1(f2) values(14); + +select * from alter2.t1 order by f1, f2; + +select * from alter2.v1 order 
by f1, f2; + +select alter2.plus1(41); + +-- clean up +drop schema alter2 cascade; +drop schema alter1 cascade; + +-- +-- composite types +-- + +CREATE TYPE test_type AS (a int); +\d test_type + +ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ADD ATTRIBUTE b text; +\d test_type + +ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar; +\d test_type + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE b; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE c; -- fails + +ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c; + +ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean; +\d test_type + +ALTER TYPE test_type RENAME ATTRIBUTE a TO aa; +ALTER TYPE test_type RENAME ATTRIBUTE d TO dd; +\d test_type + +DROP TYPE test_type; + +CREATE TYPE test_type1 AS (a int, b text); +CREATE TABLE test_tbl1 (x int, y test_type1); +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails + +CREATE TYPE test_type2 AS (a int, b text); +CREATE TABLE test_tbl2 OF test_type2; +CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2); +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails +ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails +ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; +\d test_type2 +\d test_tbl2 +\d test_tbl2_subclass + +DROP TABLE test_tbl2_subclass; +alter table test_tbl2 not of; +-- This test isn't that interesting on its own, but the purpose is to leave +-- behind a table to test 
pg_upgrade with. The table has a composite type +-- column in it, and the composite type has a dropped attribute. +CREATE TYPE test_type3 AS (a int); +CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3; +ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int; + +CREATE TYPE test_type_empty AS (); + +-- +-- typed tables: OF / NOT OF +-- + +CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2)); +ALTER TYPE tt_t0 DROP ATTRIBUTE z; +CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK +CREATE TABLE tt1 (x int, y bigint); -- wrong base type +CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod +CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order +CREATE TABLE tt4 (x int); -- too few columns +CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns +CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent +CREATE TABLE tt7 (x int, q text, y numeric(8,2)) WITH OIDS; +ALTER TABLE tt7 DROP q; -- OK + +ALTER TABLE tt0 OF tt_t0; +ALTER TABLE tt1 OF tt_t0; +ALTER TABLE tt2 OF tt_t0; +ALTER TABLE tt3 OF tt_t0; +ALTER TABLE tt4 OF tt_t0; +ALTER TABLE tt5 OF tt_t0; +ALTER TABLE tt6 OF tt_t0; +ALTER TABLE tt7 OF tt_t0; + +CREATE TYPE tt_t1 AS (x int, y numeric(8,2)); +ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table +ALTER TABLE tt7 NOT OF; +\d tt7 +alter table tt0 not of; +-- make sure we can drop a constraint on the parent but it remains on the child +CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL)); +CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent); +ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check"; +-- should fail +INSERT INTO test_drop_constr_child (c) VALUES (NULL); +DROP TABLE test_drop_constr_parent CASCADE; + +-- +-- IF EXISTS test +-- +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER 
COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +CREATE TABLE tt8(a int); +CREATE SCHEMA alter2; + +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +\d alter2.tt8 + +DROP TABLE alter2.tt8; +DROP SCHEMA alter2; + +-- create database test_first_after_A dbcompatibility 'A'; +-- \c test_first_after_A + +-- test add column ... first | after columnname +-- common scenatios +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 clob first, add f7 blob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +\d+ t1 +select * from t1; + +-- 1 primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 blob first, add f7 clob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +select * from t1; +------------------------------------------------------------------------------------------- +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3; +\d+ t1; + +-- 2 unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); 
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3; +select * from t1; + +-- 3 default and generated column +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3; +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1, 2, 3), (11, 22, 33); +alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5; +select * from t1; + +-- 5 NULL and NOT NULL +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3; +\d+ t1 + +-- 6 check constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3; +select * from t1; + +-- 7 foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1 values('a', 1), ('b', 2); +create table t_pri2(f1 text, f2 bool, f4 int primary key); +insert into t_pri2 values('a', true, 1), ('b', false, 2); +create table t1(f1 int, f2 int references 
t_pri1(f2), f3 bool); +insert into t1 values(1, 2, true), (2, 2, false); +alter table t1 drop f2, add f4 int references t_pri2(f4) first; +select * from t1; +alter table t1 drop f4, add f4 int references t_pri2(f4) after f1; +select * from t1; + +-- partition table +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +-- subpartition table +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); +insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +-- test pg_partition +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; +alter 
table range_range add f1 int default 1 first, add f2 text after id; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; +select * from range_range; + +-- pg_index test +drop table if exists t1 cascade; +create table t1 +( + f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int, + primary key(f1, f2), + unique(f3, f4), + check(f5 = 10) +); +create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0; + +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +alter table t1 add f8 int first, add f9 int unique after f1; +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +-- pg_attribute test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 add f4 int default 4 first; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 drop f2, add f5 int default 5 after f1; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +-- pg_attrdef test +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored); +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 add f5 text default 'aaa' first; +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid 
= (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1; +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +-- pg_depend test +drop table if exists t1 cascade; +create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored); +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +alter table t1 add t1 add f4 int first; +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; +alter table t1 drop f2, add f6 int, add f7 int generated always as (f1 + f6) stored after f1; +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +-- pg_rewrite test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +insert into t1 values(1, 2, 3), (11, 22, 33); +create view t1_view1 as select * from t1; +create view t1_view2 as select f1, f2 from t1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; + +-- pg_trigger test +drop table if exists t1 cascade; +create table t1(f1 boolean not 
null, f2 text, f3 int, f4 date); +alter table t1 add primary key(f1); +drop function if exists dummy_update_func; +create function dummy_update_func() returns trigger as $$ +begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; + +drop trigger if exists f1_trig_update on t1; +drop trigger if exists f1_trig_insert on t1; + +create trigger f1_trig_update after update of f1 on t1 for each row + when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); +create trigger f1_trig_insert after insert on t1 for each row + when (not new.f1) execute procedure dummy_update_func('insert'); + +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +alter table t1 add f5 int after f1, add f6 boolean first; +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +-- pg_rlspolicy test +drop table if exists t1 cascade; +drop role if exists test_rlspolicy; +create role test_rlspolicy nologin password 'Gauss_234'; +create table t1 (f1 int, f2 int, f3 text) partition by range (f1) +( + partition t1_p0 values less than(10), + partition t1_p1 values less than(50), + partition t1_p2 values less than(100), + partition t1_p3 values less than(MAXVALUE) +); + +INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei'); +grant select on t1 to public; + +create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20); +create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy using (f1 < 30); + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 + 100) stored 
first; +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); +drop table if exists t1 cascade; + +-- \c postgres +-- drop database test_first_after_A; + +-- test add column ... first | after columnname in B compatibility +-- create database test_first_after_B dbcompatibility 'b'; +-- \c test_first_after_B + +-- test add column ... first | after columnname in astore table +-- ASTORE table +-- common scenatios +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 clob first, add f7 blob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +\d+ t1 +select * from t1; + +-- 1 primary key +-- 1.1.1 primary key in original table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 clob first, add f7 blob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +\d+ t1 + +-- 1.1.2 primary key in original table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 blob first, add f7 clob after f2; +alter table t1 add f8 int, add f9 text first, add f10 float after f3; +select * from t1; + +-- 1.2.1 primary key in a table without data, add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +-- error +alter table t1 add f6 text, add f7 
int primary key first, add f8 float after f3; +select * from t1; + +-- 1.2.2 primary key in a table with data, add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3; +select * from t1; + +-- 1.3.1 primary key in a table without data, drop primary key, then add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3; +\d+ t1; + +-- 1.3.2 primary key in a table with data, drop primary key, then add column with primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1; +-- error +alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3; +select * from t1; + +-- 1.4.1 primary key in a table without data, drop primary key, the add column with primary key and default +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key default 7 first, add f8 float after f3; +\d+ t1 + +-- 1.4.2 primary key in a table with data, drop primary key, then add column with primary key and default +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 
19:56:10.158564', x'42', false); +alter table t1 drop f1; +-- error +alter table t1 add f6 text, add f7 int primary key default 7 first, add f8 float after f3; +select * from t1; + +-- 1.5.1 primary key in a table without data, drop primary key, the add column with primary key and auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3; +\d+ t1 + +-- 1.5.2 primary key in a table with data, drop primary key, the add column with primary key and auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3; +select * from t1; + +-- 2 unique index +-- 2.1.1 unique index in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 int first, add f7 float after f3; +\d+ t1 + +-- 2.1.2 unique index in a table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int first, add f7 float after f3; +select * from t1; + +-- 2.2.1 unique index in a table without data, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 int unique first, add f7 float unique after f3; +\d+ t1 + 
+-- 2.2.2 unique index in a table with data, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int unique first, add f7 float unique after f3; +select * from t1; + +-- 2.3.1 unique index in a table without data, drop unique index, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3; +\d+ t1 + +-- 2.3.2 unique index in a table with data, drop unique index, add column with unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3; +select * from t1; + +-- 2.4.1 unique index in a table without data, drop unique index, add column with unique index and default +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 int unique default 6 first; +alter table t1 drop f1, add f7 float unique default 7 after f3; +\d+ t1 + +-- 2.4.2 unique index in a table with data, drop unique index, add column with unique index and default +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 int unique default 6 first; +alter table t1 drop f1; +-- error +alter table t1 add f7 float 
unique default 7 after f3; +select * from t1; + +-- 3 default and generated column +-- 3.1.1 default in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 int first, add f7 float after f3; +\d+ t1 + +-- 3.1.2 default in a table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int first, add f7 float after f3; +select * from t1; + +-- 3.2.1 default in a table without data, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 int default 6 first, add f7 float default 7 after f3; +\d+ t1 + +-- 3.2.2 default in a table with data, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int default 6 first, add f7 float default 7 after f3; +select * from t1; + +-- 3.3.1 default in a table without data, drop default, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 int default 6 first, add f7 float default 7 after f3; +\d+ t1 + +-- 3.3.2 default in a table with data, drop default, add column with default +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 
19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3; +select * from t1; + +-- 3.4.1 generated column in a table without data, drop generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5; +\d+ t1 + +-- 3.4.1 generated column in a table with data, drop generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1, 2, 3), (11, 22, 33); +alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5; +select * from t1; + +-- 3.5.1 generated column in a table without data, add generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f2 + f3) stored after f5; +\d+ t1; + +-- 3.5.2 generated column in table with data, add generated column +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1, 2, 3), (11, 22, 33); +alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5; +select * from t1; + +-- 4 auto_increment +-- 4.1.1 auto_increment in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 text 
first, add f7 float after f3; +\d+ t1 + +-- 4.1.2 auto_increment in a table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 text first, add f7 float after f3; +select * from t1; + +-- 4.2.1 auto_increment in a table without data, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +-- error +alter table t1 add f6 int primary key auto_increment first; +-- error +alter table t1 add f7 int primary key auto_increment after f3; +\d+ t1 + +-- 4.2.2 auto_increment in a table with data, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 int primary key auto_increment first; +-- error +alter table t1 add f7 int primary key auto_increment after f3; +select * from t1; + +-- 4.3.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 int primary key auto_increment first; +\d+ t1 + +-- 4.3.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', 
'2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int primary key auto_increment first; + +-- 4.4.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment and default +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1; +-- error +alter table t1 add f6 int primary key auto_increment default 6 first; +\d+ t1 + +-- 4.4.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment and default +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1; +-- error +alter table t1 add f6 int primary key auto_increment default 6 first; +select * from t1; + +-- 5 NULL and NOT NULL +-- 5.1.1 null and not null in a table without data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 text first, add f7 float after f3; +\d+ t1 + +-- 5.1.2 null and not null in a table with data, add column without constraints +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 text first, add f7 float after f3; +select * from t1; + +-- 5.2.1 null and not null in table without data, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 int null first; +alter table t1 add f7 float not null
after f3; +\d+ t1 + +-- 5.2.2 null and not null in a table with data, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int null first; +-- error +alter table t1 add f7 float not null after f3; +select * from t1; + +-- 5.3.1 null and not null in a table without data, drop null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, add f6 int null first, add f7 float not null after f3; +\d+ t1 + +-- 5.3.2 null and not null in a table with data, drop null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, add f6 int null first; +-- error +alter table t1 add f7 float not null after f3; +select * from t1; + +-- 5.4.1 null and not null in a table without data, drop null and not null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3; +\d+ t1 + +-- 5.4.2 null and not null in a table without data, drop null and not null, add column with null or not null +drop table if exists t1 cascade; +create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f1, drop 
f2, add f6 int null first; +-- error +alter table t1 add f7 float not null after f3; +select * from t1; + +-- 6 check constraint +-- 6.1.1 check constraint in a table without data, add column without constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 text first, add f7 float after f3; +\d+ t1 + +-- 6.1.2 check constraint in a table with data, add column without constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 text first, add f7 float after f3; +select * from t1; + +-- 6.2.1 check constraint in a table without data, add column with check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7); +\d+ t1 + +-- 6.2.2 check constraint in a table with data, add column with check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7) after f3; +select * from t1; + +-- 6.3.1 check constraint in a table without data, drop check, add column with check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check (f7 - f1 > 0) after f3; +\d+ t1 + +-- 6.3.2 check constraint in a table with data, drop check, add column with 
check +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3; +select * from t1; + +-- 7 foreign key +-- 7.1.1 foreign key constraint in a table without data, add column without constraint +drop table if exists t_pri1 cascade; +create table t_pri1(f1 int, f2 int primary key); +drop table if exists t1 cascade; +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 add f4 int, add f5 text first, add f6 float after f2; +\d+ t1 + +-- 7.1.2 foreign key constraint in a table with data, add column without constraint +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1 values('a', 1), ('b', 2); +create table t1(f1 text, f2 int references t_pri1(f2), f3 bool); +insert into t1 values('a', 1, true), ('b', 2, false); +alter table t1 add f4 int, add f5 text first, add f6 float after f2; +select * from t1; + +-- 7.2.1 foreign key constraint in a table without data, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +create table t_pri2(f1 int, f2 int, f4 int primary key); +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 add f4 int references t_pri2(f4) first; +\d+ t1 +alter table t1 drop f4, add f4 int references t_pri2(f4) after f2; +\d+ t1 + +-- 7.2.2 foreign key constraint in a table with data, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1
values('a', 1), ('b', 2); +create table t_pri2(f1 int, f2 bool, f4 int primary key); +insert into t_pri2 values(11, true, 1), (22, false, 2); +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +insert into t1 values(1, 1, true), (2, 2, false); +alter table t1 add f4 int references t_pri2(f4) first; +select * from t1; +alter table t1 drop f4, add f4 int references t_pri2(f4) after f2; +select * from t1; + +-- 7.3.1 foreign key constraint in a table without data, drop foreign key, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 int, f2 int primary key); +create table t_pri2(f1 int, f2 int, f4 int primary key); +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 drop f2, add f4 int references t_pri2(f4) first; +\d+ t1 +alter table t1 drop f4, add f4 int references t_pri2(f4) after f1; +\d+ t1 + +-- 7.3.2 foreign key constraint in a table with data, drop foreign key, add column with foreign key +drop table if exists t1 cascade; +drop table if exists t_pri1 cascade; +drop table if exists t_pri2 cascade; +create table t_pri1(f1 text, f2 int primary key); +insert into t_pri1 values('a', 1), ('b', 2); +create table t_pri2(f1 text, f2 bool, f4 int primary key); +insert into t_pri2 values('a', true, 1), ('b', false, 2); +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +insert into t1 values(1, 2, true), (2, 2, false); +alter table t1 drop f2, add f4 int references t_pri2(f4) first; +select * from t1; +alter table t1 drop f4, add f4 int references t_pri2(f4) after f1; +select * from t1; + +-- partition table +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +select relname, parttype, partkey from pg_partition where 
parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +-- subpartition table +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); +insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +-- test pg_partition +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; +alter table range_range add f1 int default 1 first, add f2 text after id; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; +select * from range_range; + + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool) with (orientation = column); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +-- error +alter table t1 add f6 text first; +-- error +alter table t1 add 
f6 text after f1; + +-- pg_index test +drop table if exists t1 cascade; +create table t1 +( + f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int, + primary key(f1, f2), + unique((lower(f3)), (abs(f4))), + check(f5 = 10) +); +create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0; + +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +alter table t1 add f8 int first, add f9 int unique after f1; +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3; + +-- pg_attribute test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 add f4 int default 4 first; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 drop f2, add f5 int default 5 after f1; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +-- pg_attrdef test +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored); +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 add f5 text default 'aaa' first; +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1; -- ERROR + +-- pg_rewrite test +drop table if 
exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +insert into t1 values(1, 2, 3), (11, 22, 33); +create view t1_view1 as select * from t1; +create view t1_view2 as select f1, f2 from t1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; +alter table t1 add f4 int first, add f5 int after f1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; + +-- pg_trigger test +drop table if exists t1 cascade; +create table t1(f1 boolean not null, f2 text, f3 int, f4 date); +alter table t1 add primary key(f1); +drop function if exists dummy_update_func; +create function dummy_update_func() returns trigger as $$ +begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; + +drop trigger if exists f1_trig_update on t1; +drop trigger if exists f1_trig_insert on t1; + +create trigger f1_trig_update after update of f1 on t1 for each row + when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); +create trigger f1_trig_insert after insert on t1 for each row + when (not new.f1) execute procedure dummy_update_func('insert'); + +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +alter table t1 add f5 int after f1, add f6 boolean first; +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +-- pg_rlspolicy test +drop table if exists t1 cascade; +drop role if exists test_rlspolicy2; +create role test_rlspolicy2 nologin password 'Gauss_234'; +create table t1 (f1 int, f2 int, f3 text) partition by range (f1) +( + partition t1_p0 values less than(10), + partition 
t1_p1 values less than(50), + partition t1_p2 values less than(100), + partition t1_p3 values less than(MAXVALUE) +); + +INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei'); +grant select on t1 to public; + +create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20); +create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy2 using (f1 < 30); + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 + 100) stored first; +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +-- expression test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text); +insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei'); +-- T_FuncExpr +create index t1_idx1 on t1(abs(f1), f2); +-- T_OpExpr +create index t1_idx2 on t1((f1 + f2), (f1 - f3)); +-- T_BooleanTest +create index t1_idx3 on t1((f4 is true)); +-- T_CaseExpr and T_CaseWhen +create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end)); +-- T_ArrayExpr +create index t1_idx5 on t1((array[f1, f2, f3])); +-- T_TypeCast +create index t1_idx6 on t1(((f1 + f2 + 1) :: text)); +-- T_BoolExpr +create index t1_idx7 on t1((f1 and f2), (f2 or f3)); +-- T_ArrayRef +create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1])); +-- T_ScalarArrayOpExpr +create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10]))); +-- T_RowCompareExpr +create index t1_idx10 on t1((row(f1, f5) < row(f2, f6))); +-- T_MinMaxExpr +create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3)); +-- T_RowExpr +drop table if exists mytable cascade; +create table mytable(f1 int, f2 int, f3 text); +-- create function 
getf1(mytable) returns int as 'select $1.f1' language sql; +-- create index t1_idx12 on t1(getf1(row(f1, 2, 'a'))); +-- T_CoalesceExpr +create index t1_idx13 on t1(nvl(f1, f2)); +-- T_NullTest +create index t1_idx14 on t1((f1 is null)); +-- T_ScalarArrayOpExpr +create index t1_idx16 on t1((f1 in (1,2,3))); +-- T_NullIfExpr +create index t1_idx17 on t1(nullif(f5,f6)); +-- T_RelabelType +alter table t1 add f7 oid; +create index t1_idx18 on t1((f7::int4)); +-- T_CoerceViaIO +alter table t1 add f8 json; +create index t1_idx19 on t1((f8::jsonb)); +-- T_ArrayCoerceExpr +alter table t1 add f9 float[]; +create index t1_idx20 on t1((f9::int[])); +-- T_PrefixKey +create index t1_idx21 on t1(f6(5)); + +\d+ t1 +select * from t1; + +alter table t1 add f10 int primary key auto_increment after f4, + add f11 int generated always as (f1 + f2) stored after f1, + add f12 date default '2023-01-05' first, + add f13 int not null default 13 first; + +\d+ t1 +select * from t1; + +-- test modify column ... first | after column in astore table +-- ASTORE table +-- common scenatios +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4, modify f5 bool after f2; +\d+ t1 +select * from t1; +alter table t1 modify + +-- 1 primary key +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', 
'2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +-- 2 unique index +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +-- 3 default and generated column +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int 
default 3, f4 int generated always as (f1 + f2) stored); +alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first; +\d+ t1 +alter table t1 drop f1; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored); +insert into t1 values(1,2,3),(11,22,33); +alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first; +\d+ t1 +select * from t1; +alter table t1 drop f1; +\d+ t1 +select * from t1; + +-- 4 auto_increment +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +\d+ t1 +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1(f2, f3, f4, f5) values('a', '2022-11-08 19:56:10.158564', x'41', true), ('b', '2022-11-09 19:56:10.158564', x'42', false); +\d+ t1 +select * from t1; +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +insert into t1(f3, f2, f4, f5, f1) values('2022-11-10 19:56:10.158564', 'c', x'43', false, 3); +select f1 from t1; + +-- 5 NULL and NOT NULL +drop table if exists t1 cascade; +alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +alter table t1 modify f2 varchar(20) after f3; +\d+ t1 + +drop table if exists t1 cascade; +alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int 
after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; +alter table t1 modify f2 varchar(20) after f3; +\d+ t1 +select * from t1; + +-- 6 check constraint +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +alter table t1 modify f1 int after f3; +\d+ t1 +alter table t1 drop f1, modify f5 bool first; +\d+ t1 + +drop table if exists t1 cascade; +create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +alter table t1 modify f3 timestamp first, modify f1 int after f4; +\d+ t1 +select * from t1; +alter table t1 modify f1 int after f3; +\d+ t1 +select * from t1; +alter table t1 drop f1, modify f5 bool first; +\d+ t1 +select * from t1; + +-- 7 foreign key +drop table if exists t_pri1 cascade; +create table t_pri1(f1 int, f2 int primary key); +drop table if exists t1 cascade; +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +alter table t1 modify f2 int first; +\d+ t1 +alter table t1 modify f2 int after f3; +\d+ t1 + +drop table if exists t_pri1 cascade; +create table t_pri1(f1 int, f2 int primary key); +insert into t_pri1 values(1,1),(2,2); +drop table if exists t1 cascade; +create table t1(f1 int, f2 int references t_pri1(f2), f3 bool); +insert into t1 values(1, 1, true), (2, 2, false); +alter table t1 modify f2 int first; +\d+ t1 +select * from t1; +alter table t1 modify f2 int after f3; +\d+ t1 +select * from t1; + +-- partition table +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int, primary key (f1, f2)) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 
values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first; +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +alter table t1 modify f1 int after f2; +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname; + +-- modify operation before add +alter table t1 add f4 int after f2, modify f1 int after f2; +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); + +drop table if exists t1 cascade; +create table t1 +(f1 int, f2 int, f3 int, primary key (f1, f2)) +partition by range(f1, f2) +( + partition t1_p0 values less than (10, 0), + partition t1_p1 values less than (20, 0), + partition t1_p2 values less than (30, 0) +); +insert into t1 values(9, -1, 1), (19, -1, 2), (29, -1, 3); +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); +select * from t1 partition (t1_p2); + +alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); +select * from t1 partition (t1_p2); + +alter table t1 modify f1 int after f2; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); +select * from t1 partition (t1_p2); + +alter table t1 add f4 int after f2, 
modify f1 int after f2; +\d+ t1 +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1'); +select * from t1 partition (t1_p0); +select * from t1 partition (t1_p1); +select * from t1 partition (t1_p2); + +-- subpartition table +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday)) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify birthday date first, modify id int after gender; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + + +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday)) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) 
+); + +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); +insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify birthday date first, modify id int after gender; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +select * from range_range; + +-- pg_index test +drop table if exists t1 cascade; +create table t1 +( + f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int, + primary key(f1, f2), + unique((lower(f3)), (abs(f4))), + check(f5 = 10) +); +create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0; + +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1'); + +alter table t1 modify f1 int after f2, modify f4 int after f6, modify f5 int first; +\d+ t1 +select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1'); + +-- pg_attribute test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +alter table t1 modify f3 int first, modify f1 int after f2; +\d+ t1 +select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum; + +-- pg_attrdef test +drop table if exists t1 cascade; +create table t1(f1 int primary key auto_increment, f2 
int, f3 int default 3, f4 int generated always as (f2 + f3) stored); +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +alter table t1 modify f3 int first, modify f1 int after f4, modify f4 int first; +\d+ t1 +select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum; + +-- pg_depend test +drop table if exists t1 cascade; +create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored, f4 int, unique ((abs(f4)))); +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +alter table t1 modify f4 int first, modify f3 int after f1, modify f1 int after f2; +\d+ t1 +select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend + where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5; + +-- pg_partition test +drop table if exists range_range cascade; +create table range_range(id int, gender varchar not null, birthday date not null) +partition by range (id) subpartition by range (birthday) +( + partition p_1 values less than(100) + ( + subpartition p_1_a values less than('2022-01-01'), + subpartition p_1_b values less than(MAXVALUE) + ), + partition p_2 values less than(200) + ( + subpartition p_2_a values less than('2022-01-01'), + subpartition p_2_b values less than(MAXVALUE) + ), + partition p_3 values less than(MAXVALUE) + ( + subpartition p_3_a values less than('2022-01-01'), + subpartition p_3_b values less than(MAXVALUE) + ) +); +insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24'); +insert into range_range 
values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08'); +insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21'); + +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify gender varchar after birthday; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + +alter table range_range modify birthday date first, modify id int after gender; +\d+ range_range +select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname; + + +-- pg_rewrite test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, f4 int); +insert into t1 values(1, 2, 3, 4), (11, 22, 33, 44); +create view t1_view1 as select * from t1; +create view t1_view2 as select f1, f4 from t1; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; +alter table t1 modify f2 int first, modify f1 int after f4, add f5 int after f4; +\d+ t1_view1 +\d+ t1_view2 +\d+ t1 +select pg_get_viewdef('t1_view1'); +select pg_get_viewdef('t1_view2'); +select * from t1_view1; +select * from t1_view2; +select * from t1; + +-- pg_trigger test +drop table if exists t1 cascade; +create table t1(f1 boolean not null, f2 text, f3 int, f4 date); +alter table t1 add primary key(f1); +drop function if exists dummy_update_func; +create function dummy_update_func() returns trigger as $$ +begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; +drop function dummy_update_func; +drop trigger if exists f1_trig_update on t1; 
+drop trigger if exists f1_trig_insert on t1; + +create trigger f1_trig_update after update of f1 on t1 for each row + when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); +create trigger f1_trig_insert after insert on t1 for each row + when (not new.f1) execute procedure dummy_update_func('insert'); + +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +alter table t1 modify f3 int first, modify f1 boolean after f4; +\d+ t1 +select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname; + +-- pg_rlspolicy test +drop table if exists t1 cascade; +drop role if exists test_rlspolicy3; +create role test_rlspolicy3 nologin password 'Gauss_234'; +create table t1 (f1 int, f2 int, f3 text) partition by range (f1) +( + partition t1_p0 values less than(10), + partition t1_p1 values less than(50), + partition t1_p2 values less than(100), + partition t1_p3 values less than(MAXVALUE) +); + +INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei'); +grant select on t1 to public; + +create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20); +create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy3 using (f1 < 30); + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + +alter table t1 modify f2 int first, modify f1 int after f3; + +\d+ t1 +select * from t1 limit 10; +select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1'); + + +-- expression test +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text); +insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei'); +-- T_FuncExpr +create index t1_idx1 on t1(abs(f1), f2); +-- T_OpExpr +create index 
t1_idx2 on t1((f1 + f2), (f1 - f3)); +-- T_BooleanTest +create index t1_idx3 on t1((f4 is true)); +-- T_CaseExpr and T_CaseWhen +create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end)); +-- T_ArrayExpr +create index t1_idx5 on t1((array[f1, f2, f3])); +-- T_TypeCast +create index t1_idx6 on t1(((f1 + f2 + 1) :: text)); +-- T_BoolExpr +create index t1_idx7 on t1((f1 and f2), (f2 or f3)); +-- T_ArrayRef +create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1])); +-- T_ScalarArrayOpExpr +create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10]))); +-- T_RowCompareExpr +create index t1_idx10 on t1((row(f1, f5) < row(f2, f6))); +-- T_MinMaxExpr +create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3)); +-- T_RowExpr +drop table if exists mytable cascade; +create table mytable(f1 int, f2 int, f3 text); +-- create function getf1(mytable) returns int as 'select $1.f1' language sql; +-- create index t1_idx12 on t1(getf1(row(f1, 2, 'a'))); +-- T_CoalesceExpr +create index t1_idx13 on t1(nvl(f1, f2)); +-- T_NullTest +create index t1_idx14 on t1((f1 is null)); +-- T_ScalarArrayOpExpr +create index t1_idx16 on t1((f1 in (1,2,3))); +-- T_NullIfExpr +create index t1_idx17 on t1(nullif(f5,f6)); +-- T_RelabelType +alter table t1 add f7 oid; +create index t1_idx18 on t1((f7::int4)); +-- T_CoerceViaIO +alter table t1 add f8 json; +create index t1_idx19 on t1((f8::jsonb)); +-- T_ArrayCoerceExpr +alter table t1 add f9 float[]; +create index t1_idx20 on t1((f9::int[])); + +\d+ t1 +select * from t1; + +alter table t1 modify f8 json first, modify f2 int after f6, modify f7 oid after f3; + +\d+ t1 +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 int); +insert into t1 values(1,2); +alter table t1 add f3 int default 3, add f4 int default 4 after f3, add f5 int default 5, add f6 int default 6 after f3; +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 int); +insert into t1 values(1,2); +alter 
table t1 add f3 int default 3, add f4 int default 4 after f1, add f5 int default 5, add f6 int default 6 after f5; +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 int); +insert into t1 values(1,2); +alter table t1 add f3 int, add f4 int after f3, add f5 int, add f6 int first; +select * from t1; + +drop table if exists t1; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); + +alter table t1 drop f5, + add f6 int default 6 , add f7 int first, add f8 int default 8 after f3, + modify f3 timestamp first, modify f6 int after f2, modify f1 text, modify f2 text after f4; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int, primary key(f1, f3)); +insert into t1 values(1, 2, 3), (11, 22, 33); +\d+ t1 +select * from t1; +alter table t1 modify f3 int first, modify f1 int after f2; +\d+ t1 +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 int, f3 int); +insert into t1 values(1, 2, 3), (11, 12, 13), (21, 22, 23); +select * from t1; +alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 * 10) stored first; +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2)); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +select * from t1; +\d+ t1 +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 int, primary key(f1, f3)); +insert into t1 values(1, 'a', 1), (2, 'b', 2); +\d+ t1 +select * from t1; + +alter table t1 modify f1 text after f3, add f10 int default 10 after f2; +\d+ t1 +select * from t1; + +-- unlogged table +drop table if 
exists t1 cascade; +create unlogged table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2)); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +\d+ t1 +select * from t1; + +\d+ t1 +select * from t1; + +--DTS +drop table if exists unit cascade; +CREATE TABLE unit +( + f11 INTEGER CHECK (f11 >=2), + f12 bool, + f13 text, + f14 varchar(20), + primary key (f11,f12) +); + +insert into unit values(2,3,4,5); +insert into unit values(3,4,5,6); +ALTER TABLE unit ADD f1 int CHECK (f1 >=10) FIRST; +insert into unit values (10,6,1,1,1); +insert into unit values (11,7,1,1,1); +ALTER TABLE unit ADD f2 int CHECK (f2 >=10) after f11; +select * from unit; +ALTER TABLE unit MODIFY f12 int FIRST; +select * from unit; +drop table if exists unit cascade; + +drop table t1 cascade; +create table t1(f1 int, f2 text, f3 int, f4 bool, f5 int generated always as (f1 + f3) stored); +insert into t1 values(1, 'aaa', 3, true); +insert into t1 values(11, 'bbb', 33, false); +insert into t1 values(111, 'ccc', 333, true); +insert into t1 values(1111, 'ddd', 3333, true); + +create view t1_view1 as select * from t1; +select * from t1_view1; +alter table t1 modify f1 int after f2, modify f3 int first; +drop view t1_view1; +create view t1_view1 as select * from t1; +alter table t1 modify f1 int after f2, modify f3 int first; +drop table t1 cascade; + +create table t1(f1 int, f2 text, f3 int, f4 bigint, f5 int generated always as (f1 + f3) stored); +insert into t1 values(1, 'aaa', 3, 1); +insert into t1 values(11, 'bbb', 33, 2); +insert into t1 values(111, 'ccc', 333, 3); +insert into t1 values(1111, 'ddd', 3333, 4); + +create view t1_view1 as select * from t1; +select * from t1_view1; +alter table t1 add f6 int first, add f7 int after f4, modify f1 int after f2, modify f3 int first; +select * from t1_view1; +drop view t1_view1; + +create view 
t1_view2 as select f1, f3, f5 from t1 where f2='aaa'; +select * from t1_view2; +alter table t1 add f8 int first, add f9 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view2; +drop view t1_view2; + +create view t1_view3 as select * from (select f1+f3, f5 from t1); +select * from t1_view3; +alter table t1 add f10 int first, add f11 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view3; +drop view t1_view3; + +create view t1_view4 as select * from (select abs(f1+f3) as col1, abs(f5) as col2 from t1); +select * from t1_view4; +alter table t1 add f12 int first, add f13 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view4; +drop view t1_view4; + +create view t1_view5 as select * from (select * from t1); +select * from t1_view5; +alter table t1 add f14 int first, add f15 int after f4, modify f1 int after f2, modify f3 int first; +select * from t1_view5; +drop view t1_view5; + +create view t1_view6 as select f1, f3, f5 from t1 where f2='aaa'; +select * from t1_view6; +alter table t1 modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first; +select * from t1_view6; +drop view t1_view6; +drop table t1 cascade; + +-- dts for add +drop table if exists test_d; +create table test_d (f2 int primary key, f3 bool, f5 text); +insert into test_d values(1,2,3), (2,3,4), (3,4,5); +select * from test_d; +alter table test_d add f1 int default 1,add f11 text check (f11 >=2) first; +select * from test_d; + +drop table if exists test_d; +create table test_d (f2 int primary key, f3 bool, f5 text); +insert into test_d values(1,2,3), (2,3,4), (3,4,5); +select * from test_d; +alter table test_d add f1 int default 1; +alter table test_d add f11 text check (f11 >=2) first; +select * from test_d; +drop table if exists test_d; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 
timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +select * from t1; +alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '777' first, + add f8 int default 8, add f9 int primary key auto_increment after f6; +select * from t1; + +drop table if exists t1 cascade; +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 varbit(8), f5 bool); +insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false); +select * from t1; +select * from t1; +drop table if exists t1 cascade; + +drop table if exists t1 cascade; +create table t1(f1 int comment 'f1 is int', f2 varchar(20), f3 timestamp comment 'f3 is timestamp', f4 varbit(8), f5 bool comment 'f5 is boolean'); +SELECT pg_get_tabledef('t1'); +alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '7' first, add f8 int primary key auto_increment after f2; +SELECT pg_get_tabledef('t1'); +alter table t1 modify f1 int after f3, modify f5 bool first, modify f3 timestamp after f4; +SELECT pg_get_tabledef('t1'); +drop table if exists t1 cascade; + + + + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 boolean primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int1 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int2 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int4 primary key auto_increment ; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 
timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int8 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 int16 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 float4 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 add f6 float8 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 boolean primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int1 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int2 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int4 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int not null, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int8 primary key auto_increment; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 int16 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 float4 primary key auto_increment not null; +drop table if exists t1 cascade; + +create table 
t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool); +alter table t1 modify column f1 float8 primary key auto_increment not null; +drop table if exists t1 cascade; + + +-- \c postgres +-- drop database test_first_after_B; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_001.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_001.sql new file mode 100644 index 0000000000..eae044df97 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_001.sql @@ -0,0 +1,789 @@ +create table altertable_rangeparttable +( + c1 int, + c2 float, + c3 real, + c4 text +) +partition by range (c1, c2, c3, c4) +( + partition altertable_rangeparttable_p1 values less than (10, 10.00, 19.156, 'h'), + partition altertable_rangeparttable_p2 values less than (20, 20.89, 23.75, 'k'), + partition altertable_rangeparttable_p3 values less than (30, 30.45, 32.706, 's') +); + +alter table altertable_rangeparttable add partition altertable_rangeparttable_p4 values less than (36, 45.25, 37.39, 'u'); + +create table altertable_rangeparttable2 +( + c1 int, + c2 float, + c3 real, + c4 text +) +partition by range (abs(c1)) +( + partition altertable_rangeparttable_p1 values less than (10), + partition altertable_rangeparttable_p2 values less than (20), + partition altertable_rangeparttable_p3 values less than (30) +); +alter table altertable_rangeparttable2 add partition altertable_rangeparttable_p4 values less than (36); + + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION 
p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +alter table range_range add partition p_202001 values less than ('202002') (subpartition p_202001_a values less than('2') , subpartition p_202001_b values less than('3') ); + +-- comes from function_get_table_def.sql +create table table_range4 (id int primary key, a date, b varchar) +partition by range (id) +( + partition table_range4_p1 start (10) end (40) every (10), + partition table_range4_p2 end (70), + partition table_range4_p3 start (70), + partition table_range4_p4 start (100) end (150) every (20) +); + +alter table table_range4 add partition table_range4_p5 start (150) end (300) every (20); +alter table table_range4 add partition table_range4_p6 values less than (310), add partition table_range4_p7 values less than (320); + +create table table_interval1 (id int, a date, b varchar) +partition by range (a) +interval ('1 day') +( + partition table_interval1_p1 values less than('2020-03-01'), + partition table_interval1_p2 values less than('2020-05-01'), + partition table_interval1_p3 values less than('2020-07-01'), + partition table_interval1_p4 values less than(maxvalue) +); +alter table table_interval1 add partition table_interval1_p5 start ('2020-08-01') end ('2020-09-01'); + +create table table_list1 (id int, a date, b varchar) +partition by list (id) +( + partition table_list1_p1 values (1, 2, 3, 4), + partition table_list1_p2 values (5, 6, 7, 8), + partition table_list1_p3 values (9, 10, 11, 12) +); +alter table table_list1 add partition table_list1_p4 values (13, 14, 15, 16); +alter table table_list1 add partition table_list1_p5 values (default); + +create table table_list2 (id int, a date, b varchar) +partition by list (b) +( + partition table_list2_p1 values ('1', '2', '3', '4'), + partition table_list2_p2 values ('5', '6', '7', '8'), + partition table_list2_p3 values ('9', '10', '11', '12') +); +alter table table_list2 add partition table_list2_p4 values 
('13', '14', '15', '16'); +alter table table_list2 add partition table_list2_p5 values ('DEFAULT'); +alter table table_list2 add partition table_list2_p6 values ('default'); +alter table table_list2 add partition table_list2_p7 values (default); + + +create table table_list3 (id int, a date, b varchar) +partition by list (id, b) +( + partition table_list3_p1 values ((1, 'a'), (2,'b'), (3,'c'), (4,'d')) , + partition table_list3_p2 values ((5, 'a'), (6,'b'), (7,'c'), (8,'d')) + +); +alter table table_list3 add partition table_list3_p3 values ((15, 'a'), (16,'b'), (17,'c'), (18,'d')); +alter table table_list3 add partition table_list3_p4 values (default); + +create table table_hash1 (id int, a date, b varchar) +partition by hash (id) +( + partition table_hash1_p1, + partition table_hash1_p2, + partition table_hash1_p3 +); + + +CREATE TABLE list_hash_2 ( + col_1 integer primary key, + col_2 integer, + col_3 character varying(30) unique, + col_4 integer +) +WITH (orientation=row, compression=no) +PARTITION BY LIST (col_2) SUBPARTITION BY HASH (col_3) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + ( + SUBPARTITION p_hash_1_1, + SUBPARTITION p_hash_1_2, + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_hash_3_1, + SUBPARTITION p_hash_3_2 + ), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_hash_4_1, + SUBPARTITION p_hash_4_2, + SUBPARTITION p_hash_4_3, + SUBPARTITION p_hash_4_4, + SUBPARTITION p_hash_4_5 + ), + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40), + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_hash_6_1, + SUBPARTITION p_hash_6_2, + SUBPARTITION p_hash_6_3, + SUBPARTITION p_hash_6_4, + SUBPARTITION p_hash_6_5 + ), + PARTITION p_list_7 VALUES (DEFAULT) +); + +alter table list_hash_2 add partition p_list_8 values (51,52,53,54,55,56,57,58,59,60) 
(subpartition p_hash_8_1, subpartition p_hash_8_2, subpartition p_hash_8_3); + +-- drop table table_list3; +create table table_list3 (id int, a date, b varchar) +partition by list (id, b) +( + partition table_list3_p1 values ((1, 'a'), (2,'b'), (3,'c'), (4,'d')) , + partition table_list3_p2 values ((5,'a'), (6,'b'), (7,'NULL'), (8,NULL)) +); + +alter table table_list3 add partition table_list3_p3 values ((15, 'a'), (16,'default'), (17,'NULL'), (18,NULL)); + +alter table table_list3 add partition table_list3_p4 values (default); + +CREATE TABLE range_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer2_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01') + ) +); + +INSERT INTO range_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL; +ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN 
(1500) + ( + SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01') + ); +ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01'); + +CREATE TABLE range2_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id, product_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01', 200), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01', 500), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01', 800), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01', 1200) +); + +INSERT INTO range2_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range2_sales_idx ON range2_sales(product_id) LOCAL; + +ALTER TABLE range2_sales TRUNCATE PARTITION time_2008; +ALTER TABLE range2_sales TRUNCATE PARTITION FOR VALUES('2011-04-01', 700) ; + +ALTER TABLE range2_sales DROP PARTITION time_2009; +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01', 600); + +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(100), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION 
customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); + +INSERT INTO range_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL; +ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer5_channel4 VALUES ('9') + ); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X', 'A', 'bbb'); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel6 VALUES ('NULL', 'asdasd', 'hahaha'); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel7 VALUES (NULL); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel8 VALUES ('DEFAULT', 'wawawa'); +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel9 VALUES (DEFAULT); +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel9; + +ALTER TABLE range_list_sales SPLIT partition customer4 INTO ( + partition customer4_p1 values less than (900) + ( + subpartition customer4_p1_s1 VALUES ('11'), + subpartition customer4_p1_s2 VALUES ('12') + ), + partition customer4_p2 values less than (1000) + ( + subpartition 
customer4_p2_s1 VALUES ('11'), + subpartition customer4_p2_s2 VALUES ('12') + ) +); + +ALTER TABLE range_list_sales truncate partition customer2 update global index; +ALTER TABLE range_list_sales truncate partition for (300); +ALTER TABLE range_list_sales truncate partition customer5_channel3; + +ALTER TABLE range_list_sales DROP PARTITION customer2; +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1; + + +create table test_list (col1 int, col2 int) +partition by list(col1) +( +partition p1 values (2000), +partition p2 values (3000), +partition p3 values (4000), +partition p4 values (5000) +); + +INSERT INTO test_list VALUES(2000, 2000); +INSERT INTO test_list VALUES(3000, 3000); +alter table test_list add partition p5 values (6000); +INSERT INTO test_list VALUES(6000, 6000); + +create table t1 (col1 int, col2 int); + +alter table test_list exchange partition (p1) with table t1 VERBOSE; +alter table test_list truncate partition p2; +alter table test_list drop partition p5; + + +create table test_hash (col1 int, col2 int) +partition by hash(col1) +( +partition p1, +partition p2 +); + +INSERT INTO test_hash VALUES(1, 1); +INSERT INTO test_hash VALUES(2, 2); +INSERT INTO test_hash VALUES(3, 3); +INSERT INTO test_hash VALUES(4, 4); + +alter table test_hash exchange partition (p1) with table t1 WITHOUT VALIDATION; + +alter table test_hash truncate partition p2; + + +CREATE TABLE interval_sales +( + prod_id NUMBER(6), + cust_id NUMBER, + time_id DATE, + channel_id CHAR(1), + promo_id NUMBER(6), + quantity_sold NUMBER(3), + amount_sold NUMBER(10, 2) +) + PARTITION BY RANGE (time_id) + INTERVAL + ('1 MONTH') +( + PARTITION p0 VALUES LESS THAN (TO_DATE('1-1-2008', 'DD-MM-YYYY')), + PARTITION p1 VALUES LESS THAN (TO_DATE('6-5-2008', 'DD-MM-YYYY')) +); + +alter table interval_sales split partition p0 at (to_date('2007-02-10', 'YYYY-MM-DD')) into (partition p0_1, partition p0_2); + +alter table interval_sales split partition p0_1 into (partition p0_1_1 values 
less than (TO_DATE('1-1-2005', 'DD-MM-YYYY')), partition p0_1_2 values less than(TO_DATE('1-1-2006', 'DD-MM-YYYY')) ); + +alter table interval_sales split partition p0_2 into (partition p0_2_1 START (TO_DATE('8-5-2007', 'DD-MM-YYYY'), partition p0_2_2 START (TO_DATE('9-5-2007', 'DD-MM-YYYY')); + + +insert into interval_sales +values (1, 1, to_date('9-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('11-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('11-2-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('20-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('05-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('08-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('05-4-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('05-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-9-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-11-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-12-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-01-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-5-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-6-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-7-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales +values (1, 1, to_date('04-9-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); + 
+alter table interval_sales merge partitions p0_1, p0_2, p1 into partition p01; +alter table interval_sales merge partitions sys_p6, sys_p7, sys_p8 into partition sys_p6_p7_p8; +ALTER TABLE interval_sales RESET PARTITION; + +CREATE TABLE interval_sales1 +( + prod_id NUMBER(6), + cust_id NUMBER, + time_id DATE, + channel_id CHAR(1), + promo_id NUMBER(6), + quantity_sold NUMBER(3), + amount_sold NUMBER(10, 2) +) + PARTITION BY RANGE (time_id) + INTERVAL +('1 MONTH') +(PARTITION p0 VALUES LESS THAN (TO_DATE('1-1-2008', 'DD-MM-YYYY')), + PARTITION p1 VALUES LESS THAN (TO_DATE('6-5-2008', 'DD-MM-YYYY')) +); +create index interval_sales1_time_id_idx on interval_sales1 (time_id) local; +create index interval_sales1_quantity_sold_idx on interval_sales1 (quantity_sold) local; +alter table interval_sales1 split partition p0 at (to_date('2007-02-10', 'YYYY-MM-DD')) into (partition p0_1, partition p0_2); + +insert into interval_sales1 +values (1, 1, to_date('9-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('11-2-2007', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('11-2-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('20-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('05-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('08-2-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('05-4-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('05-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-9-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-11-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, 
to_date('04-12-2008', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-01-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-5-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-6-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-7-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-8-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); +insert into interval_sales1 +values (1, 1, to_date('04-9-2009', 'DD-MM-YYYY'), 'a', 1, 1, 1); + +alter table interval_sales1 merge partitions p0_1, p0_2, p1 into partition p01 UPDATE GLOBAL INDEX; + + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); + +alter table range_range split subpartition p_201901_b at (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +); + +alter table range_range split subpartition p_201902_b at (3) into +( + subpartition p_201902_c, + subpartition p_201902_d +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + 
user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '3', '1', 1); + +alter table list_list split subpartition p_201901_b values (2) into +( + subpartition p_201901_b, + subpartition p_201901_c +); + +alter table list_list split subpartition p_201902_b values (2, 3) into +( + subpartition p_201902_b, + subpartition p_201902_c +); + + +CREATE TABLE range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') +); +CREATE INDEX range_sales_idx1 ON range_sales(product_id) LOCAL; +CREATE INDEX range_sales_idx2 ON range_sales(time_id) GLOBAL; +EXECUTE partition_get_partitionno('range_sales'); +ALTER TABLE range_sales ADD PARTITION time_default VALUES LESS THAN (MAXVALUE); +ALTER TABLE range_sales DROP PARTITION time_2008; +ALTER TABLE range_sales SPLIT PARTITION time_default AT ('2013-01-01') INTO (PARTITION time_2012, PARTITION time_default_temp); +ALTER TABLE range_sales RENAME PARTITION time_default_temp TO time_default; +ALTER TABLE range_sales SPLIT 
PARTITION time_default +INTO (PARTITION time_2013 VALUES LESS THAN ('2014-01-01'), + PARTITION time_2014 VALUES LESS THAN ('2015-01-01'), + PARTITION time_default_temp VALUES LESS THAN (MAXVALUE)); +ALTER TABLE range_sales MERGE PARTITIONS time_2009, time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX; +ALTER TABLE range_sales TRUNCATE PARTITION time_2011 UPDATE GLOBAL INDEX; +ALTER TABLE range_sales disable row movement; +ALTER TABLE range_sales enable row movement; + +CREATE TABLE interval_sales2 +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2)DEFAULT CHARACTER SET +) +PARTITION BY RANGE (time_id) INTERVAL ('1 year') +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01') +); +CREATE INDEX interval_sales2_idx1 ON interval_sales2(product_id) LOCAL; +CREATE INDEX interval_sales2_idx2 ON interval_sales2(time_id) GLOBAL; + +-- add/drop partition +INSERT INTO interval_sales2 VALUES (1,1,'2013-01-01','A',1,1,1); +INSERT INTO interval_sales2 VALUES (2,2,'2012-01-01','B',2,2,2); +ALTER TABLE interval_sales2 DROP PARTITION time_2008; + + +-- merge/split partition +ALTER TABLE interval_sales2 SPLIT PARTITION time_2009 AT ('2009-01-01') INTO (PARTITION time_2008, PARTITION time_2009_temp); +ALTER TABLE interval_sales2 RENAME PARTITION time_2009_temp TO time_2009; +ALTER TABLE interval_sales2 MERGE PARTITIONS time_2009, time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX; + + +-- truncate partition with gpi +ALTER TABLE interval_sales2 TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX; + + +--reset +ALTER TABLE interval_sales2 RESET PARTITION; +ALTER TABLE interval_sales2 disable row movement; + +create table unit_varchar(a1 varchar default '1', a2 varchar(2), a3 varchar(2 byte) default 'ye', a4 varchar(2 character) default '���', 
a5 varchar(2 char) default 'Ĭ��'); +create table unit_varchar2(a1 varchar2 default '1', a2 varchar2(2) default 'ha', a3 varchar2(2 byte), a4 varchar2(2 character) default '���', a5 varchar2(2 char) default 'Ĭ��'); +create table unit_char(a1 char default '1', a2 char(2) default 'ha', a3 char(2 byte) default 'ye', a4 char(2 character), a5 char(2 char) default 'Ĭ��'); +create table unit_nchar(a1 nchar default '1', a2 nchar(2) default 'ha', a3 nchar(2) default 'ye', a4 nchar(2) default '���', a5 nchar(2)); +create table unit_nvarchar2(a1 nvarchar2 default '1', a2 nvarchar2(2) default 'ha', a3 nvarchar2(2) default 'ye', a4 nvarchar2(2) default '���', a5 nvarchar2(2)); + +insert into unit_varchar (a1) values ('1111111111123���մ�����11111111111111111111111111111111�������մ�ʵ��ʵ1'); +insert into unit_varchar (a2) values ('12 '); +-- exceed +insert into unit_varchar (a2) values ('��'); +insert into unit_varchar (a3) values ('12 '); +-- exceed +insert into unit_varchar (a3) values ('��'); +insert into unit_varchar (a4) values ('��2 '); +-- exceed +insert into unit_varchar (a4) values ('��23 '); +-- exceed +insert into unit_varchar (a4) values ('223 '); +insert into unit_varchar (a5) values ('��2 '); +-- exceed +insert into unit_varchar (a5) values ('��23 '); +-- exceed +insert into unit_varchar (a5) values ('223 '); +-- exceed +update unit_varchar set a2='�� '; +update unit_varchar set a3='��a '; +-- exceed +update unit_varchar set a5='������'; +update unit_varchar set a5='����'; +select * from unit_varchar; + +insert into unit_varchar2 (a1) values ('��111111111123���մ�����11111111111111111111111111111111�������մ�ʵ��ʵ1'); +insert into unit_varchar2 (a2) values ('12 '); +-- exceed +insert into unit_varchar2 (a2) values ('��'); +insert into unit_varchar2 (a3) values ('12 '); +-- exceed +insert into unit_varchar2 (a3) values ('��'); +insert into unit_varchar2 (a4) values ('��2 '); +-- exceed +insert into unit_varchar2 (a4) values ('��23 '); +-- exceed +insert into unit_varchar2 
(a4) values ('223 '); +insert into unit_varchar2 (a5) values ('��2 '); +-- exceed +insert into unit_varchar2 (a5) values ('��23 '); +-- exceed +insert into unit_varchar2 (a5) values ('223 '); +ALTER TABLE unit_varchar2 ALTER COLUMN a2 SET data TYPE char(1 char) USING a2::char(1 char); +insert into unit_varchar2 (a2) values ('һ '); +alter table unit_varchar2 modify column a3 varchar2(2 char) default '�ں�'; +-- exceed +insert into unit_varchar2 (a2) values ('һe'); +insert into unit_varchar2 (a1) values(default); +select * from unit_varchar2; + +-- exceed +insert into unit_char (a1) values ('1111111111123���մ�����11111111111111111111111111111111�������մ�ʵ��ʵ1'); +-- exceed +insert into unit_nchar (a1) values ('�� '); +insert into unit_nchar (a1) values ('1 '); +insert into unit_char (a2) values ('12 '); +-- exceed +insert into unit_char (a2) values ('��'); +insert into unit_char (a3) values ('12 '); +-- exceed +insert into unit_char (a3) values ('��'); +insert into unit_char (a4) values ('��2 '); +-- exceed +insert into unit_char (a4) values ('��23 '); +-- exceed +insert into unit_char (a4) values ('223 '); +insert into unit_char (a5) values ('��2 '); +-- exceed +insert into unit_char (a5) values ('��23 '); +-- exceed +insert into unit_char (a5) values ('223 '); +ALTER table unit_char ADD COLUMN a6 varchar(3 char) default 'Ĭ��ֵ'; +insert into unit_char (a6) values ('��1a '); +-- exceed +insert into unit_char (a6) values ('1234'); +update unit_char set a4='��'; +-- execeed +update unit_char set a5='һ��3'; +select * from unit_char; + +-- exceed +insert into unit_nchar (a1) values ('1111111111123���մ�����11111111111111111111111111111111�������մ�ʵ��ʵ1'); +insert into unit_nchar (a1) values ('�� '); +insert into unit_nchar (a2) values ('���� '); +-- exceed +insert into unit_nchar (a2) values ('123 '); +-- exceed +insert into unit_nchar (a2) values ('��'); +insert into unit_nchar (a3) values ('12 '); +insert into unit_nchar (a3) values ('��'); +insert into unit_nchar (a4) 
values ('��2 '); +-- exceed +insert into unit_nchar (a4) values ('��23 '); +-- exceed +insert into unit_nchar (a4) values ('223 '); +insert into unit_nchar (a5) values ('��2 '); +-- exceed +insert into unit_nchar (a5) values ('��23 '); +-- exceed +insert into unit_nchar (a5) values ('223 '); + +-- exceed +insert into unit_nvarchar2 (a1) values ('1111111111123���մ�����11111111111111111111111111111111�������մ�ʵ��ʵ1'); +insert into unit_nvarchar2 (a1) values ('�� '); +insert into unit_nvarchar2 (a2) values ('���� '); +-- exceed +insert into unit_nvarchar2 (a2) values ('123 '); +-- exceed +insert into unit_nvarchar2 (a2) values ('��'); +insert into unit_nvarchar2 (a3) values ('12 '); +insert into unit_nvarchar2 (a3) values ('��'); +insert into unit_nvarchar2 (a4) values ('��2 '); +insert into unit_nvarchar2 (a5) values ('��2 '); +-- exceed +insert into unit_nvarchar2 (a5) values ('��23 '); +-- exceed +insert into unit_nvarchar2 (a5) values ('223 '); + + + +create table test_char(col char(20 char)); +insert into test_char values ('����һ������'), ('asdһ��������bsd'), ('һ�����������߰˾�ʮһ�����������߰˾�ʮ '), ('һ2 '); +select col, length(col), lengthb(col) from test_char; + +create table test_varchar(col varchar(20 char)); +insert into test_varchar values ('����һ������'), ('asdһ��������bsd'), ('һ�����������߰˾�ʮһ�����������߰˾�ʮ '), ('һ2 '); +select col, length(col), lengthb(col) from test_varchar; + +create table test_charb(col char(20)); +insert into test_charb values ('����һ������'), ('asdһ��������bs '), ('һ���������� '), ('һ2 '); +select col, length(col), lengthb(col) from test_charb; + +create table test_varcharb(col varchar(20)); +insert into test_varcharb values ('����һ������'), ('asdһ��������bs '), ('һ���������� '), ('һ2 '); +select col, length(col), lengthb(col) from test_varcharb; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_002.sql 
b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_002.sql new file mode 100644 index 0000000000..53940da925 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_002.sql @@ -0,0 +1,2011 @@ +-- +--FOR BLACKLIST FEATURE: REFERENCES/INHERITS/WITH OIDS/RULE/CREATE TYPE/DOMAIN is not supported. +-- + +-- +-- ALTER_TABLE +-- add attribute +-- + +CREATE TABLE atmp1 (initial int4); + +COMMENT ON TABLE tmp_wrong IS 'table comment'; +COMMENT ON TABLE atmp1 IS 'table comment'; +COMMENT ON TABLE atmp1 IS NULL; + +ALTER TABLE atmp1 ADD COLUMN xmin integer; -- fails + +ALTER TABLE atmp1 ADD COLUMN a int4 default 3; + +ALTER TABLE atmp1 ADD COLUMN b name; + +ALTER TABLE atmp1 ADD COLUMN c text; + +ALTER TABLE atmp1 ADD COLUMN d float8; + +ALTER TABLE atmp1 ADD COLUMN e float4; + +ALTER TABLE atmp1 ADD COLUMN f int2; + +ALTER TABLE atmp1 ADD COLUMN g polygon; + +ALTER TABLE atmp1 ADD COLUMN h abstime; + +ALTER TABLE atmp1 ADD COLUMN i char; + +ALTER TABLE atmp1 ADD COLUMN j abstime[]; + +ALTER TABLE atmp1 ADD COLUMN k int4; + +ALTER TABLE atmp1 ADD COLUMN l tid; + +ALTER TABLE atmp1 ADD COLUMN m xid; + +ALTER TABLE atmp1 ADD COLUMN n oidvector; + +--ALTER TABLE atmp1 ADD COLUMN o lock; +ALTER TABLE atmp1 ADD COLUMN p smgr; + +ALTER TABLE atmp1 ADD COLUMN q point; + +ALTER TABLE atmp1 ADD COLUMN r lseg; + +ALTER TABLE atmp1 ADD COLUMN s path; + +ALTER TABLE atmp1 ADD COLUMN t box; + +ALTER TABLE atmp1 ADD COLUMN u tinterval; + +ALTER TABLE atmp1 ADD COLUMN v timestamp; + +ALTER TABLE atmp1 ADD COLUMN w interval; + +ALTER TABLE atmp1 ADD COLUMN x float8[]; + +ALTER TABLE atmp1 ADD COLUMN y float4[]; + +ALTER TABLE atmp1 ADD COLUMN z int2[]; + +INSERT INTO atmp1 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 
2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp1; + +----drop table tmp; + +-- the wolf bug - schema mods caused inconsistent row descriptors +CREATE TABLE atmp2 ( + initial int4 +); + +ALTER TABLE atmp2 ADD COLUMN a int4; + +ALTER TABLE atmp2 ADD COLUMN b name; + +ALTER TABLE atmp2 ADD COLUMN c text; + +ALTER TABLE atmp2 ADD COLUMN d float8; + +ALTER TABLE atmp2 ADD COLUMN e float4; + +ALTER TABLE atmp2 ADD COLUMN f int2; + +ALTER TABLE atmp2 ADD COLUMN g polygon; + +ALTER TABLE atmp2 ADD COLUMN h abstime; + +ALTER TABLE atmp2 ADD COLUMN i char; + +ALTER TABLE atmp2 ADD COLUMN j abstime[]; + +ALTER TABLE atmp2 ADD COLUMN k int4; + +ALTER TABLE atmp2 ADD COLUMN l tid; + +ALTER TABLE atmp2 ADD COLUMN m xid; + +ALTER TABLE atmp2 ADD COLUMN n oidvector; + +--ALTER TABLE atmp2 ADD COLUMN o lock; +ALTER TABLE atmp2 ADD COLUMN p smgr; + +ALTER TABLE atmp2 ADD COLUMN q point; + +ALTER TABLE atmp2 ADD COLUMN r lseg; + +ALTER TABLE atmp2 ADD COLUMN s path; + +ALTER TABLE atmp2 ADD COLUMN t box; + +ALTER TABLE atmp2 ADD COLUMN u tinterval; + +ALTER TABLE atmp2 ADD COLUMN v timestamp; + +ALTER TABLE atmp2 ADD COLUMN w interval; + +ALTER TABLE atmp2 ADD COLUMN x float8[]; + +ALTER TABLE atmp2 ADD COLUMN y float4[]; + +ALTER TABLE atmp2 ADD COLUMN z int2[]; + +INSERT INTO atmp2 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp2; + 
+----drop table tmp; + + +-- +-- rename - check on both non-temp and temp tables +-- +CREATE TABLE atmp3 (regtable int); +-- Enforce use of COMMIT instead of 2PC for temporary objects +\set VERBOSITY verbose +-- CREATE TEMP TABLE tmp (tmptable int); + +ALTER TABLE atmp3 RENAME TO tmp_new; + +-- SELECT * FROM tmp; +-- SELECT * FROM tmp_new; + +-- ALTER TABLE tmp RENAME TO tmp_new2; + +SELECT * FROM tmp_new; +-- SELECT * FROM tmp_new2; + +----drop table tmp_new; +-- ----drop table tmp_new2; +CREATE TABLE atmp4 (ch1 character(1)); +insert into atmp4 values ('asdv'); +----drop table tmp; +\set VERBOSITY default + + +CREATE TABLE onek ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); +CREATE INDEX onek_unique1 ON onek USING btree(unique1 int4_ops); + +CREATE TABLE tenk1 ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); + +CREATE TABLE stud_emp ( + name text, + age int4, + location point, + salary int4, + manager name, + gpa float8, + percent int4 +) with(autovacuum_enabled = off); + +-- ALTER TABLE ... 
RENAME on non-table relations +-- renaming indexes (FIXME: this should probably test the index's functionality) +ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1; + +ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1; + +-- renaming views +CREATE VIEW tmp_view (unique1) AS SELECT unique1 FROM tenk1; +ALTER TABLE tmp_view RENAME TO tmp_view_new; + +-- hack to ensure we get an indexscan here +ANALYZE tenk1; +set enable_seqscan to off; +set enable_bitmapscan to off; +-- 5 values, sorted +SELECT unique1 FROM tenk1 WHERE unique1 < 5 ORDER BY unique1; +reset enable_seqscan; +reset enable_bitmapscan; + +DROP VIEW tmp_view_new; +-- toast-like relation name +alter table stud_emp rename to pg_toast_stud_emp; +alter table pg_toast_stud_emp rename to stud_emp; + +-- renaming index should rename constraint as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraint +ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0); +ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo; + +-- renaming constraint should rename index as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +DROP INDEX onek_unique1_constraint; -- to see whether it's there +ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo; +DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraints vs. 
inheritance +CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int); +\d constraint_rename_test +CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test); +create table constraint_rename_test2 (like constraint_rename_test ); +\d constraint_rename_test2 +ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail +ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok +\d constraint_rename_test +\d constraint_rename_test2 +ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT; +ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok +\d constraint_rename_test +\d constraint_rename_test2 +ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a); +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok +\d constraint_rename_test +\d constraint_rename_test2 +----drop table constraint_rename_test2; +----drop table constraint_rename_test; +ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a); + +-- FOREIGN KEY CONSTRAINT adding TEST + +CREATE TABLE tmp2 (a int primary key); + +CREATE TABLE tmp3 (a int, b int); + +CREATE TABLE tmp4 (a int, b int, unique(a,b)); + +CREATE TABLE tmp5 (a int, b int); + +-- Insert rows into tmp2 (pktable) +INSERT INTO tmp2 values (1); +INSERT INTO tmp2 values (2); +INSERT INTO tmp2 values (3); +INSERT INTO tmp2 values (4); + +-- Insert rows into tmp3 +INSERT INTO tmp3 values (1,10); +INSERT INTO tmp3 values (1,20); +INSERT INTO tmp3 values (5,50); + +-- Try (and fail) to add constraint due to invalid source columns +ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full; + +-- Try (and fail) to add constraint due to 
invalide destination columns explicitly given +ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full; + +-- Try (and fail) to add constraint due to invalid data +ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; + +-- Delete failing row +alter table tmp3 replica identity full; +DELETE FROM tmp3 where a=5; + +-- Try (and succeed) +ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; +ALTER TABLE tmp3 drop constraint tmpconstr; + +INSERT INTO tmp3 values (5,50); + +-- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate +ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full NOT VALID; +ALTER TABLE tmp3 validate constraint tmpconstr; + +-- Delete failing row +DELETE FROM tmp3 where a=5; + +-- Try (and succeed) and repeat to show it works on already valid constraint +ALTER TABLE tmp3 validate constraint tmpconstr; +ALTER TABLE tmp3 validate constraint tmpconstr; + +-- Try a non-verified CHECK constraint +ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail +ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds +ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails +DELETE FROM tmp3 WHERE NOT b > 10; +ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds + +-- Test inherited NOT VALID CHECK constraints +select * from tmp3; + +-- Try (and fail) to create constraint from tmp5(a) to tmp4(a) - unique constraint on +-- tmp4 is a,b + +ALTER TABLE tmp5 add constraint tmpconstr foreign key(a) references tmp4(a) match full; + +----drop table tmp5; + +----drop table tmp4; + +----drop table tmp3; + +----drop table tmp2; + +-- NOT VALID with plan invalidation -- ensure we don't use a constraint for +-- exclusion until validated +set constraint_exclusion TO 
'partition'; +create table nv_parent (d date); +create table nv_child_2010 () inherits (nv_parent); +create table nv_child_2010 (like nv_parent); +create table nv_child_2011 () inherits (nv_parent); +create table nv_child_2011 (like nv_parent including all); +alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid; +alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid; +explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31'; +create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent); +explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date; +explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; +-- after validation, the constraint should be used +alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check; +explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; + + +-- Foreign key adding test with mixed types + +-- Note: these tables are TEMP to avoid name conflicts when this test +-- is run in parallel with foreign_key.sql. 
+ +CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY); +INSERT INTO PKTABLE VALUES(42); +CREATE TABLE FKTABLE (ftest1 inet); +-- This next should fail, because int=inet does not exist +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; +-- This should also fail for the same reason, but here we +-- give the column name +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1); +----drop table FKTABLE; +-- This should succeed, even though they are different types, +-- because int=int8 exists and is a member of the integer opfamily +CREATE TABLE FKTABLE1 (ftest1 int8); +ALTER TABLE FKTABLE1 ADD FOREIGN KEY(ftest1) references pktable; +-- Check it actually works +INSERT INTO FKTABLE1 VALUES(42); -- should succeed +INSERT INTO FKTABLE1 VALUES(43); -- should fail +----drop table FKTABLE; +-- This should fail, because we'd have to cast numeric to int which is +-- not an implicit coercion (or use numeric=numeric, but that's not part +-- of the integer opfamily) +CREATE TABLE FKTABLE2 (ftest1 numeric); +ALTER TABLE FKTABLE2 ADD FOREIGN KEY(ftest1) references pktable; +----drop table FKTABLE; +----drop table PKTABLE; +-- On the other hand, this should work because int implicitly promotes to +-- numeric, and we allow promotion on the FK side +CREATE TABLE PKTABLE1 (ptest1 numeric PRIMARY KEY); +INSERT INTO PKTABLE1 VALUES(42); +CREATE TABLE FKTABLE3 (ftest1 int); +ALTER TABLE FKTABLE3 ADD FOREIGN KEY(ftest1) references pktable1; +-- Check it actually works +INSERT INTO FKTABLE3 VALUES(42); -- should succeed +INSERT INTO FKTABLE3 VALUES(43); -- should fail +----drop table FKTABLE; +----drop table PKTABLE; + +CREATE TABLE PKTABLE2 (ptest1 int, ptest2 inet, + PRIMARY KEY(ptest1, ptest2)); +-- This should fail, because we just chose really odd types +CREATE TABLE FKTABLE4 (ftest1 cidr, ftest2 timestamp); +ALTER TABLE FKTABLE4 ADD FOREIGN KEY(ftest1, ftest2) references pktable2; +----drop table FKTABLE; +-- Again, so should this... 
+CREATE TABLE FKTABLE5 (ftest1 cidr, ftest2 timestamp); +ALTER TABLE FKTABLE5 ADD FOREIGN KEY(ftest1, ftest2) + references pktable2(ptest1, ptest2); +----drop table FKTABLE; +-- This fails because we mixed up the column ordering +CREATE TABLE FKTABLE6 (ftest1 int, ftest2 inet); +ALTER TABLE FKTABLE6 ADD FOREIGN KEY(ftest1, ftest2) + references pktable2(ptest2, ptest1); +-- As does this... +ALTER TABLE FKTABLE6 ADD FOREIGN KEY(ftest2, ftest1) + references pktable2(ptest1, ptest2); + +-- temp tables should go away by themselves, need not drop them. + +-- test check constraint adding + +create table at1acc1 ( test int ); +-- add a check constraint +alter table at1acc1 add constraint at1acc_test1 check (test>3); +-- should fail +insert into at1acc1 (test) values (2); +-- should succeed +insert into at1acc1 (test) values (4); +----drop table atacc1; + +-- let's do one where the check fails when added +create table at2acc1 ( test int ); +-- insert a soon to be failing row +insert into at2acc1 (test) values (2); +-- add a check constraint (fails) +alter table at2acc1 add constraint at2acc_test1 check (test>3); +insert into at2acc1 (test) values (4); +----drop table atacc1; + +-- let's do one where the check fails because the column doesn't exist +create table at3acc1 ( test int ); +-- add a check constraint (fails) +alter table at3acc1 add constraint at3acc_test1 check (test1>3); +----drop table atacc1; + +-- something a little more complicated +create table at4acc1 ( test int, test2 int, test3 int); +-- add a check constraint (fails) +alter table at4acc1 add constraint at4acc_test1 check (test+test23), test2 int); +alter table at5acc1 add check (test2>test); +-- should fail for $2 +insert into at5acc1 (test2, test) values (3, 4); +----drop table atacc1; + +-- inheritance related tests +create table at6acc1 (test int); +create table at6acc2 (test2 int); +create table at6acc3 (test3 int) inherits (at6acc1, at6acc2); +alter table at6acc2 add constraint foo check (test2>0); 
+-- fail and then succeed on atacc2 +insert into at6acc2 (test2) values (-3); +insert into at6acc2 (test2) values (3); +-- fail and then succeed on atacc3 +insert into at6acc3 (test2) values (-3); +insert into at6acc3 (test2) values (3); +----drop table atacc3; +----drop table atacc2; +----drop table atacc1; + +-- same things with one created with INHERIT +create table at7acc1 (test int); +create table at7acc2 (test2 int); +create table at7acc3 (test3 int) inherits (at7acc1, at7acc2); +alter table at7acc3 no inherit at7acc2; +-- fail +alter table at7acc3 no inherit at7acc2; +-- make sure it really isn't a child +insert into at7acc3 (test2) values (3); +select test2 from atacc2; +-- fail due to missing constraint +alter table at7acc2 add constraint foo check (test2>0); +alter table at7acc3 inherit atacc2; +-- fail due to missing column +alter table at7acc3 rename test2 to testx; +alter table at7acc3 inherit atacc2; +-- fail due to mismatched data type +alter table at7acc3 add test2 bool; +alter table at7acc3 inherit atacc2; +alter table at7acc3 drop test2; +-- succeed +alter table at7acc3 add test2 int; +alter table at7acc3 replica identity full; +update at7acc3 set test2 = 4 where test2 is null; +alter table at7acc3 add constraint foo check (test2>0); +alter table at7acc3 inherit at7acc2; +-- fail due to duplicates and circular inheritance +alter table at7acc3 inherit at7acc2; +alter table at7acc2 inherit at7acc3; +alter table at7acc2 inherit at7acc2; +-- test that we really are a child now (should see 4 not 3 and cascade should go through) +select test2 from at7acc2; +----drop table atacc2 cascade; +----drop table atacc1; + +-- adding only to a parent is allowed as of 9.2 + +create table at8acc1 (test int); +create table at8acc2 (test2 int) inherits (at8acc1); +-- ok: +alter table at8acc1 add constraint foo check (test>0) no inherit; +-- check constraint is not there on child +insert into at8acc2 (test) values (-3); +-- check constraint is there on parent +insert 
into at8acc1 (test) values (-3); +insert into at8acc1 (test) values (3); +-- fail, violating row: +alter table at8acc2 add constraint foo check (test>0) no inherit; +----drop table atacc2; +----drop table atacc1; + +-- test unique constraint adding + +create table at9acc1 ( test int ) with oids; +-- add a unique constraint +alter table at9acc1 add constraint at9acc_test1 unique (test); +-- insert first value +insert into at9acc1 (test) values (2); +-- should fail +insert into at9acc1 (test) values (2); +-- should succeed +insert into at9acc1 (test) values (4); +-- try adding a unique oid constraint +alter table at9acc1 add constraint atacc_oid1 unique(oid); +-- try to create duplicates via alter table using - should fail +alter table at9acc1 alter column test type integer using 0; +----drop table atacc1; + +-- let's do one where the unique constraint fails when added +create table a1tacc1 ( test int ); +-- insert soon to be failing rows +insert into a1tacc1 (test) values (2); +insert into a1tacc1 (test) values (2); +-- add a unique constraint (fails) +alter table a1tacc1 add constraint a1tacc_test1 unique (test); +insert into a1tacc1 (test) values (3); +--drop table atacc1; + +-- let's do one where the unique constraint fails +-- because the column doesn't exist +create table a2tacc1 ( test int ); +-- add a unique constraint (fails) +alter table a2tacc1 add constraint a2tacc_test1 unique (test1); +--drop table atacc1; + +-- something a little more complicated +create table a2tacc1 ( test int, test2 int); +-- add a unique constraint +alter table a2tacc1 add constraint a2tacc_test1 unique (test, test2); +-- insert initial value +insert into a2tacc1 (test,test2) values (4,4); +-- should fail +insert into a2tacc1 (test,test2) values (4,4); +-- should all succeed +insert into a2tacc1 (test,test2) values (4,5); +insert into a2tacc1 (test,test2) values (5,4); +insert into a2tacc1 (test,test2) values (5,5); +--drop table atacc1; + +-- lets do some naming tests +create 
table a3tacc1 (test int, test2 int, unique(test)); +alter table a3tacc1 add unique (test2); +-- should fail for @@ second one @@ +insert into a3tacc1 (test2, test) values (3, 3); +insert into a3tacc1 (test2, test) values (2, 3); +--drop table atacc1; + +-- test primary key constraint adding + +create table a4tacc1 ( test int ) with oids; +-- add a primary key constraint +alter table a4tacc1 add constraint a4tacc_test1 primary key (test); +-- insert first value +insert into a4tacc1 (test) values (2); +-- should fail +insert into a4tacc1 (test) values (2); +-- should succeed +insert into a4tacc1 (test) values (4); +-- inserting NULL should fail +insert into a4tacc1 (test) values(NULL); +-- try adding a second primary key (should fail) +alter table a4tacc1 add constraint atacc_oid1 primary key(oid); +-- drop first primary key constraint +alter table a4tacc1 drop constraint a4tacc_test1 restrict; +-- try adding a primary key on oid (should succeed) +alter table a4tacc1 add constraint atacc_oid1 primary key(oid); +--drop table a4tacc1; + +-- let's do one where the primary key constraint fails when added +create table a5tacc1 ( test int ); +-- insert soon to be failing rows +insert into a5tacc1 (test) values (2); +insert into a5tacc1 (test) values (2); +-- add a primary key (fails) +alter table a5tacc1 add constraint a5tacc_test1 primary key (test); +insert into a5tacc1 (test) values (3); +--drop table a5tacc1; + +-- let's do another one where the primary key constraint fails when added +create table a6tacc1 ( test int ); +-- insert soon to be failing row +insert into a6tacc1 (test) values (NULL); +-- add a primary key (fails) +alter table a6tacc1 add constraint a6tacc_test1 primary key (test); +insert into a6tacc1 (test) values (3); +--drop table atacc1; + +-- let's do one where the primary key constraint fails +-- because the column doesn't exist +create table a7tacc1 ( test int ); +-- add a primary key constraint (fails) +alter table a7tacc1 add constraint 
a7tacc_test1 primary key (test1); +--drop table atacc1; + +-- adding a new column as primary key to a non-empty table. +-- should fail unless the column has a non-null default value. +create table a8tacc1 ( test int ); +insert into a8tacc1 (test) values (0); +-- add a primary key column without a default (fails). +alter table a8tacc1 add column test2 int primary key; +-- now add a primary key column with a default (succeeds). +alter table a8tacc1 add column test2 int default 0 primary key; +--drop table atacc1; + +-- something a little more complicated +create table a9tacc1 ( test int, test2 int); +-- add a primary key constraint +alter table a9tacc1 add constraint a9tacc_test1 primary key (test, test2); +-- try adding a second primary key - should fail +alter table a9tacc1 add constraint atacc_test2 primary key (test); +-- insert initial value +insert into a9tacc1 (test,test2) values (4,4); +-- should fail +insert into a9tacc1 (test,test2) values (4,4); +insert into a9tacc1 (test,test2) values (NULL,3); +insert into a9tacc1 (test,test2) values (3, NULL); +insert into a9tacc1 (test,test2) values (NULL,NULL); +-- should all succeed +insert into a9tacc1 (test,test2) values (4,5); +insert into a9tacc1 (test,test2) values (5,4); +insert into a9tacc1 (test,test2) values (5,5); +--drop table atacc1; + +-- lets do some naming tests +create table at10acc1 (test int, test2 int, primary key(test)); +-- only first should succeed +insert into at10acc1 (test2, test) values (3, 3); +insert into at10acc1 (test2, test) values (2, 3); +insert into at10acc1 (test2, test) values (1, NULL); +--drop table atacc1; + +-- alter table modify not null +-- try altering syscatlog should fail +alter table pg_class modify (relname not null enable); +alter table pg_class modify relname not null enable; +-- try altering non-existent table should fail +alter table non_existent modify (bar not null enable); +-- test alter table +create table test_modify (a int, b int); +alter table test_modify 
replica identity full; +alter table test_modify modify (b not null enable); +insert into test_modify(b) values (null); +insert into test_modify values (1, null); +alter table test_modify modify(b null); +insert into test_modify values (1, null); +alter table test_modify modify (b not null enable); +alter table test_modify replica identity full; +delete from test_modify; +alter table test_modify modify (a not null, b not null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (a null, b null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (b constraint ak not null); +delete from test_modify; +alter table test_modify modify (b constraint ak not null); +insert into test_modify values(1,1); +insert into test_modify values(1,null); +alter table test_modify modify (b constraint ak null); +insert into test_modify values(1,null); +alter table test_modify modify (a null, a not null); +-- try alter view should fail +create view test_modify_view as select * from test_modify; +alter table test_modify_view modify (a not null enable); +drop view test_modify_view; +--drop table test_modify; + +-- alter table / alter column [set/drop] not null tests +-- try altering system catalogs, should fail +alter table pg_class alter column relname drop not null; +alter table pg_class alter relname set not null; + +-- try altering non-existent table, should fail +alter table non_existent alter column bar set not null; +alter table non_existent alter column bar drop not null; + +-- test setting columns to null and not null and vice versa +-- test checking for null values and primary key +create table at11acc1 (test int not null) with oids; +alter table at11acc1 add constraint "atacc1_pkey" primary key (test); +alter table at11acc1 alter column test drop not null; +alter table at11acc1 drop constraint "atacc1_pkey"; +alter table at11acc1 alter column test drop 
not null; +insert into at11acc1 values (null); +alter table at11acc1 alter test set not null; +atler table at11acc1 replica identity full; +delete from at11acc1; +alter table at11acc1 alter test set not null; + +-- try altering a non-existent column, should fail +alter table at11acc1 alter bar set not null; +alter table at11acc1 alter bar drop not null; + +-- try altering the oid column, should fail +alter table at11acc1 alter oid set not null; +alter table at11acc1 alter oid drop not null; + +-- try creating a view and altering that, should fail +create view myview as select * from at11acc1; +alter table myview alter column test drop not null; +alter table myview alter column test set not null; +drop view myview; + +--drop table atacc1; + +-- test inheritance +create table parent (a int); +create table child1 (b varchar(255)) inherits (parent); +create table child1 (like parent); +alter table child1 add column (b varchar(255)); + +alter table parent alter a set not null; +insert into parent values (NULL); +insert into child1 (a, b) values (NULL, 'foo'); +alter table parent alter a drop not null; +insert into parent values (NULL); +insert into child1 (a, b) values (NULL, 'foo'); +alter table only parent alter a set not null; +alter table child1 alter a set not null; +alter table parent replica identity full; +alter table child1 replica identity full; +delete from parent; +alter table only parent alter a set not null; +insert into parent values (NULL); +alter table child1 alter a set not null; +insert into child1 (a, b) values (NULL, 'foo'); +delete from child1; +alter table child1 alter a set not null; +insert into child1 (a, b) values (NULL, 'foo'); +--drop table child; +--drop table parent; + +-- test setting and removing default values +create table def_test ( + c1 int4 default 5, + c2 text default 'initial_default' +); +insert into def_test default values; +alter table def_test alter column c1 drop default; +insert into def_test default values; +alter table 
def_test alter column c2 drop default; +insert into def_test default values; +alter table def_test alter column c1 set default 10; +alter table def_test alter column c2 set default 'new_default'; +insert into def_test default values; +select * from def_test order by 1, 2; + +-- set defaults to an incorrect type: this should fail +alter table def_test alter column c1 set default 'wrong_datatype'; +alter table def_test alter column c2 set default 20; + +-- set defaults on a non-existent column: this should fail +alter table def_test alter column c3 set default 30; + +-- set defaults on views: we need to create a view, add a rule +-- to allow insertions into it, and then alter the view to add +-- a default +create view def_view_test as select * from def_test; +create rule def_view_test_ins as + on insert to def_view_test + do instead insert into def_test select new.*; +insert into def_view_test default values; +alter table def_view_test alter column c1 set default 45; +insert into def_view_test default values; +alter table def_view_test alter column c2 set default 'view_default'; +insert into def_view_test default values; +select * from def_view_test order by 1, 2; + +drop rule def_view_test_ins on def_view_test; +drop view def_view_test; +--drop table def_test; + +-- alter table / drop column tests +-- try altering system catalogs, should fail +alter table pg_class drop column relname; + +-- try altering non-existent table, should fail +alter table nosuchtable drop column bar; + +-- test dropping columns +create table at12acc1 (a int4 not null, b int4, c int4 not null, d int4) with oids; +insert into at12acc1 values (1, 2, 3, 4); +alter table at12acc1 drop a; +alter table at12acc1 drop a; + +-- SELECTs +select * from at12acc1; +select * from at12acc1 order by a; +select * from at12acc1 order by "........pg.dropped.1........"; +select * from at12acc1 group by a; +select * from at12acc1 group by "........pg.dropped.1........"; +select at12acc1.* from at12acc1; +select 
a from at12acc1; +select at12acc1.a from at12acc1; +select b,c,d from at12acc1; +select a,b,c,d from at12acc1; +select * from at12acc1 where a = 1; +select "........pg.dropped.1........" from at12acc1; +select at12acc1."........pg.dropped.1........" from at12acc1; +select "........pg.dropped.1........",b,c,d from at12acc1; +select * from at12acc1 where "........pg.dropped.1........" = 1; +alter table at12acc1 replica identity full; +-- UPDATEs +update at12acc1 set a = 3; +update at12acc1 set b = 2 where a = 3; +update at12acc1 set "........pg.dropped.1........" = 3; +update at12acc1 set b = 2 where "........pg.dropped.1........" = 3; + +-- INSERTs +insert into at12acc1 values (10, 11, 12, 13); +insert into at12acc1 values (default, 11, 12, 13); +insert into at12acc1 values (11, 12, 13); +insert into at12acc1 (a) values (10); +insert into at12acc1 (a) values (default); +insert into at12acc1 (a,b,c,d) values (10,11,12,13); +insert into at12acc1 (a,b,c,d) values (default,11,12,13); +insert into at12acc1 (b,c,d) values (11,12,13); +insert into at12acc1 ("........pg.dropped.1........") values (10); +insert into at12acc1 ("........pg.dropped.1........") values (default); +insert into at12acc1 ("........pg.dropped.1........",b,c,d) values (10,11,12,13); +insert into at12acc1 ("........pg.dropped.1........",b,c,d) values (default,11,12,13); + +-- DELETEs +alter table at12acc1 replica identity full; +delete from at12acc1 where a = 3; +delete from at12acc1 where "........pg.dropped.1........" 
= 3; +delete from at12acc1; + +-- try dropping a non-existent column, should fail +alter table at12acc1 drop bar; + +-- try dropping the oid column, should succeed +alter table at12acc1 drop oid; + +-- try dropping the xmin column, should fail +alter table at12acc1 drop xmin; + +-- try creating a view and altering that, should fail +create view myview as select * from at12acc1; +select * from myview; +alter table myview drop d; +drop view myview; + +-- test some commands to make sure they fail on the dropped column +analyze at12acc1(a); +analyze at12acc1("........pg.dropped.1........"); +vacuum analyze at12acc1(a); +vacuum analyze at12acc1("........pg.dropped.1........"); +comment on column at12acc1.a is 'testing'; +comment on column at12acc1."........pg.dropped.1........" is 'testing'; +alter table at12acc1 alter a set storage plain; +alter table at12acc1 alter "........pg.dropped.1........" set storage plain; +alter table at12acc1 alter a set statistics 0; +alter table at12acc1 alter "........pg.dropped.1........" set statistics 0; +alter table at12acc1 alter a set default 3; +alter table at12acc1 alter "........pg.dropped.1........" set default 3; +alter table at12acc1 alter a drop default; +alter table at12acc1 alter "........pg.dropped.1........" drop default; +alter table at12acc1 alter a set not null; +alter table at12acc1 alter "........pg.dropped.1........" set not null; +alter table at12acc1 alter a drop not null; +alter table at12acc1 alter "........pg.dropped.1........" drop not null; +alter table at12acc1 rename a to x; +alter table at12acc1 rename "........pg.dropped.1........" to x; +alter table at12acc1 add primary key(a); +alter table at12acc1 add primary key("........pg.dropped.1........"); +alter table at12acc1 add unique(a); +alter table at12acc1 add unique("........pg.dropped.1........"); +alter table at12acc1 add check (a > 3); +alter table at12acc1 add check ("........pg.dropped.1........" 
> 3); +create table atacc2 (id int4 unique); +alter table at12acc1 add foreign key (a) references atacc2(id); +alter table at12acc1 add foreign key ("........pg.dropped.1........") references atacc2(id); +alter table atacc2 add foreign key (id) references at12acc1(a); +alter table atacc2 add foreign key (id) references at12acc1("........pg.dropped.1........"); +--drop table atacc2; +create index "testing_idx" on at12acc1(a); +create index "testing_idx" on at12acc1("........pg.dropped.1........"); + +-- test create as and select into +insert into at12acc1 values (21, 22, 23); +create table test1 as select * from at12acc1; +select * from test1; +--drop table test1; +select * into test2 from at12acc1; +select * from test2; +--drop table test2; + +-- try dropping all columns +alter table at12acc1 drop c; +alter table at12acc1 drop d; +alter table at12acc1 drop b; +select * from at12acc1; + +--drop table atacc1; +-- test constraint error reporting in presence of dropped columns +create table at13acc1 (id serial primary key, value int check (value < 10)); +insert into at13acc1(value) values (100); +alter table at13acc1 drop column value; +alter table at13acc1 add column value int check (value < 10); +insert into at13acc1(value) values (100); +insert into at13acc1(id, value) values (null, 0); +alter table at13acc1 alter column id set default 10; +drop sequence at13acc1_id_seq; + +-- test inheritance +create table parent (a int, b int, c int); +insert into parent values (1, 2, 3); +alter table parent drop a; +create table child (d varchar(255)) inherits (parent); +create table child2 as select * from parent; +alter table child2 add column d varchar(255); +insert into child2 values (12, 13, 'testing'); + +select * from parent order by b; +select * from child2; +alter table parent drop c; +select * from parent order by b; +select * from child2; + +--drop table child; +--drop table parent; + +-- test copy in/out +create table test (a int4, b int4, c int4); +insert into test 
values (1,2,3); +alter table test drop a; +copy test to stdout; +copy test(a) to stdout; +copy test("........pg.dropped.1........") to stdout; +copy test from stdin; +10 11 12 +\. +select * from test order by b; +copy test from stdin; +21 22 +\. +select * from test order by b; +copy test(a) from stdin; +copy test("........pg.dropped.1........") from stdin; +copy test(b,c) from stdin; +31 32 +\. +select * from test order by b; +--drop table test; + +-- test inheritance + +create table dropColumn (a int, b int, e int); +create table dropColumnChild (c int) inherits (dropColumn); +select * into dropColumnChild from dropColumn; +alter table dropColumnChild add column c int; +create table dropColumnAnother (d int) inherits (dropColumnChild); +select * into dropColumnAnother from dropColumnChild; +alter table dropColumnAnother add column d int; +-- these two should fail +alter table dropColumnchild drop column a; +alter table only dropColumnChild drop column b; + + + +-- these three should work +alter table only dropColumn drop column e; +alter table dropColumnChild drop column c; +alter table dropColumn drop column a; + +create table renameColumn (a int); +create table renameColumnChild (b int) inherits (renameColumn); +create table renameColumnChild as select * from renameColumn; +create table renameColumnAnother (c int) inherits (renameColumnChild); +select * into renameColumnAnother from renameColumnChild; +alter table renameColumnAnother add column b int; + +-- these three should fail +alter table renameColumnChild rename column a to d; +alter table only renameColumnChild rename column a to d; +alter table only renameColumn rename column a to d; + +-- these should work +alter table renameColumn rename column a to d; +alter table renameColumnChild rename column b to a; + +-- these should work +alter table if exists doesnt_exist_tab rename column a to d; +alter table if exists doesnt_exist_tab rename column b to a; + +-- this should work +alter table renameColumn add 
column w int; + +-- this should fail +alter table only renameColumn add column x int; + + +-- Test corner cases in dropping of inherited columns + +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); +create table c1 (like p1); +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +-- should work +alter table p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +select f1 from c1; +alter table c1 drop column f1; +select f1 from c1; + +--drop table p1 cascade; + +create table p11 (f1 int, f2 int); +create table c11 () inherits(p11); +create table c11 (like p11); +-- should be rejected since c1.f1 is inherited +alter table c11 drop column f1; +alter table p11 drop column f1; +-- c1.f1 is dropped now, since there is no local definition for it +select f1 from c11; + +--drop table p1 cascade; + +create table p12 (f1 int, f2 int); +create table c12 () inherits(p12); +create table c12 as select * from p12; +-- should be rejected since c1.f1 is inherited +alter table c12 drop column f1; +alter table only p1 drop column f1; +-- c1.f1 is NOT dropped, but must now be considered non-inherited +alter table c12 drop column f1; + +--drop table p1 cascade; + +create table p13 (f1 int, f2 int); +create table c13 (f1 int not null) inherits(p1); +create table c13 as select * from p13; +-- should be rejected since c1.f1 is inherited +alter table c13 drop column f1; +alter table only p13 drop column f1; +-- c1.f1 is still there, but no longer inherited +alter table c13 drop column f1; + +--drop table p1 cascade; + +create table p14(id int, name text); +create table p24(id2 int, name text, height int); +create table c14(age int) inherits(p1,p2); +create table c14 as select * from p1,p2; +alter table c14 add column age int; +create table gc1() inherits (c14); +select * into gc1 from c14; + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) 
+where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +-- should work +alter table only p14 drop column name; +-- should work. Now c1.name is local and inhcount is 0. +alter table p24 drop column name; +-- should be rejected since its inherited +alter table gc1 drop column name; +-- should work, and drop gc1.name along +alter table c14 drop column name; +-- should fail: column does not exist +alter table gc1 drop column name; +-- should work and drop the attribute in all tables +alter table p24 drop column height; + +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + +--drop table p1, p2 cascade; + +-- +-- Test the ALTER TABLE SET WITH/WITHOUT OIDS command +-- +create table altstartwith (col integer) with oids; + +insert into altstartwith values (1); + +select oid > 0, * from altstartwith; + +alter table altstartwith set without oids; + +select oid > 0, * from altstartwith; -- fails +select * from altstartwith; + +alter table altstartwith set with oids; + +select oid > 0, * from altstartwith; + +--drop table altstartwith; + +-- Check inheritance cases +create table altwithoid (col integer) with oids; + +-- Inherits parents oid column anyway +create table altinhoid () inherits (altwithoid) without oids; + +insert into altinhoid values (1); + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +alter table altwithoid set without oids; + +select oid > 0, * from altwithoid; -- fails +select oid > 0, * from altinhoid; -- fails +select * from altwithoid; +select * from altinhoid; + +alter table altwithoid set with oids; + +select oid > 0, * from altwithoid; +select oid > 0, * from altinhoid; + +--drop table altwithoid cascade; + +create table altwithoid1 (col integer) without oids; + +-- child can have local oid column 
+create table altinhoid1 () inherits (altwithoid1) with oids; + +insert into altinhoid1 values (1); + +select oid > 0, * from altwithoid1; -- fails +select oid > 0, * from altinhoid1; + +alter table altwithoid1 set with oids; + +select oid > 0, * from altwithoid1; +select oid > 0, * from altinhoid1; + +-- the child's local definition should remain +alter table altwithoid1 set without oids; + +select oid > 0, * from altwithoid1; -- fails +select oid > 0, * from altinhoid1; + +--drop table altwithoid cascade; + +-- test renumbering of child-table columns in inherited operations + +create table p15 (f1 int); +create table c15 (f2 text, f3 int) inherits (p1); +create table c15 as select * from p15; +alter table c15 add column f2 text, add column f3 int; +alter table p15 add column a1 int check (a1 > 0); +alter table p15 add column f2 text; + +insert into p15 values (1,2,'abc'); +insert into c15 values(11,'xyz',33,0); -- should fail +insert into c15 values(11,'xyz',33,22); + +select * from p15 order by f1; +alter table p15 replica identity full; +update p15 set a1 = a1 + 1, f2 = upper(f2); +select * from p15 order by f1; + +--drop table p1 cascade; + +-- test that operations with a dropped column do not try to reference +-- its datatype + +-- create domain mytype as text; +create type mytype as (a text); +create table foo (f1 text, f2 mytype, f3 text); + +insert into foo values('bb','cc','dd'); +select * from foo order by f1; + +-- drop domain mytype cascade; + +select * from foo order by f1; +insert into foo values('qq','rr'); +select * from foo order by f1; +alter table foo replica identity full; +update foo set f3 = 'zz'; +select * from foo order by f1; +select f3,max(f1) from foo group by f3; + +-- Simple tests for alter table column type +alter table foo replica identity full; +delete from foo where f1 = 'qq'; +alter table foo alter f1 TYPE integer; -- fails +alter table foo alter f1 TYPE varchar(10); +--drop table foo; + +create table anothertab (atcol1 serial8, 
atcol2 boolean, + constraint anothertab_chk check (atcol1 <= 3));; + +insert into anothertab (atcol1, atcol2) values (default, true); +insert into anothertab (atcol1, atcol2) values (default, false); +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol1 type boolean; -- we could support this cast +alter table anothertab alter column atcol1 type integer; + +select * from anothertab order by atcol1, atcol2; + +insert into anothertab (atcol1, atcol2) values (45, null); -- fails +insert into anothertab (atcol1, atcol2) values (default, null); + +select * from anothertab order by atcol1, atcol2; + +alter table anothertab alter column atcol2 type text + using case when atcol2 is true then 'IT WAS TRUE' + when atcol2 is false then 'IT WAS FALSE' + else 'IT WAS NULL!' end; + +select * from anothertab order by atcol1, atcol2; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab alter column atcol1 drop default; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +alter table anothertab drop constraint anothertab_chk; +alter table anothertab drop constraint anothertab_chk; -- fails +alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds + +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; + +select * from anothertab order by atcol1, atcol2; + +--drop table anothertab; +-- alter table anothertab alter column atcol1 default false; +drop sequence anothertab_atcol1_seq; + +create table another (f1 int, f2 text);; + +insert into another values(1, 'one'); +insert into another values(2, 'two'); +insert into another values(3, 'three'); + +select * from another order by f1, f2; + +alter table another + alter f1 type text using f2 || ' more', + alter f2 type bigint using f1 * 10; + +select * 
from another order by f1, f2; + +--drop table another; + +-- table's row type +create table tab1 (a int, b text); +create table tab2 (x int, y tab1); +alter table tab1 alter column b type varchar; -- fails + +-- disallow recursive containment of row types +-- create table recur1 (f1 int); +-- alter table recur1 add column f2 recur1; -- fails +-- alter table recur1 add column f2 recur1[]; -- fails +-- create domain array_of_recur1 as recur1[]; +-- alter table recur1 add column f2 array_of_recur1; -- fails +-- create table recur2 (f1 int, f2 recur1); +-- alter table recur1 add column f2 recur2; -- fails +-- alter table recur1 add column f2 int; +-- alter table recur1 alter column f2 type recur2; -- fails + +-- SET STORAGE may need to add a TOAST table +create table test_storage (a text); +alter table test_storage alter a set storage plain; +alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table +alter table test_storage alter a set storage extended; -- re-add TOAST table + +select reltoastrelid <> 0 as has_toast_table +from pg_class +where oid = 'test_storage'::regclass; + +-- ALTER TYPE with a check constraint and a child table (bug before Nov 2012) +CREATE TABLE test_inh_check (a float check (a > 10.2)); +CREATE TABLE test_inh_check_child() INHERITS(test_inh_check); +ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric; +\d test_inh_check +\d test_inh_check_child + +-- +-- lock levels +-- +drop type lockmodes; +create type lockmodes as enum ( + 'AccessShareLock' +,'RowShareLock' +,'RowExclusiveLock' +,'ShareUpdateExclusiveLock' +,'ShareLock' +,'ShareRowExclusiveLock' +,'ExclusiveLock' +,'AccessExclusiveLock' +); + +drop view my_locks; +create or replace view my_locks as +select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode +from pg_locks l join pg_class c on l.relation = c.oid +where virtualtransaction = ( + select virtualtransaction + from pg_locks + where 
transactionid = txid_current()::integer) +and locktype = 'relation' +and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') +and c.relname != 'my_locks' +group by c.relname; + +create table alterlock (f1 int primary key, f2 text); + +start transaction; alter table alterlock alter column f2 set statistics 150; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock cluster on alterlock_pkey; +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set without cluster; +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (fillfactor = 100); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock reset (fillfactor); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (toast.autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock set (autovacuum_enabled = off); +select * from my_locks order by 1; +commit; + +start transaction; alter table alterlock alter column f2 set (n_distinct = 1); +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set storage extended; +select * from my_locks order by 1; +rollback; + +start transaction; alter table alterlock alter column f2 set default 'x'; +select * from my_locks order by 1; +rollback; + +-- cleanup +--drop table alterlock; +drop view my_locks; +-- drop type lockmodes; + +-- +-- --alter function +-- +--create function test_strict(text) returns text as + 'select coalesce($1, ''got passed a null'');' + language sql returns null on null input; +select test_strict(NULL); +--alter function test_strict(text) called on null input; +select test_strict(NULL); + +--create function non_strict(text) returns text as + 'select coalesce($1, ''got passed a null'');' + language sql called on null input; +select non_strict(NULL); 
+--alter function non_strict(text) returns null on null input; +select non_strict(NULL); + +-- +-- alter object set schema +-- + +create schema alter1; +create schema alter2; + +-- cannot move table into system built-in schema +create table test1(a int); +alter table test1 set schema dbms_random; +alter table test1 set schema utl_file; + +create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0)); + +create view alter1.v1 as select * from alter1.t1; + +-- --create function alter1.plus1(int) returns int as 'select $1+1' language sql; + +-- create domain alter1.posint integer check (value > 0); + +create type alter1.ctype as (f1 int, f2 text); + +--create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql +as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2'; + +--create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype); + +--create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as + -- operator 1 alter1.=(alter1.ctype, alter1.ctype); + +-- create conversion alter1.ascii_to_utf8 for 'sql_ascii' to 'utf8' from ascii_to_utf8; + +--create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +--create text search configuration alter1.cfg(parser = alter1.prs); +--create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize); +--create text search dictionary alter1.dict(template = alter1.tmpl); + +insert into alter1.t1(f2) values(11); +insert into alter1.t1(f2) values(12); + +alter table alter1.t1 set schema alter2; +alter table alter1.v1 set schema alter2; +--alter function alter1.plus1(int) set schema alter2; +-- alter domain alter1.posint set schema alter2; +--alter operator class alter1.ctype_hash_ops using hash set schema alter2; +--alter operator family alter1.ctype_hash_ops using hash set schema alter2; +--alter operator 
alter1.=(alter1.ctype, alter1.ctype) set schema alter2; +--alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2; +alter type alter1.ctype set schema alter2; +--alter conversion alter1.ascii_to_utf8 set schema alter2; +--alter text search parser alter1.prs set schema alter2; +--alter text search configuration alter1.cfg set schema alter2; +--alter text search template alter1.tmpl set schema alter2; +--alter text search dictionary alter1.dict set schema alter2; + +-- this should succeed because nothing is left in alter1 +-- drop schema alter1; + +insert into alter2.t1(f2) values(13); +insert into alter2.t1(f2) values(14); + +select * from alter2.t1 order by f1, f2; + +alter table alter1.t1 alter column f1 drop default; +drop sequence alter1.t1_f1_seq; + +select * from alter2.v1 order by f1, f2; +drop view alter2.v1; + +select alter2.plus1(41); + +-- clean up +-- drop schema alter2 cascade; +-- drop schema alter1 cascade; + +-- +-- composite types +-- + +CREATE TYPE test_type AS (a int); +\d test_type + +ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ADD ATTRIBUTE b text; +\d test_type + +ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar; +\d test_type + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE b; +\d test_type + +ALTER TYPE test_type DROP ATTRIBUTE c; -- fails + +ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c; + +ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean; +\d test_type + +ALTER TYPE test_type RENAME ATTRIBUTE a TO aa; +ALTER TYPE test_type RENAME ATTRIBUTE d TO dd; +\d test_type + +-- DROP TYPE test_type; + +CREATE TYPE test_type1 AS (a int, b text); +CREATE TABLE test_tbl1 (x int, y test_type1); +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails + +CREATE TYPE test_type2 AS (a int, b text); +-- CREATE TABLE test_tbl2 OF test_type2; +CREATE 
TABLE test_tbl2_subclass () INHERITS (test_tbl2); +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails +ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails +ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE; +\d test_type2 +\d test_tbl2 + +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; +\d test_type2 +\d test_tbl2 +\d test_tbl2_subclass + +--drop table test_tbl2_subclass; + +-- This test isn't that interesting on its own, but the purpose is to leave +-- behind a table to test pg_upgrade with. The table has a composite type +-- column in it, and the composite type has a dropped attribute. +CREATE TYPE test_type3 AS (a int); +CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3; +ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int; + +CREATE TYPE test_type_empty AS (); + +-- +-- typed tables: OF / NOT OF +-- + +CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2)); +ALTER TYPE tt_t0 DROP ATTRIBUTE z; +CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK +CREATE TABLE tt1 (x int, y bigint); -- wrong base type +CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod +CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order +CREATE TABLE tt4 (x int); -- too few columns +CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns +CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent +CREATE TABLE tt7 (x int, q text, y numeric(8,2)) WITH OIDS; +ALTER TABLE tt7 DROP q; -- OK + +ALTER TABLE tt0 OF tt_t0; +ALTER TABLE tt1 OF tt_t0; +ALTER TABLE tt2 OF tt_t0; +ALTER TABLE tt3 OF tt_t0; +ALTER TABLE tt4 OF tt_t0; +ALTER TABLE tt5 OF tt_t0; +ALTER TABLE tt6 OF tt_t0; +ALTER TABLE tt7 OF tt_t0; + +CREATE TYPE tt_t1 
AS (x int, y numeric(8,2)); +ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table +ALTER TABLE tt7 NOT OF; +\d tt7 +drop table tt0; +-- make sure we can drop a constraint on the parent but it remains on the child +CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL)); +CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent); +ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check"; +-- should fail +INSERT INTO test_drop_constr_child (c) VALUES (NULL); +--drop table test_drop_constr_parent CASCADE; + +-- +-- IF EXISTS test +-- +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +CREATE TABLE tt8(a int); +CREATE SCHEMA alter2; + +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; + +\d alter2.tt8 + +--drop table alter2.tt8; +DROP SCHEMA alter2; +--custom script +--create table +CREATE TABLE TBL_DOMAIN +( + IDOMAINID NUMBER(10) NOT NULL, + SDOMAINNAME VARCHAR2(30) NOT NULL +); +--create/recreate primary, unique and foreign key constraints +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) + USING INDEX ; + +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) + USING INDEX ; +\d+ TBL_DOMAIN +--drop table TBL_DOMAIN; + +--create table +CREATE TABLE TBL_CM_MAXTSENDTOHOST +( + I_MODULETYPE NUMBER(38) NOT NULL, + I_MODULENO NUMBER(38) NOT NULL, + I_PLAMODULENO NUMBER(38) NOT NULL, + I_TABLEID NUMBER(38) NOT NULL, + 
I_OLDMAXTUPLE NUMBER(38) NOT NULL, + I_NEWMAXTUPLE NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '' +) + PCTFREE 10 + INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_CM_MAXTSENDTOHOST + ADD PRIMARY KEY (I_PLAMODULENO, I_TABLEID) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); + \d+ TBL_CM_MAXTSENDTOHOST + --drop table TBL_CM_MAXTSENDTOHOST; + +--create table +CREATE TABLE TBL_LICCTRLDESC_DEFAULT +( + I_INDEX NUMBER(38) NOT NULL, + SV_FEATURENAME VARCHAR2(64) NOT NULL, + SV_ITEMNAME VARCHAR2(64) NOT NULL, + I_ITEMTYPE NUMBER(38) NOT NULL, + I_ITEMVALUEMIN NUMBER(38) NOT NULL, + I_ITEMVALUEMAX NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '', + I_STATUS NUMBER(38) NOT NULL +) + PCTFREE 10 + INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_LICCTRLDESC_DEFAULT + ADD PRIMARY KEY (I_INDEX) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +--add unique index +CREATE UNIQUE INDEX IDX_TBL_LICCTRL_DEF ON TBL_LICCTRLDESC_DEFAULT (I_INDEX DESC, I_STATUS) + PCTFREE 10 + INITRANS 
2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +\d+ TBL_LICCTRLDESC_DEFAULT + --drop table TBL_LICCTRLDESC_DEFAULT; +--using index clause +CREATE TABLE STUDENTS +( + ID INT, + NAME VARCHAR2(20), + AGE INT, + ADDRESS VARCHAR(30) +); + --alter table to add unique index or primary key +ALTER TABLE STUDENTS ADD UNIQUE (ID) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD CONSTRAINT ZHANGYG UNIQUE (AGE, ADDRESS) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD PRIMARY KEY (AGE) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); +\d+ STUDENTS +--drop table STUDENTS; +--simulate A db's ALTER TABLE gram +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +ALTER TABLE MODIFY_TABLE_A ADD (mychar CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint1 INT, mychar1 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint2 INT, mychar2 CHAR, mychar3 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD a CHAR, ADD b CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A ADD mychar4 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I VARCHAR2(64); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I CHAR, MODIFY myint1 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(12)); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), mychar1 INT); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), myint1 INT); +--drop table MODIFY_TABLE_A; + +create table test_alter_type(a int,b text); +alter table test_alter_type alter column a type regclass; +--drop table test_alter_type; + +create table test_mod(a int,b text); +alter table test_mod alter column a type regclass; +alter table test_mod alter column a set default "d"; +alter table test_mod alter column 
a set default "d"::int; +alter table test_mod alter column a set default "d"::int + 1; +--drop table test_mod; + +--simulate A db and postgresql, ALTER TABLE IF EXISTS table_name ADD( { element_list_clause } [, ...] ) +--simulate A db and postgresql, ALTER TABLE IF EXISTS table_name MODIFY( { element_list_clause } [, ...] ) +create schema columnar_storage; +create table columnar_storage.create_columnar_add_common_008 (c_tinyint tinyint,c_smallint smallint,c_int integer,c_bigint bigint,c_money money,c_numeric numeric,c_real real,c_double double precision,c_decimal decimal,c_varchar varchar,c_char char(30),c_nvarchar2 nvarchar2,c_text text,c_timestamp timestamp with time zone,c_timestamptz timestamp without time zone,c_date date,c_time time without time zone,c_timetz time with time zone,c_interval interval,c_tinterval tinterval,c_smalldatetime smalldatetime,c_bytea bytea,c_boolean boolean,c_inet inet,c_cidr cidr,c_bit bit(10),c_varbit varbit(10),c_oid oid) with (orientation=column); +alter table if exists columnar_storage.create_columnar_add_common_007 modify (c_int varchar(20)); +alter table if exists columnar_storage.create_columnar_add_common_008 modify (c_int varchar(20), c_double varchar(20)); +select * from columnar_storage.create_columnar_add_common_008; +--drop table columnar_storage.create_columnar_add_common_008; +create table columnar_storage.create_columnar_add_common_008 (c_tinyint tinyint,c_smallint smallint,c_int integer,c_bigint bigint,c_money money,c_numeric numeric,c_real real,c_double double precision,c_decimal decimal,c_varchar varchar,c_char char(30),c_nvarchar2 nvarchar2,c_text text,c_timestamp timestamp with time zone,c_timestamptz timestamp without time zone,c_date date,c_time time without time zone,c_timetz time with time zone,c_interval interval,c_tinterval tinterval,c_smalldatetime smalldatetime,c_bytea bytea,c_boolean boolean,c_inet inet,c_cidr cidr,c_bit bit(10),c_varbit varbit(10),c_oid oid) with (orientation=column); +alter table if 
exists columnar_storage.create_columnar_add_common_007 add (c_time_008 time without time zone,c_timetz_008 time with time zone); +alter table if exists columnar_storage.create_columnar_add_common_008 add (c_time_008 time without time zone,c_timetz_008 time with time zone); +select * from columnar_storage.create_columnar_add_common_008; +--drop table columnar_storage.create_columnar_add_common_008; +drop schema columnar_storage cascade; + +create table test_drop_column_1 (a int, b int, c int); +create table test_drop_column_2 (a int, b int); +create table test_drop_column_3 (a int, b int); +alter table test_drop_column_1 drop column c; +explain (verbose true, costs false) insert into test_drop_column_1 select * from test_drop_column_2; +insert into test_drop_column_1 select * from test_drop_column_2; +explain (verbose true, costs false) insert into test_drop_column_1 select * from test_drop_column_2 order by 2; +insert into test_drop_column_1 select * from test_drop_column_2 order by 2; +explain (verbose true, costs false) insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.a; +insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.a; +explain (verbose true, costs false) insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b; +insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b; +explain (verbose true, costs false) insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b order 
by 1, 2; +insert into test_drop_column_1 select test_drop_column_2.a, test_drop_column_3.a from test_drop_column_2, test_drop_column_3 where test_drop_column_2.a = test_drop_column_3.b order by 1, 2; +alter table test_drop_column2 replica identity full; +explain (verbose true, costs false) update test_drop_column_1 set a=test_drop_column_2.a from test_drop_column_2; +update test_drop_column_1 set a=test_drop_column_2.a from test_drop_column_2; +explain (verbose true, costs false) delete from test_drop_column_1 where a in (select a from test_drop_column_2); +alter table test_drop_column_1 replica identity full; +delete from test_drop_column_1 where a in (select a from test_drop_column_2); + +create table test_drop_column_cstore_1 (a int, b int, c int) with (orientation = column); +create table test_drop_column_cstore_2 (a int, b int) with (orientation = column); +create table test_drop_column_cstore_3 (a int) with (orientation = column); +alter table test_drop_column_cstore_1 drop column c; +insert into test_drop_column_cstore_1 select * from test_drop_column_cstore_2; +insert into test_drop_column_cstore_1 select * from test_drop_column_cstore_2 order by 2; +insert into test_drop_column_cstore_1 select test_drop_column_cstore_2.a, test_drop_column_cstore_3.a from test_drop_column_cstore_2, test_drop_column_cstore_3 where test_drop_column_cstore_2.a = test_drop_column_cstore_3.a; + +drop table test_drop_column_1; +drop table test_drop_column_2; +drop table test_drop_column_3; +drop table test_drop_column_cstore_1; +drop table test_drop_column_cstore_2; +drop table test_drop_column_cstore_3; + +create table test_hash (a int, b int); +create sequence test_seq1; +alter table test_hash alter column a type serial; --fail +alter table test_hash alter column a set default nextval('test_seq1'); +insert into test_hash(b) values(generate_series(1,10)); +alter table test_hash add column c serial; --not supported +alter table test_hash add column d int default 
nextval('test_seq1'); --not supported +alter table test_hash add column e int default nextval('test_seq1')*10; --not supported +--drop table test_hash; +alter table test_hash alter column a drop default; +drop sequence test_seq1; + +-- check column addition within a view (bug #14876) +create table at_base_table(id int, stuff text); +insert into at_base_table values (23, 'skidoo'); +create view at_view_1 as select * from at_base_table bt; +create view at_view_2 as select *, v1 as j from at_view_1 v1; +\d+ at_view_1 +\d+ at_view_2 +explain (verbose, costs off) select * from at_view_2; +select * from at_view_2; + +create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt; +\d+ at_view_1 +\d+ at_view_2 +explain (verbose, costs off) select * from at_view_2; +select * from at_view_2; + +drop view at_view_2; +drop view at_view_1; +--drop table at_base_table; + +create table tt_row_rep_1(a int); +alter table tt_row_rep_1 drop column a; + +create table tt_row_rep_2(a int, b int); +alter table tt_row_rep_2 drop column b; +alter table tt_row_rep_2 drop column a; + +create table tt_col_rep_1(a int) with(orientation=column); +alter table tt_col_rep_1 drop column a; + +create table tt_col_rep_2(a int, b int) with(orientation=column); +alter table tt_col_rep_2 drop column b; +alter table tt_col_rep_2 drop column a; + +--drop table tt_row_rep_1; +--drop table tt_row_rep_2; +drop table tt_col_rep_1; +drop table tt_col_rep_2; + +-- renaming constraints with cache reset of target relation +CREATE TABLE constraint_rename_cache (a int, + CONSTRAINT chk_a CHECK (a > 0), + PRIMARY KEY (a)); +ALTER TABLE constraint_rename_cache + RENAME CONSTRAINT chk_a TO chk_a_new; +ALTER TABLE constraint_rename_cache + RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new; +CREATE TABLE like_constraint_rename_cache + (LIKE constraint_rename_cache INCLUDING ALL); +\d like_constraint_rename_cache +--drop table constraint_rename_cache; +--drop table 
like_constraint_rename_cache; + + + +create table t_alter_type(c0 int4range Unique, foreign key(c0) references t_alter_type(c0)); +alter table t_alter_type alter c0 set data type int4range; + +----drop table t_alter_type; + +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +\d MODIFY_TABLE_A +create table aaa(a integer); +\d aaa +create table bbb(B integer); +\d bbb +create table CCC(c integer); +\d CCC +create table DDD(D integer); +\d DDD +create table EEE("E" integer); +\d EEE +create table FFF("FF" integer); +\d FFF +create table HHH("HH" integer); + +alter table aaa rename a to AA; +\d aaa +create table GGG("GdGG" integer); +alter table CCC rename c to "CC"; +alter table FFF rename FF to ff; -- differnt in b compatibility +alter table HHH rename "HH" to gg; + +rename table public.HHH to public.hhh; +rename table public.hhh to public.hhh1; + +create table aaaaa (b int generated by default as identity,c int); +\dS aaaaa_b_seq +insert into aaaaa(c) values(213); +insert into aaaaa(c) values(21); +insert into aaaaa values(3,121); +insert into aaaaa(c) values(111); +insert into aaaaa values(null,212); +alter table aaaaa alter column b drop default; +drop sequence aaaaa_b_seq; diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup new file mode 100644 index 0000000000..4db8109ca8 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup @@ -0,0 +1,15 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $pub_node1_port "create schema fastcheck;set search_path=fastcheck;create table t1_full (a int, b text);insert into t1_full values (1, 'a'), (2, 'b'), (3, 'c');alter table t1_full replica identity full;" +exec_sql_with_user $case_use_db $sub_node1_port "create schema fastcheck;set search_path=fastcheck;create table t1_full (a int, b text, 
myc int); insert into t1_full values (101, 'a', 1), (102, 'b', 2);" + +exec_sql_with_user $case_use_db $pub_node1_port "set search_path=fastcheck;create table tkey1 (a int primary key, b text);insert into tkey1 values (1, 'a'), (2, 'b'), (3, 'c');alter table tkey1 replica identity default;" +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;create table tkey1 (a int primary key, b text, myc int); insert into tkey1 values (101, '101a', 1), (102, '102b', 2);" + +exec_sql_with_user $case_use_db $pub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" + +exec_sql_with_user $case_use_db $sub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.sql new file mode 100644 index 0000000000..1fe374b883 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.sql @@ -0,0 +1,930 @@ +-- +--FOR BLACKLIST FEATURE: REFERENCES/WITH OIDS/RULE/CREATE TYPE/DOMAIN is not supported. 
+-- + +-- +-- ALTER_TABLE +-- add attribute +-- +set search_path=fastcheck; +CREATE TABLE atmp1 (initial int4); + +COMMENT ON TABLE tmp_wrong IS 'table comment'; +COMMENT ON TABLE atmp1 IS 'table comment'; +COMMENT ON TABLE atmp1 IS NULL; + +ALTER TABLE atmp1 ADD COLUMN xmin integer; -- fails + +ALTER TABLE atmp1 ADD COLUMN a int4 default 3; + +ALTER TABLE atmp1 ADD COLUMN b name; + +ALTER TABLE atmp1 ADD COLUMN c text; + +ALTER TABLE atmp1 ADD COLUMN d float8; + +ALTER TABLE atmp1 ADD COLUMN e float4; + +ALTER TABLE atmp1 ADD COLUMN f int2; + +ALTER TABLE atmp1 ADD COLUMN g polygon; + +ALTER TABLE atmp1 ADD COLUMN h abstime; + +ALTER TABLE atmp1 ADD COLUMN i char; + +ALTER TABLE atmp1 ADD COLUMN j abstime[]; + +ALTER TABLE atmp1 ADD COLUMN k int4; + +ALTER TABLE atmp1 ADD COLUMN l tid; + +ALTER TABLE atmp1 ADD COLUMN m xid; + +ALTER TABLE atmp1 ADD COLUMN n oidvector; + +--ALTER TABLE atmp1 ADD COLUMN o lock; +ALTER TABLE atmp1 ADD COLUMN p smgr; + +ALTER TABLE atmp1 ADD COLUMN q point; + +ALTER TABLE atmp1 ADD COLUMN r lseg; + +ALTER TABLE atmp1 ADD COLUMN s path; + +ALTER TABLE atmp1 ADD COLUMN t box; + +ALTER TABLE atmp1 ADD COLUMN u tinterval; + +ALTER TABLE atmp1 ADD COLUMN v timestamp; + +ALTER TABLE atmp1 ADD COLUMN w interval; + +ALTER TABLE atmp1 ADD COLUMN x float8[]; + +ALTER TABLE atmp1 ADD COLUMN y float4[]; + +ALTER TABLE atmp1 ADD COLUMN z int2[]; + +INSERT INTO atmp1 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp1; + +----drop table tmp; + +-- the wolf bug - schema mods caused inconsistent row 
descriptors +CREATE TABLE atmp2 ( + initial int4 +); + +ALTER TABLE atmp2 ADD COLUMN a int4; + +ALTER TABLE atmp2 ADD COLUMN b name; + +ALTER TABLE atmp2 ADD COLUMN c text; + +ALTER TABLE atmp2 ADD COLUMN d float8; + +ALTER TABLE atmp2 ADD COLUMN e float4; + +ALTER TABLE atmp2 ADD COLUMN f int2; + +ALTER TABLE atmp2 ADD COLUMN g polygon; + +ALTER TABLE atmp2 ADD COLUMN h abstime; + +ALTER TABLE atmp2 ADD COLUMN i char; + +ALTER TABLE atmp2 ADD COLUMN j abstime[]; + +ALTER TABLE atmp2 ADD COLUMN k int4; + +ALTER TABLE atmp2 ADD COLUMN l tid; + +ALTER TABLE atmp2 ADD COLUMN m xid; + +ALTER TABLE atmp2 ADD COLUMN n oidvector; + +--ALTER TABLE atmp2 ADD COLUMN o lock; +ALTER TABLE atmp2 ADD COLUMN p smgr; + +ALTER TABLE atmp2 ADD COLUMN q point; + +ALTER TABLE atmp2 ADD COLUMN r lseg; + +ALTER TABLE atmp2 ADD COLUMN s path; + +ALTER TABLE atmp2 ADD COLUMN t box; + +ALTER TABLE atmp2 ADD COLUMN u tinterval; + +ALTER TABLE atmp2 ADD COLUMN v timestamp; + +ALTER TABLE atmp2 ADD COLUMN w interval; + +ALTER TABLE atmp2 ADD COLUMN x float8[]; + +ALTER TABLE atmp2 ADD COLUMN y float4[]; + +ALTER TABLE atmp2 ADD COLUMN z int2[]; + +INSERT INTO atmp2 (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); + +SELECT * FROM atmp2; + +----drop table tmp; + + +-- +-- rename - check on both non-temp and temp tables +-- +CREATE TABLE atmp3 (regtable int); +-- Enforce use of COMMIT instead of 2PC for temporary objects + + +CREATE TABLE onek ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + 
twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); +CREATE INDEX onek_unique1 ON onek USING btree(unique1 int4_ops); + +CREATE TABLE tenk1 ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +) with(autovacuum_enabled = off); + +CREATE TABLE stud_emp ( + name text, + age int4, + location point, + salary int4, + manager name, + gpa float8, + percent int4 +) with(autovacuum_enabled = off); + +-- ALTER TABLE ... RENAME on non-table relations +-- renaming indexes (FIXME: this should probably test the index's functionality) +ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1; + +ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1; + +-- renaming views +CREATE VIEW tmp_view (unique1) AS SELECT unique1 FROM tenk1; +ALTER TABLE tmp_view RENAME TO tmp_view_new; + +DROP VIEW tmp_view_new; +-- toast-like relation name +alter table stud_emp rename to pg_toast_stud_emp; +alter table pg_toast_stud_emp rename to stud_emp; + +-- renaming index should rename constraint as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraint +ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0); +ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo; + +-- renaming constraint should rename index as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE 
(unique1); +DROP INDEX onek_unique1_constraint; -- to see whether it's there +ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo; +DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; + +-- renaming constraints vs. inheritance +CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int); +\d constraint_rename_test + +create table test_modify (a int, b int); +alter table test_modify replica identity full; +alter table test_modify modify (b not null enable); +insert into test_modify(b) values (null); +insert into test_modify values (1, null); +alter table test_modify modify(b null); +insert into test_modify values (1, null); +alter table test_modify modify (b not null enable); +alter table test_modify replica identity full; +delete from test_modify; +alter table test_modify modify (a not null, b not null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (a null, b null); +insert into test_modify values (1,null); +insert into test_modify values (null,1); +alter table test_modify modify (b constraint ak not null); +delete from test_modify; +alter table test_modify modify (b constraint ak not null); +insert into test_modify values(1,1); +insert into test_modify values(1,null); +alter table test_modify modify (b constraint ak null); +insert into test_modify values(1,null); +alter table test_modify modify (a null, a not null); +-- try alter view should fail +create view test_modify_view as select * from test_modify; +alter table test_modify_view modify (a not null enable); +drop view test_modify_view; +--drop table test_modify; + + +-- test setting and removing default values +create table def_test ( + c1 int4 default 5, + c2 text default 'initial_default' +); +insert into def_test default values; +alter table def_test alter column c1 drop default; +insert into 
def_test default values; +alter table def_test alter column c2 drop default; +insert into def_test default values; +alter table def_test alter column c1 set default 10; +alter table def_test alter column c2 set default 'new_default'; +insert into def_test default values; +select * from def_test order by 1, 2; + +-- set defaults to an incorrect type: this should fail +alter table def_test alter column c1 set default 'wrong_datatype'; +alter table def_test alter column c2 set default 20; + +-- set defaults on a non-existent column: this should fail +alter table def_test alter column c3 set default 30; + +create type mytype as (a text); +create table foo (f1 text, f2 mytype, f3 text); + +insert into foo values('bb','cc','dd'); +select * from foo order by f1; + +-- drop domain mytype cascade; + +select * from foo order by f1; +insert into foo values('qq','rr'); +select * from foo order by f1; +alter table foo replica identity full; +update foo set f3 = 'zz'; +select * from foo order by f1; +select f3,max(f1) from foo group by f3; + +-- Simple tests for alter table column type +alter table foo replica identity full; +delete from foo where f1 = 'qq'; +alter table foo alter f1 TYPE integer; -- fails +alter table foo alter f1 TYPE varchar(10); +--drop table foo; + + +CREATE TABLE TBL_DOMAIN +( + IDOMAINID NUMBER(10) NOT NULL, + SDOMAINNAME VARCHAR2(30) NOT NULL +); +--create/recreate primary, unique and foreign key constraints +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) + USING INDEX ; + +ALTER TABLE TBL_DOMAIN + ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) + USING INDEX ; +\d+ TBL_DOMAIN +--drop table TBL_DOMAIN; + +--create table +CREATE TABLE TBL_CM_MAXTSENDTOHOST +( + I_MODULETYPE NUMBER(38) NOT NULL, + I_MODULENO NUMBER(38) NOT NULL, + I_PLAMODULENO NUMBER(38) NOT NULL, + I_TABLEID NUMBER(38) NOT NULL, + I_OLDMAXTUPLE NUMBER(38) NOT NULL, + I_NEWMAXTUPLE NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 
NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '' +) + PCTFREE 10 + INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_CM_MAXTSENDTOHOST + ADD PRIMARY KEY (I_PLAMODULENO, I_TABLEID) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); + \d+ TBL_CM_MAXTSENDTOHOST + --drop table TBL_CM_MAXTSENDTOHOST; + +--create table +CREATE TABLE TBL_LICCTRLDESC_DEFAULT +( + I_INDEX NUMBER(38) NOT NULL, + SV_FEATURENAME VARCHAR2(64) NOT NULL, + SV_ITEMNAME VARCHAR2(64) NOT NULL, + I_ITEMTYPE NUMBER(38) NOT NULL, + I_ITEMVALUEMIN NUMBER(38) NOT NULL, + I_ITEMVALUEMAX NUMBER(38) NOT NULL, + I_RESERVED1 NUMBER(38) DEFAULT 0, + I_RESERVED2 NUMBER(38) DEFAULT 0, + I_RESERVED3 NUMBER(38) DEFAULT 0, + I_RESERVED4 NUMBER(38) DEFAULT 0, + I_RESERVED5 NUMBER(38) DEFAULT 0, + I_RESERVED6 NUMBER(38) DEFAULT 0, + I_RESERVED7 NUMBER(38) DEFAULT 0, + SV_RESERVED8 VARCHAR2(32) DEFAULT '', + SV_RESERVED9 VARCHAR2(32) DEFAULT '', + SV_RESERVED10 VARCHAR2(32) DEFAULT '', + I_STATUS NUMBER(38) NOT NULL +) + PCTFREE 10 + INITRANS 1 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ) + ; +--add primary key +ALTER TABLE TBL_LICCTRLDESC_DEFAULT + ADD PRIMARY KEY (I_INDEX) + USING INDEX + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +--add unique index +CREATE UNIQUE INDEX IDX_TBL_LICCTRL_DEF ON TBL_LICCTRLDESC_DEFAULT (I_INDEX DESC, I_STATUS) + PCTFREE 10 + INITRANS 2 + MAXTRANS 255 + STORAGE + ( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED + ); +\d+ TBL_LICCTRLDESC_DEFAULT + 
--drop table TBL_LICCTRLDESC_DEFAULT; +--using index clause +CREATE TABLE STUDENTS +( + ID INT, + NAME VARCHAR2(20), + AGE INT, + ADDRESS VARCHAR(30) +); + --alter table to add unique index or primary key +ALTER TABLE STUDENTS ADD UNIQUE (ID) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD CONSTRAINT ZHANGYG UNIQUE (AGE, ADDRESS) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); + +ALTER TABLE STUDENTS ADD PRIMARY KEY (AGE) +USING INDEX +PCTFREE 10 +INITRANS 2 +MAXTRANS 255 +STORAGE +( + INITIAL 64K + MINEXTENTS 1 + MAXEXTENTS UNLIMITED +); +\d+ STUDENTS +--drop table STUDENTS; +--simulate A db's ALTER TABLE gram +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +ALTER TABLE MODIFY_TABLE_A ADD (mychar CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint1 INT, mychar1 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD (myint2 INT, mychar2 CHAR, mychar3 CHAR); +ALTER TABLE MODIFY_TABLE_A ADD a CHAR, ADD b CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A ADD mychar4 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I VARCHAR2(64); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY I CHAR, MODIFY myint1 CHAR; +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(12)); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), mychar1 INT); +\d MODIFY_TABLE_A +ALTER TABLE MODIFY_TABLE_A MODIFY (myint1 VARCHAR(13), myint1 INT); +--drop table MODIFY_TABLE_A; + + +CREATE SCHEMA test_sche; +CREATE TABLE test_sche.logical_TB1( +c1 integer, +c2 date, +c3 text) +partition by system +( +partition p1, +partition p2, +partition p3); + +insert into test_sche.logical_TB1 partition(p1) values(1,'2022-01-01','p1'); +insert into test_sche.logical_TB1 partition(p2) values(2,'2022-02-01','p2'); +insert into test_sche.logical_TB1 partition(p2) values(3,'2022-02-01','p3'); +truncate 
test_sche.logical_TB1; +--drop table test_sche.logical_TB1; + +CREATE TABLE MODIFY_TABLE_A(I INTEGER); +\d MODIFY_TABLE_A +create table aaa(a integer); +\d aaa +create table bbb(B integer); +\d bbb +create table CCC(c integer); +\d CCC +create table DDD(D integer); +\d DDD +create table EEE("E" integer); +\d EEE +create table FFF("FF" integer); +\d FFF +create table HHH("HH" integer); + +alter table aaa rename a to AA; +\d aaa +create table GGG("GdGG" integer); +alter table CCC rename c to "CC"; +alter table FFF rename FF to ff; -- differnt in b compatibility +alter table HHH rename "HH" to gg; + +rename table public.HHH to public.hhh; +rename table public.hhh to public.hhh1; + +insert into t1_full values (4,'d'); +insert into t1_full values (5, 'e'); +create type mytyp as (a int, b text); +alter table t1_full add column c timestamp default now() not null first; +alter table t1_full add column d timestamp on update current_timestamp; + +alter table t1_full add column e int auto_increment unique; +alter table t1_full alter column b set data type timestamp using now(); +alter table t1_full add column ff mytyp default(1, now()::text); +alter table t1_full add column ff33 mytyp default(1, current_timestamp(3)::text); + +alter table t1_full rename to t1_repl_index; +alter table t1_repl_index add constraint t1_pkey_a primary key (a); +alter table t1_repl_index replica identity default; +alter table t1_repl_index add column f int auto_increment unique; +alter table t1_repl_index add column f int auto_increment null unique; +alter table t1_repl_index alter column b set data type timestamp using now(); +alter table t1_repl_index add column e timestamp default now() not null; +alter table t1_repl_index alter column e set data type float using random(); +alter table t1_repl_index add column h int default random(); +alter table t1_repl_index add column h int; +alter table t1_repl_index alter column h set data type float; +update t1_repl_index set h=random(); +alter table 
t1_repl_index add column g timestamp generated always as (b + '1 year'); +insert into t1_repl_index (a) values (200), (201), (202); +-- drop table t1_repl_index; + +insert into tkey1 values (10), (12); +alter table tkey1 modify column b float4 auto_increment unique; +alter table tkey1 modify column b int auto_increment null unique; +drop table tkey1; + +create table blobtbl (id int primary key, a blob, b raw, c clob, d bytea); +alter table blobtbl replica identity default; +insert into blobtbl values (1, utl_raw.cast_to_raw('this is blob'), utl_raw.cast_to_raw('this is raw'), 'this is clob', decode('this is bytea', 'escape')); +insert into blobtbl values (2, utl_raw.cast_to_raw('this is blob2'), utl_raw.cast_to_raw('this is raw2'), 'this is clob2', decode('this is bytea2', 'escape')); +insert into blobtbl values (3, utl_raw.cast_to_raw('this is blob3'), utl_raw.cast_to_raw('this is raw3'), 'this is clob3', decode('this is bytea3', 'escape')); + +update blobtbl set a=utl_raw.cast_to_raw('this is blob after update'), b=utl_raw.cast_to_raw('this is raw after update'), c='this is clob after update', d=decode('this is bytea after i[date]', 'escape') where id=2; +delete from blobtbl where id=3; + +select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col into blobtbl_1 from blobtbl; + +create table blobtbl_2 as (select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col from blobtbl); + +create schema testb; +set search_path='testb'; +create table t1 (a int, b timestamp without time zone); +alter table t1 alter column b set default now(); +alter table t1 modify column b timestamp on update current_timestamp; +insert into t1 (a,b) values (1,default), (2,default),(3,'1900-01-01 1:00:00'); +alter table t1 replica identity full; +create type typ1 as (a int, b text); + +alter table t1 add 
column c typ1 default(1, now()::text); +alter type typ1 add attribute c timestamp; +alter table t1 add constraint t1_pkey primary key (a); +alter table t1 replica identity default; +alter table t1 alter column b set data type timestamp using now() - a; +create type typ2; +create type typ2 as (a int, b int); +alter type typ2 drop attribute a; +drop type typ2; + +create table tab1_1163900(id int not null,a1 text) partition by range(id); +create table tab2_1163900(id int not null,a1 text) partition by list(id); +create table tab3_1163900(id int not null,a1 text) partition by hash(id); +--create table; +create table t1_1163900(id int not null,a1 text); +create table t2_1163900(id int not null,a1 text); +create table t3_1163900(id int not null,a1 text); +--insert; +insert into t1_1163900(id,a1) select generate_series(1,100),'a'; +--t3_1163900; +insert into t3_1163900(id,a1) select generate_series(1,100),'a'; +--t2_1163900; +do $$ +declare +begin +for i in 1..100 loop +insert into t2_1163900 values(20,'a'); +end loop; +end $$; + +--attach; +alter table tab1_1163900 attach partition t1_1163900 for values from (1) to (1000); +alter table tab2_1163900 attach partition t2_1163900 for values in(20); +alter table tab3_1163900 attach partition t3_1163900 for values with(modulus 1,remainder 0); + +create table aaaaa1 (b int generated by default as identity (cycle increment by 10),c int); +-- \dS aaaaa_b_seq +-- insert into aaaaa(c) values(213); +-- insert into aaaaa(c) values(21); +-- insert into aaaaa values(3,121); +-- insert into aaaaa(c) values(111); +-- insert into aaaaa values(null,212); +-- alter table aaaaa alter column b drop default; +-- drop sequence aaaaa_b_seq; + +create table bbbb (a int not null); +alter table bbbb alter column a add generated by default as identity; + +create table genalways(id bigint generated always as identity (start 68 cycle maxvalue 70),name varchar(40)); + +create table genalways2(id smallint generated always as identity (start 68 cycle 
maxvalue 70),name varchar(40)); + +drop table if exists gentest; +create table gentest(id integer PRIMARY KEY, name varchar(40)); +/* AT_AddIdentity */ +ALTER TABLE gentest ALTER id ADD GENERATED ALWAYS AS IDENTITY (start 12 maxvalue 322); +/* AT_SetIdentity in pg compatibility */ +ALTER TABLE gentest ALTER id SET GENERATED ALWAYS; +ALTER TABLE gentest ALTER id DROP IDENTITY; +ALTER TABLE gentest ALTER id ADD GENERATED BY DEFAULT AS IDENTITY (start 99 maxvalue 1000); +ALTER TABLE gentest ALTER id DROP IDENTITY IF EXISTS; +ALTER TABLE gentest ALTER id ADD GENERATED ALWAYS AS IDENTITY (start 33 maxvalue 333); +ALTER TABLE gentest ALTER id SET GENERATED BY DEFAULT; +ALTER TABLE gentest ALTER id RESTART WITH 123; +ALTER TABLE gentest ALTER id RESTART; + + + +CREATE TABLE range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') +); +INSERT INTO range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_sales_idx ON range_sales(product_id) LOCAL; +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01'); +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_end VALUES LESS THAN (MAXVALUE); + +ALTER TABLE range_sales DROP PARTITION time_2009; +--success, drop partition time_2011 +ALTER TABLE range_sales DROP PARTITION FOR ('2011-06-01'); +ALTER TABLE range_sales DROP PARTITION time_2012 update global index; + + +create table 
t_tinyint0018 ( + c1 tinyint, + c2 tinyint(1) default null, + c3 tinyint(10) not null default '0', + c4 tinyint default '0', + c5 text +); +alter table t_tinyint0018 add unique index i_tinyint0018(c1, c2, c5(10)); + +create table t1_addkey (a int, b int, c int, d int); +alter table t1_addkey add primary key (a, b); +alter table t1_addkey add unique (c); + + + +CREATE TABLE test_alter_autoinc_col(col int unsigned primary key); +INSERT INTO test_alter_autoinc_col VALUES(1); +ALTER TABLE test_alter_autoinc_col ADD COLUMN id int unsigned AUTO_INCREMENT unique; + + + +create table alter_table_tbl1 (a int primary key, b int); +create table alter_table_tbl2 (c int primary key, d int); +alter table alter_table_tbl2 add constraint alter_table_tbl_fk foreign key (d) references alter_table_tbl1 (a); + +create index alter_table_tbl_b_ind on alter_table_tbl1(b); + +-- disbale/enable keys +alter table alter_table_tbl1 disable keys; + + +alter table alter_table_tbl1 enable keys; + +-- drop index/key index_name +alter table alter_table_tbl1 drop index alter_table_tbl_b_ind; + +create index alter_table_tbl_b_ind on alter_table_tbl1(b); +alter table alter_table_tbl1 drop key alter_table_tbl_b_ind; +alter table alter_table_tbl2 drop primary key; + +alter table alter_table_tbl2 drop foreign key alter_table_tbl_fk; + +create index alter_table_tbl_b_ind on alter_table_tbl1(b); +alter table alter_table_tbl1 rename index alter_table_tbl_b_ind to new_alter_table_tbl_b_ind; + + +alter table alter_table_tbl1 rename to new_alter_table_tbl1; +alter table new_alter_table_tbl1 rename as new_new_alter_table_tbl1; +alter table new_new_alter_table_tbl1 rename new_new_new_alter_table_tbl1; +alter table if exists new_new_new_alter_table_tbl1 rename alter_table_tbl1; +alter table if exists not_exists_tbl rename new_not_exists_tbl; + + +alter table alter_table_tbl1 add column key int, rename index new_alter_table_tbl_b_ind to alter_table_tbl_b_ind; +alter table alter_table_tbl1 drop column key, drop 
key alter_table_tbl_b_ind; + +ALTER TABLE alter_table_tbl1 RENAME COLUMN a TO AB; +ALTER TABLE alter_table_tbl1 RENAME COLUMN ab TO Ab; +ALTER TABLE alter_table_tbl1 RENAME AB TO AB; +ALTER TABLE alter_table_tbl1 RENAME ab TO ab; +ALTER TABLE if exists alter_table_tbl1 RENAME COLUMN AB TO Ab; +ALTER TABLE if exists alter_table_tbl1 RENAME COLUMN Ab TO ab; +ALTER TABLE if exists alter_table_tbl1 RENAME AB TO ab; +ALTER TABLE if exists alter_table_tbl1 RENAME Ab TO AB; +ALTER TABLE if exists alter_table_tbl1 RENAME Ab AS AB; + + +ALTER TABLE alter_table_tbl1 CHANGE AB ab int; +ALTER TABLE alter_table_tbl1 CHANGE COLUMN AB ABCC int; +ALTER TABLE alter_table_tbl1 CHANGE COLUMN ABCCC AB varchar; + + +CREATE TABLE t_alter_test(c text); +ALTER TABLE t_alter_test DEFAULT COLLATE = test_collate; +ALTER TABLE t_alter_test DEFAULT CHARACTER SET = test_charset; +ALTER TABLE t_alter_test DEFAULT CHARSET = test_charset; +ALTER TABLE t_alter_test default CHARACTER SET = utf_8; +ALTER TABLE t_alter_test CHARACTER SET = utf_8; +ALTER TABLE t_alter_test convert to CHARACTER SET utf_8; + +CREATE TABLE IF NOT EXISTS test_part +( +a int primary key not null default 5, +b int, +c int, +d int +) +PARTITION BY RANGE(a) +( + PARTITION p0 VALUES LESS THAN (1000), + PARTITION p1 VALUES LESS THAN (2000), + PARTITION p2 VALUES LESS THAN (3000) +); + +create unique index idx_c on test_part (c); +create index idx_b on test_part using btree(b) local; +alter table test_part add constraint uidx_d unique(d); +alter table test_part add constraint uidx_c unique using index idx_c; + +insert into test_part (with RECURSIVE t_r(i,j,k,m) as(values(0,1,2,3) union all select i+1,j+2,k+3,m+4 from t_r where i < 2500) select * from t_r); + +ALTER TABLE test_part REBUILD PARTITION p0, p1; +ALTER TABLE test_part REBUILD PARTITION all; + +ALTER TABLE test_part ANALYZE PARTITION p0, p1; +ALTER TABLE test_part ANALYZE PARTITION all; + + +ALTER TABLE test_part remove PARTITIONING; + + +CREATE TABLE bcomp_t1(id int, t 
text, ref int); +CREATE TABLE bcomp_t2(id int, t text); + +alter table bcomp_t2 add constraint unique_id unique(id); +alter table bcomp_t1 add foreign key(ref) references bcomp_t2(id); +alter table bcomp_t1 drop foreign key bcomp_t1_ref_fkey; + +CREATE TABLE bcomp_test_table_1 ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255), + age INT +); +ALTER TABLE bcomp_test_table_1 ADD INDEX idx_age (age); +ALTER TABLE bcomp_test_table_1 rename index idx_age to index_age; +ALTER TABLE bcomp_test_table_1 DROP INDEX index_age; + +CREATE TABLE test ( +id int unsigned auto_increment not null primary key, +title varchar, +boby text, +name name +); +CREATE FULLTEXT INDEX test_index_1 ON test (title, boby) WITH PARSER ngram; +CREATE FULLTEXT INDEX test_index_2 ON test (title, boby, name); +ALTER TABLE test ADD FULLTEXT INDEX test_index_1 (title, boby) WITH PARSER ngram; + +CREATE TABLE bcomp_test_table_2 ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255), + age INT +); +ALTER TABLE bcomp_test_table_2 ADD CONSTRAINT chk_age_range CHECK (age BETWEEN 18 AND 65); +ALTER TABLE bcomp_test_table_2 DROP CONSTRAINT chk_age_range; + +CREATE TABLE bcomp_test_table_3 ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255), + age INT +); +CREATE INDEX idx_age ON bcomp_test_table_3(age); +ALTER TABLE bcomp_test_table_3 ADD KEY idx_age (age); +ALTER TABLE bcomp_test_table_3 DROP KEY idx_age; + +CREATE TABLE tt (a int primary key); +alter table tt drop primary key; + +CREATE TABLE bcomp_test_table_4 ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255), + age INT); +ALTER TABLE bcomp_test_table_4 DISABLE KEYS; +ALTER TABLE bcomp_test_table_4 ENABLE KEYS; + +CREATE TABLE bcomp_test_table_5 ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255), + age INT); +CREATE INDEX idx_age ON bcomp_test_table_5(age); +ALTER TABLE bcomp_test_table_5 RENAME INDEX idx_age TO idx_age_1; +ALTER TABLE bcomp_test_table_5 DROP INDEX idx_age_1; + +CREATE TABLE bcomp_test_table_6 ( + id INT 
AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255), + created_at DATE +) PARTITION BY RANGE COLUMNS(id) ( + PARTITION p0 VALUES LESS THAN (100), + PARTITION p1 VALUES LESS THAN (200), + PARTITION p2 VALUES LESS THAN (MAXVALUE) +); +ALTER TABLE bcomp_test_table_6 REMOVE PARTITIONING; + +CREATE DATABASE test_db; +ALTER DATABASE test_db CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; + +CREATE TABLE t(num int); + + +create definer = ddl_test_user event IF NOT EXISTS ee11 on schedule EVERY 1 day at '2022-12-09 17:24:11' disable do insert into t values(0); + +create event IF NOT EXISTS ee12 on schedule EVERY 2 day at '2022-12-09 17:24:11' ends '2028-12-09 17:24:11' disable do insert into t values(0); + +create event IF NOT EXISTS ee13 on schedule EVERY 2 day at '2022-12-09 17:24:11' disable do insert into t values(0); + +alter definer = ddl_test_user event ee13 on schedule AT '2099-12-11 17:24:11' comment 'jhhh' do insert into t values(1); + +alter definer = ddl_test_user event ee13 on schedule EVERY 1 day starts '2022-12-09 17:24:11' ends '2028-12-09 17:24:11' ON COMPLETION PRESERVE enable do insert into t values(1); + +alter event ee13 on schedule AT '2055-12-11 17:24:11' enable comment 'jhhh' do insert into t values(1); + +alter event ee12 on schedule at '2055-12-09 17:24:11' enable; + + +select job_name, nspname from pg_job where dbname='event_b'; +drop event if exists ee11; +drop event if exists ee13; + +create event IF NOT EXISTS ee14 on schedule EVERY 2 day at '2022-12-09 17:24:11' disable do insert into t values(0); + +alter event ee14 on schedule at '2055-12-09 17:24:11' disable; +alter event ee14 on schedule at '2055-12-09 17:24:11' enable; +alter event ee14 on schedule at '2055-12-09 17:24:11' DISABLE ON SLAVE; +alter event ee14 rename to ee142; +drop event if exists ee142; + +create table t1_z (col1 int primary key auto_increment , col2 text,col3 bigint); +insert into t1_z(col1,col2) values(3, 'aaa'); +alter table t1_z auto_increment = 3; + +drop table t1_z; 
\ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.teardown b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.teardown new file mode 100644 index 0000000000..7b0d5e6eb7 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.teardown @@ -0,0 +1,19 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;alter table t1_repl_index drop column myc; alter table tkey1 drop column myc" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;delete from t1_repl_index where a in (101,102); delete from tkey1 where a in (101,102);" + +exec_sql_with_user $case_use_db $pub_node1_port "set search_path=fastcheck;select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col into blobtbl_pub_1 from blobtbl;" + +exec_sql_with_user $case_use_db $pub_node1_port "set search_path=fastcheck;create table blobtbl_pub_2 as (select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col from blobtbl);" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;drop table blobtbl_pub_1, blobtbl_pub_2" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col into blobtbl_pub_1 from blobtbl;" + +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;create table blobtbl_pub_2 as (select utl_raw.cast_to_varchar2(a) as blob_col, utl_raw.cast_to_varchar2(b) as raw_col, cast(c as varchar) as clob_col, encode(d, 'escape') as bytea_col from 
blobtbl);" diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.setup new file mode 100644 index 0000000000..61d578e9a1 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.setup @@ -0,0 +1,9 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $pub_node1_port "create schema rewrite; set search_path=rewrite;create table t1_full (a int, b text);insert into t1_full values (1, 'a'), (2, 'b'), (3, 'c');alter table t1_full replica identity full;" + +exec_sql $case_use_db $sub_node1_port "create schema rewrite; set search_path=rewrite;create table t1_full (a int, b text, myc int); insert into t1_full values (101, 'a', 1), (102, 'b', 2);" diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.sql new file mode 100644 index 0000000000..d91d9d0bf2 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.sql @@ -0,0 +1,26 @@ +set search_path=rewrite; +insert into t1_full values (4,'d'); +alter table t1_full add column c timestamp default now() not null first; +alter table t1_full add column d timestamp; + +alter table t1_full add column e int unique; +alter table t1_full alter column b set data type timestamp using now(); + +alter table t1_full rename to t1_repl_index; +alter table t1_repl_index add constraint t1_pkey_a primary key (a); +alter table t1_repl_index replica identity default; +alter table t1_repl_index add column f int auto_increment unique; +alter table t1_repl_index alter column b set data type timestamp using now(); +alter table t1_repl_index add column e timestamp default now() not null; +alter table t1_repl_index alter column e set data type float using random(); +alter table 
t1_repl_index add column h int default random(); +alter table t1_repl_index alter column h set data type float; +update t1_repl_index set h=random(); +alter table t1_repl_index add column g timestamp generated always as (b + '1 year'); + +create table t1 (a int, b timestamp without time zone); +alter table t1 alter column b set default now(); +alter table t1 modify column b timestamp on update current_timestamp; +insert into t1 (a,b) values (1,default), (2,default),(3,'1900-01-01 1:00:00'); +alter table t1 replica identity full; +alter table t1 alter column b set data type timestamp using now() - a; diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.teardown b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.teardown new file mode 100644 index 0000000000..efb719a1f6 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_rewrite.teardown @@ -0,0 +1,8 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $sub_node1_port "set search_path=rewrite;alter table t1_repl_index drop column myc" +exec_sql $case_use_db $sub_node1_port "set search_path=rewrite;delete from t1_repl_index where a in (101,102)" diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.setup new file mode 100644 index 0000000000..853b24cd5a --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.setup @@ -0,0 +1,24 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $pub_node1_port "create tablespace ts_subpart_hash_1 relative location 'test/ts_subpart_hash_1'"; + +exec_sql $case_use_db $pub_node1_port "create tablespace ts_subpart_hash_2 relative location 'test/ts_subpart_hash_2'"; +exec_sql $case_use_db $pub_node1_port 
"create tablespace ts_subpart_hash_test_user relative location 'test/ts_subpart_hash_test_user';" +exec_sql $case_use_db $pub_node1_port "create user user_subpart_hash password 'Test@123';" + +exec_sql $case_use_db $pub_node1_port "grant CREATE, USAGE on schema schema_vastbase_subpartition_hash to user_subpart_hash"; +exec_sql $case_use_db $pub_node1_port "grant CREATE on tablespace ts_subpart_hash_test_user to user_subpart_hash;" + +exec_sql $case_use_db $sub_node1_port "create tablespace ts_subpart_hash_1 relative location 'test/ts_subpart_hash_1'"; + +exec_sql $case_use_db $sub_node1_port "create tablespace ts_subpart_hash_2 relative location 'test/ts_subpart_hash_2'"; +exec_sql $case_use_db $sub_node1_port "create tablespace ts_subpart_hash_test_user relative location 'test/ts_subpart_hash_test_user';" +exec_sql $case_use_db $sub_node1_port "create user user_subpart_hash password 'Test@123';" + +exec_sql $case_use_db $sub_node1_port "grant CREATE, USAGE on schema schema_vastbase_subpartition_hash to user_subpart_hash"; +exec_sql $case_use_db $sub_node1_port "grant CREATE on tablespace ts_subpart_hash_test_user to user_subpart_hash;" + diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.sql new file mode 100644 index 0000000000..52b3c2328f --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_subpartition.sql @@ -0,0 +1,2242 @@ +CREATE schema schema_vastbase_subpartition_hash; +set search_path to schema_vastbase_subpartition_hash; +-- init +set datestyle = 'ISO, MDY'; +set behavior_compat_options = ''; + +create table t_subpart_normal_table_hash(id int); +create table t_subpart_part_table_hash(id int) +partition by hash(id) +( + partition p1 +); + + + +---------------------------- +-- Hash subpartition syntax +---------------------------- +-- ͬԶӷֶָӷ +create table t_subpart_range_hash_1 (id integer, 
age integer, name varchar(30), sale integer) +partition by range(age) +subpartition by hash(age) +( +partition p1 values less than (10), +partition p2 values less than (100) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values less than (200) +); + +create table t_subpart_list_hash_1 (id integer, age integer, name varchar(30), sale integer) +partition by list(age) +subpartition by hash(age) +( +partition p1 values (1, 2, 3, 4, 5), +partition p2 values (10, 20, 30, 40, 50) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (111, 222, 333) +); + +create table t_subpart_hash_hash_1 (id integer, age integer, name varchar(30), sale integer) +partition by hash(age) +subpartition by hash(age) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 +); + +-- ӷģ塢ģ崴ӷDEFAULT +create table t_subpart_range_hash_2 (id integer, age numeric, name varchar(30), bd date) +partition by range(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (10), +partition p2 values less than (100) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values less than (MAXVALUE) + ( + subpartition sp3, + subpartition sp4 + ) +); + +create table t_subpart_list_hash_2 (id integer, age numeric, name varchar(30), bd date) +partition by list(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (1, 2, 3, 4, 5), +partition p2 values (10, 20, 30, 40, 50) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (100, 200) + ( + subpartition sp3, + subpartition sp4 + ) +); + +create table t_subpart_hash_hash_2 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), 
+partition p3 + ( + subpartition sp3, + subpartition sp4 + ) +); + +-- ָHashӷ +create table t_subpart_range_hash_3 (id integer, age numeric, name text, bd timestamp) +partition by range(id, age) +subpartition by hash(id) + subpartitions 2 +( +partition p1 values less than (10, 10.6789), +partition p2 values less than (100, 12345.6789) + subpartitions 3, +partition p3 values less than (MAXVALUE, MAXVALUE) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_list_hash_3 (id integer, age numeric, name text, bd timestamp) +partition by list(age) +subpartition by hash(id) + subpartitions 2 +( +partition p1 values (10, 10.6789), +partition p2 values (100, 12345.6789) + subpartitions 3, +partition p3 values (DEFAULT) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_hash_hash_3 (id integer, age numeric, name text, bd timestamp) +partition by hash(age) +subpartition by hash(id) + subpartitions 2 +( +partition p1, +partition p2 + subpartitions 3, +partition p3 + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_hash_hash_4 (id integer, age numeric, name text, bd timestamp) +partition by hash(age) +subpartition by hash(id) + subpartitions 2 +partitions 3; + +-- 飺/ӷ/ӷ͡ӷϵΧrelfilenodeToast +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, p1.subpartstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename like 't_subpart_range_hash_%' + or p1.tablename like 't_subpart_list_hash_%' + or p1.tablename like 't_subpart_hash_hash_%'; + +-- ӷģ +select p1.tablename, p1.subparttemplate +from schema_subpartition.v_subpartition p1 +where p1.subparttemplate is not null + and (p1.tablename like 't_subpart_range_hash_%' + or p1.tablename like 't_subpart_list_hash_%' + or p1.tablename like 't_subpart_hash_hash_%'); + +select get_subpart_template('t_subpart_range_hash_1'::regclass, 0) is null; +select 
pg_get_tabledef('t_subpart_range_hash_1'); +select get_subpart_template('t_subpart_range_hash_2'::regclass, 0); +select get_subpart_template('t_subpart_list_hash_3'::regclass, 2); +select get_subpart_template('t_subpart_hash_hash_4'::regclass, 4); +select pg_get_tabledef('t_subpart_range_hash_2'); +select pg_get_tabledef('t_subpart_list_hash_3'); +select pg_get_tabledef('t_subpart_hash_hash_4'); + +-- Ϊfloat4 +create table t_subpart_range_hash_float4 (col1 float4) +partition by range(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (-34.84), +partition p2 values less than (0), +partition p3 values less than (1004.3) + ( + subpartition sp1, + subpartition sp2 + ), +partition p4 values less than (1.2345678901234e+20) +); + +create table t_subpart_list_hash_float4 (col1 float4) +partition by list(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (-3.1, -3.14, -3.141, -3.1415, -3.14159, -3.141592, -3.1415926), +partition p2 values (0, 10, 100, 1000, 10000) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (1.2345678901234e-20, 1.2345678901234e-10, 1.2345678901234e+10, 1.2345678901234e+20) +); + +create table t_subpart_hash_hash_float4 (col1 float4) +partition by hash(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 +); + +-- Ϊfloat8 +create table t_subpart_range_hash_float8 (col1 float8) +partition by range(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (-34.84), +partition p2 values less than (0), +partition p3 values less than (1004.3) + ( + subpartition sp1, + subpartition sp2 + ), +partition p4 values less than (1.2345678901234e+200) +); + 
+create table t_subpart_list_hash_float8 (col1 float8) +partition by list(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (-3.1, -3.14, -3.141, -3.1415, -3.14159, -3.141592, -3.1415926), +partition p2 values (0, 10, 100, 1000, 10000) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (1.2345678901234e-200, 1.2345678901234e-100, 1.2345678901234e+100, 1.2345678901234e+200) +); + +create table t_subpart_hash_hash_float8 (col1 float8) +partition by hash(col1) +subpartition by hash(col1) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 +); + +-- 飺/ӷ/ӷ͡ӷϵΧrelfilenodeToast +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, p1.subpartstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename like 't_subpart_range_hash_float%' + or p1.tablename like 't_subpart_list_hash_float%' + or p1.tablename like 't_subpart_hash_hash_float%'; + +-- ӷģ +select p1.tablename, p1.subparttemplate +from schema_subpartition.v_subpartition p1 +where p1.subparttemplate is not null + and (p1.tablename like 't_subpart_range_hash_float%' + or p1.tablename like 't_subpart_list_hash_float%' + or p1.tablename like 't_subpart_hash_hash_float%'); + +---------------------------- +-- +---------------------------- +create table t_subpart_range_hash_7 (id integer primary key, age numeric, name varchar(30), bd date) +partition by range(id) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values less than (100), +partition p2 values less than (500) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_list_hash_7 (id integer primary key, age numeric, name varchar(30), bd date) +partition by list(id) +subpartition by 
hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1 values (100), +partition p2 values (500) + ( + subpartition sp1, + subpartition sp2 + ) +); + +create table t_subpart_hash_hash_7 (id integer primary key, age numeric, name varchar(30), bd date) +partition by hash(id) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp2 + ) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ) +); + + +create table t_subpart_range_hash_8 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_range_hash_8 PRIMARY KEY (id, age, name)) +partition by range(age, name) +subpartition by hash(id) +( +partition p1 values less than (20, 'AAA') +); + +create table t_subpart_list_hash_8 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_list_hash_8 PRIMARY KEY (id, age, name)) +partition by list(age) +subpartition by hash(id) +( +partition p1 values (20) +); + +create table t_subpart_hash_hash_8 (id integer, age integer, name char(30), bd date, + CONSTRAINT i_t_subpart_hash_hash_8 PRIMARY KEY (id, age, name)) +partition by hash(age) +subpartition by hash(id) +( +partition p1 +); + +-- ʱָPRIMARY KEY/UNIQUEзΪȫ +create table t_subpart_range_hash_9 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_range_hash_9 PRIMARY KEY (age, name)) +partition by range(age, name) +subpartition by hash(id) +( +partition p1 values less than (100, 'AAA') +); + +create table t_subpart_list_hash_9 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_list_hash_9 PRIMARY KEY (id, name)) +partition by list(age) +subpartition by hash(id) +( +partition p1 values (100) +); + +create table t_subpart_hash_hash_9 (id integer, age numeric, name char(30), bd date, + CONSTRAINT i_t_subpart_hash_hash_9 PRIMARY KEY (bd, name)) +partition by hash(age) +subpartition by hash(id) +( +partition p1 +); + +-- ΪȫUNIQUEԲðз +create unique 
index i_t_subpart_range_hash_8_1 on t_subpart_range_hash_8 (id, bd); +create unique index i_t_subpart_list_hash_8_1 on t_subpart_list_hash_8 (id, bd); +create unique index i_t_subpart_hash_hash_8_1 on t_subpart_hash_hash_8 (id, bd); + + +create table t_subpart_range_hash_10 (id integer, age numeric, name char(30), bd date) +partition by range(age, name) +subpartition by hash(id) +( +partition p1 values less than (10, 'AAA') + ( + subpartition sp1 + ), +partition p2 values less than (100, 'MAXVALUE') + ( + subpartition sp2, + subpartition sp3 + ) +); + +create table t_subpart_list_hash_10 (id integer, age numeric, name char(30), bd date) +partition by list(age) +subpartition by hash(id) +( +partition p1 values (10) + ( + subpartition sp1 + ), +partition p2 values (100) + ( + subpartition sp2, + subpartition sp3 + ) +); + +create table t_subpart_hash_hash_10 (id integer, age integer, name char(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition p2 + ( + subpartition sp2, + subpartition sp3 + ) +); + +-- Ϊӱط +create unique index i_t_subpart_range_hash_10_1 on t_subpart_range_hash_10 (id) local; -- error +create unique index i_t_subpart_range_hash_10_1 on t_subpart_range_hash_10 (name, age) local; -- error +create unique index i_t_subpart_range_hash_10_1 on t_subpart_range_hash_10 (age, name, id) local; +create index i_t_subpart_range_hash_10_2 on t_subpart_range_hash_10 (name, age) local; + +create unique index i_t_subpart_list_hash_10_1 on t_subpart_list_hash_10 (age) local; -- error +create unique index i_t_subpart_list_hash_10_1 on t_subpart_list_hash_10 (name, bd) local; -- error +create unique index i_t_subpart_list_hash_10_1 on t_subpart_list_hash_10 (age, id) local; +create index i_t_subpart_list_hash_10_2 on t_subpart_list_hash_10 (name, age) local; + +create unique index i_t_subpart_hash_hash_10_1 on t_subpart_hash_hash_10 (bd) local; -- error +create unique index i_t_subpart_hash_hash_10_1 
on t_subpart_hash_hash_10 (name, bd) local; -- error +create unique index i_t_subpart_hash_hash_10_1 on t_subpart_hash_hash_10 (age, id, bd) local; +create index i_t_subpart_hash_hash_10_2 on t_subpart_hash_hash_10 (age, bd) local; + +-- Ϊӱطָ +create index i_t_subpart_range_hash_10_3 on t_subpart_range_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_index_local + ), +partition p2_idx + ( + subpartition subp2_index_local + ) +); -- error +create index i_t_subpart_range_hash_10_3 on t_subpart_range_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_bd_idx_local + ), +partition p2_idx + ( + subpartition subp2_bd_idx_local, + subpartition subp3_bd_idx_local + ) +); + +create index i_t_subpart_list_hash_10_3 on t_subpart_list_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_index_local + ), +partition p2_idx + ( + subpartition subp2_index_local + ) +); -- error +create index i_t_subpart_list_hash_10_3 on t_subpart_list_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_bd_idx_local + ), +partition p2_idx + ( + subpartition subp2_bd_idx_local, + subpartition subp3_bd_idx_local + ) +); + +create index i_t_subpart_hash_hash_10_3 on t_subpart_hash_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_index_local + ), +partition p2_idx + ( + subpartition subp2_index_local + ) +); -- error +create index i_t_subpart_hash_hash_10_3 on t_subpart_hash_hash_10 (bd) local +( +partition p1_idx + ( + subpartition subp1_bd_idx_local + ), +partition p2_idx + ( + subpartition subp2_bd_idx_local, + subpartition subp3_bd_idx_local + ) +); + +-- ΪȫĿǰͨûʲô +create unique index i_t_subpart_range_hash_10_4 on t_subpart_range_hash_10 (name, age) global; -- error +create unique index i_t_subpart_range_hash_10_4 on t_subpart_range_hash_10 (age, bd) global; +drop index i_t_subpart_range_hash_10_2; +create unique index i_t_subpart_range_hash_10_5 on t_subpart_range_hash_10 (name, age) global; + +create unique index 
i_t_subpart_list_hash_10_4 on t_subpart_list_hash_10 (name, age) global; -- error +create unique index i_t_subpart_list_hash_10_4 on t_subpart_list_hash_10 (name, bd) global; +drop index i_t_subpart_list_hash_10_2; +create unique index i_t_subpart_list_hash_10_5 on t_subpart_list_hash_10 (name, age) global; + +create unique index i_t_subpart_hash_hash_10_4 on t_subpart_hash_hash_10 (bd, age) global; -- error +create unique index i_t_subpart_hash_hash_10_4 on t_subpart_hash_hash_10 (name, id) global; +drop index i_t_subpart_hash_hash_10_2; +create unique index i_t_subpart_hash_hash_10_5 on t_subpart_hash_hash_10 (bd, age) global; + + +-- +select p1.tablename, p1.relname, p1.reltoastidxid, p1.indextblid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_range_hash_7', 't_subpart_range_hash_8', 't_subpart_range_hash_10'); + +select p1.tablename, p1.relname, p1.reltoastidxid, p1.indextblid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_list_hash_7', 't_subpart_list_hash_8', 't_subpart_list_hash_10'); + +select p1.tablename, p1.relname, p1.reltoastidxid, p1.indextblid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_hash_hash_7', 't_subpart_hash_hash_8', 't_subpart_hash_hash_10'); + + +-- 鿴 +select * from pg_indexes where tablename like 't_subpart_range_hash_%' order by tablename, indexname; +select * from pg_indexes where tablename like 't_subpart_list_hash_%' order by tablename, indexname; +select * from pg_indexes where tablename like 't_subpart_hash_hash_%' order by tablename, indexname; + + +-- \d +\d t_subpart_range_hash_8 +\d t_subpart_list_hash_8 +\d t_subpart_hash_hash_8 +\d t_subpart_range_hash_10 +\d t_subpart_list_hash_10 +\d t_subpart_hash_hash_10 + + + +---------------------------- +-- []ռ +---------------------------- +create table t_subpart_range_hash_11 (id integer, age numeric, name varchar(30), bd date) +partition by range(age) +subpartition by hash(id) + 
subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1 values less than (10), +partition p2 values less than (100) tablespace ts_subpart_hash_2, +partition p3 values less than (1000) + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 values less than (MAXVALUE) tablespace ts_subpart_hash_2 + ( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +create table t_subpart_list_hash_11 (id integer, age numeric, name varchar(30), bd date) +partition by list(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1 values (10), +partition p2 values (20) tablespace ts_subpart_hash_2, +partition p3 values (30) + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 values (DEFAULT) tablespace ts_subpart_hash_2 + ( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +create table t_subpart_hash_hash_11 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1, +partition p2 tablespace ts_subpart_hash_2, +partition p3 + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 tablespace ts_subpart_hash_2 + ( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +create table t_subpart_hash_hash_11_2 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartitions 3 store in (ts_subpart_hash_1, ts_subpart_hash_2) +partitions 5 store in (ts_subpart_hash_2, ts_subpart_hash_1); + +-- TODO Ŀǰ֧HashHashӷ +alter table t_subpart_hash_hash_11_2 add partition p6; +alter table t_subpart_hash_hash_11_2 modify partition p6 add subpartition p6_sp3; + 
+-- ӷıռ +select p1.tablename, p1.relname, p1.parttype, p2.spcname tablespace_name +from schema_subpartition.v_subpartition p1 left join pg_tablespace p2 on p1.reltablespace = p2.oid +where p1.tablename in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11', 't_subpart_hash_hash_11_2') +order by p1.parentid, p1.oid; + +-- ӷģ +select p1.tablename, p1.subparttemplate +from schema_subpartition.v_subpartition p1 +where p1.subparttemplate is not null + and p1.tablename in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11', 't_subpart_hash_hash_11_2'); + + +-- error, ûбռȨ +SET SESSION AUTHORIZATION user_subpart_hash PASSWORD 'Test@123'; +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_test_user, + subpartition sp2 tablespace ts_subpart_hash_1 + ) +( +partition p1 +); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 tablespace ts_subpart_hash_test_user, +partition p2 tablespace ts_subpart_hash_1 +); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 tablespace ts_subpart_hash_test_user, + subpartition sp2 tablespace ts_subpart_hash_1 + ) +); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) + subpartitions 2 store in (ts_subpart_hash_test_user, ts_subpart_hash_1) +( +partition p1 +); + +create table t_subpart_hash_hash_12 (id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +partitions 2 store in (ts_subpart_hash_test_user, ts_subpart_hash_1); + +create table t_subpart_hash_hash_12 
(id integer, age numeric, name varchar(30), bd date) +partition by hash(age) +subpartition by hash(id) +( +partition p1 + subpartitions 2 store in (ts_subpart_hash_test_user, ts_subpart_hash_1) +); + +RESET SESSION AUTHORIZATION; + + + +---------------------------- +-- syntax error +---------------------------- +-- а˶ӷ +create table t_subpart_error (id integer, name varchar(30)) +partition by range(id) +( +partition p1 values less than (10) + ( + subpartition sp1 + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by list(id) +( +partition p1 values (10) + ( + subpartition sp1 + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +( +partition p1 + ( + subpartition sp1 + ) +); + + +-- ӷظ +create table t_subpart_error (id integer, name varchar(30)) +partition by range(name) +subpartition by hash(id) +( +partition p1 values less than ('a') + ( + subpartition sp1 + ), +partition p2 values less than ('A') + ( + subpartition sp1 + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by list(name) +subpartition by hash(id) +( +partition p1 values ('a') + ( + subpartition sp1 + ), +partition p2 values ('A') + ( + subpartition sp1 + ) +); + +create table t_subpart_error (id integer, name int8) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition p2 + ( + subpartition sp1 + ) +); + + +-- ӷģӷظ +create table t_subpart_error (id integer, name varchar(30)) +partition by range(name) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp1 + ) +( +partition p1 values less than ('a') +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by list(name) +subpartition by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp1 + ) +( +partition p1 values ('a') +); + +create table t_subpart_error (id integer, name int2) +partition by hash(name) +subpartition 
by hash(id) + subpartition template + ( + subpartition sp1, + subpartition sp1 + ) +( +partition p1 +); + + +-- ӷظ +create table t_subpart_error (id integer, name varchar(30)) +partition by range(name) +subpartition by hash(id) +( +partition p1 values less than ('10') + ( + subpartition sp1 + ), +partition p2 values less than ('100') + ( + subpartition p1 + ) +); + +create table t_subpart_error (id integer, name varchar(30)) +partition by list(id) +subpartition by hash(id) +( +partition p1 values ('10') + ( + subpartition sp1 + ), +partition p2 values ('100') + ( + subpartition p1 + ) +); + +create table t_subpart_error (id integer, name int4) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition p2 + ( + subpartition p1 + ) +); +create table t_subpart_error (id integer, name int4) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition sp1 + ), +partition sp1 + ( + subpartition sp2 + ) +); + + +-- Զɵӷظ +create table t_subpart_error (id integer, name text) +partition by range(name) +subpartition by hash(id) +( +partition p1 values less than ('10') + ( + subpartition p2_subpartdefault1 + ), +partition p2 values less than ('100') +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name text) +partition by list(name) +subpartition by hash(id) +( +partition p1 values ('10') + ( + subpartition p2_subpartdefault1 + ), +partition p2 values ('100') +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name integer) +partition by hash(name) +subpartition by hash(id) +( +partition p1 + ( + subpartition p2_subpartdefault1 + ), +partition p2 +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(id) +( +partition p1, +partition p2 + ( + subpartition p1_subpartdefault1 + ) +); +drop table t_subpart_error; + +-- ӷ +create table t_subpart_error (id integer, name 
varchar(30), age int, bd varchar(30), addr varchar(30)) +partition by hash(id) +subpartition by hash(id, name, age, bd, addr) +( +partition p1 +); + + +-- ӷʹ +create table t_subpart_error (id integer, name varchar(30), m money) +partition by hash(id) +subpartition by hash(m) +( +partition p1 +); +create table t_subpart_error (id integer, name varchar(30), m money) -- now is ok +partition by hash(id) +subpartition by hash(name) +( +partition p1 +); +drop table t_subpart_error; +create table t_subpart_error (id integer, name varchar(30), bd date) -- now is ok +partition by hash(id) +subpartition by hash(bd) +( +partition p1 +); +drop table t_subpart_error; + + +-- HashӷʹRange﷨ +create table t_subpart_error (id integer, name varchar(30), age int) +partition by hash(id) +subpartition by hash(age) +( +partition p1 + ( + subpartition sp1 values less than (1) + ) +); +create table t_subpart_error (id integer, name varchar(30), age int) +partition by hash(id) +subpartition by hash(age) +( +partition p1 + ( + subpartition sp1 end (1) + ) +); + +-- HashӷʹList﷨ +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) +( +partition p1 + ( + subpartition sp1 values ('a', 'b') + ) +); + +-- Hashӷģ壬ʹRange/List﷨ +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values less than (1) + ) +( +partition p1 +); +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 end (1) + ) +( +partition p1 +); +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values ('a') + ) +( +partition p1 +); + + +-- ӷģ﷨ +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) + subpartition 
template + ( + subpartition sp1 + ( + subpartition ssp1 values (DEFAULT) + ) + ) +( +partition p1 +); + + +-- DEFAULT[]쳣 +---- ֻһDEFAULTǴ﷨ƣ +---- һ[]ΪDEFAULT +-- עԶɵӷֻһDEFAULTӷ +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values (DEFAULT) + ) +( +partition p1 +); +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) + subpartition template + ( + subpartition sp1 values (DEFAULT), + subpartition sp2 + ) +( +partition p1 +); +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) +( +partition p1 + ( + subpartition sp1 values (DEFAULT) + ) +); +create table t_subpart_error (id integer, name integer) +partition by hash(id) +subpartition by hash(name) +( +partition p1 + ( + subpartition sp1 values (DEFAULT), + subpartition sp2 + ) +); + + + +---------------------------- +-- []޸ +---------------------------- +alter table t_subpart_hash_hash_2 drop column age; +alter table t_subpart_hash_hash_2 drop column id; +alter table t_subpart_hash_hash_2 modify (age numeric(6,1)); +alter table t_subpart_hash_hash_2 modify (id text); + + + +---------------------------- +-- +---------------------------- +alter table t_subpart_range_hash_1 add partition p4 values less than (300); +alter table t_subpart_range_hash_1 add partition p5 start (300) end (400) +( + subpartition sp3, + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_range_hash_1 add partition p6 values less than (500) +( + subpartition sp6, + subpartition sys_subp4294967295 +); + +alter table t_subpart_list_hash_1 add partition p4 values (300); +alter table t_subpart_list_hash_1 add partition p5 values (400) +( + subpartition sp3, + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_list_hash_1 add partition p6 values (500) +( + subpartition sp6, + subpartition sys_subp4294967295 +); 
+ +alter table t_subpart_hash_hash_1 add partition p4; +alter table t_subpart_hash_hash_1 add partition p5 +( + subpartition sp3, + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_hash_hash_1 add partition p6 +( + subpartition sp6, + subpartition sys_subp4294967295 +); + + +alter table t_subpart_range_hash_7 add partition p3 end (1000); +alter table t_subpart_range_hash_7 add partition p4 values less than (2000) +( + subpartition sp3, + subpartition p5_sp2 +); +alter table t_subpart_range_hash_10 add partition p3 values less than (MAXVALUE, MAXVALUE) +( + subpartition sp4, + subpartition sp5 +); + +alter table t_subpart_list_hash_7 add partition p3 values (1000); +alter table t_subpart_list_hash_7 add partition p4 values (2000) +( + subpartition sp3, + subpartition p5_sp2 +); +alter table t_subpart_list_hash_10 add partition p3 values (DEFAULT) +( + subpartition sp4, + subpartition sp5 +); +alter table t_subpart_hash_hash_7 add partition p3; +alter table t_subpart_hash_hash_7 add partition p4 +( + subpartition sp3, + subpartition p5_sp2 +); +alter table t_subpart_hash_hash_10 add partition p3 +( + subpartition sp4, + subpartition sp5 +); + + +---- ͨӷ +alter table t_subpart_normal_table_hash add partition p1; +alter table t_subpart_normal_table_hash add partition p2 +( + subpartition sp1, + subpartition sp2 +); + +---- һӷ +alter table t_subpart_part_table_hash add partition p2 +( + subpartition sp1, + subpartition sp2 +); + + +---- [][]岻ƥ +alter table t_subpart_range_hash_1 add partition p_error values (500); +alter table t_subpart_range_hash_1 add partition p_error; +alter table t_subpart_range_hash_1 add partition p7 end (500) +( + subpartition sp_error values less than (100) +); +alter table t_subpart_range_hash_1 add partition p7 end (500) +( + subpartition sp_error start (100) +); +alter table t_subpart_range_hash_1 add partition p7 end (500) +( + subpartition sp_error values (0) +); + +alter table t_subpart_list_hash_1 add partition p_error 
values less than (500); +alter table t_subpart_list_hash_1 add partition p_error; +alter table t_subpart_list_hash_1 add partition p7 values (700) +( + subpartition sp_error end (100) +); +alter table t_subpart_list_hash_1 add partition p7 values (700) +( + subpartition sp_error values (0) +); + +alter table t_subpart_hash_hash_1 add partition p_error values less than (500); +alter table t_subpart_hash_hash_1 add partition p_error end (500); +alter table t_subpart_hash_hash_1 add partition p_error values (0); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error values less than (100) +); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error end (100) +); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error values (1) +); +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartition sp_error +); +alter table t_subpart_hash_hash_1 add partitions 1; +alter table t_subpart_hash_hash_1 add partition p7 +( + subpartitions 2 +); + + +---- ظ +alter table t_subpart_hash_hash_1 add partition p_error +( + subpartition sp3, + subpartition sp3 +); +alter table t_subpart_hash_hash_1 add partition p_error +( + subpartition sp3, + subpartition p_error +); +alter table t_subpart_hash_hash_1 add partition p_error +( + subpartition sp1, + subpartition sp22 +); +alter table t_subpart_hash_hash_1 add partition p_error +( + subpartition p1 +); +alter table t_subpart_hash_hash_7 add partition p3; +alter table t_subpart_hash_hash_7 add partition sp1; + +---- ӷѾ +alter table t_subpart_range_hash_7 add partition p5 values less than (MAXVALUE); +alter table t_subpart_list_hash_7 add partition p5 values (DEFAULT); +alter table t_subpart_hash_hash_7 add partition p5; + + +---- MAXVALUE/DEFAULT +alter table t_subpart_range_hash_10 add partition p_error values less than (9999, 9999); +alter table t_subpart_list_hash_10 add partition p_error values (9999); +alter table t_subpart_hash_hash_10 add partition 
p_error; + + + +---------------------------- +-- ӷ +---------------------------- +alter table t_subpart_range_hash_1 modify partition p2 add subpartition p2_sp20; +alter table t_subpart_range_hash_10 modify partition p1 add subpartition p1_sp22; + +alter table t_subpart_list_hash_1 modify partition p2 add subpartition p2_sp20; +alter table t_subpart_list_hash_10 modify partition p1 add subpartition p1_sp22; + +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition p2_sp20; +alter table t_subpart_hash_hash_10 modify partition p1 add subpartition p1_sp22; + + +---- ͨӷ +alter table t_subpart_normal_table_hash modify partition p1 add subpartition sp1; +alter table t_subpart_normal_table_hash modify partition p1 add subpartition sp1 +( + subpartition sp3, + subpartition sp4 +); + +---- һӷ +alter table t_subpart_part_table_hash modify partition p1 add subpartition sp1; +alter table t_subpart_part_table_hash modify partition p1 add subpartition sp1 +( + subpartition sp3, + subpartition sp4 +); + +---- ӷӷ岻ƥ +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp_error values less than (10); +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp_error end (10); +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp_error values (1000); + +---- +alter table t_subpart_range_hash_1 modify partition p_error add subpartition sp_error; +alter table t_subpart_range_hash_1 modify partition for (999) add subpartition sp_error; +alter table t_subpart_list_hash_1 modify partition p_error add subpartition sp_error; +alter table t_subpart_list_hash_1 modify partition for (999) add subpartition sp_error; +alter table t_subpart_hash_hash_1 modify partition p_error add subpartition sp21; +alter table t_subpart_hash_hash_1 modify partition for (999) add subpartition sp_error; + +---- ظ +alter table t_subpart_range_hash_1 modify partition p2 add subpartition sp1; +alter table t_subpart_range_hash_1 modify partition p2 
add subpartition sp3; +alter table t_subpart_range_hash_1 modify partition p2 add subpartition p1; + +alter table t_subpart_list_hash_1 modify partition p2 add subpartition sp1; +alter table t_subpart_list_hash_1 modify partition p2 add subpartition sp3; +alter table t_subpart_list_hash_1 modify partition p2 add subpartition p1; + +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp1; +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition sp3; +alter table t_subpart_hash_hash_1 modify partition p2 add subpartition p1; + +alter table t_subpart_hash_hash_1 modify partition p2 add subpartitions 1; + +-- 飬ӷ +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_range_hash_1', 't_subpart_range_hash_7', 't_subpart_range_hash_10', + 't_subpart_list_hash_1', 't_subpart_list_hash_7', 't_subpart_list_hash_10', + 't_subpart_hash_hash_1', 't_subpart_hash_hash_7', 't_subpart_hash_hash_10'); + + + +-- 飬ӷıռ +create table t_subpart_hash_hash_13 (id integer, age int) +partition by hash(id) +subpartition by hash(age) + subpartition template + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ) +( +partition p1, +partition p2 tablespace ts_subpart_hash_2, +partition p3 + ( + subpartition sp1 tablespace ts_subpart_hash_1, + subpartition sp2 + ), +partition p4 tablespace ts_subpart_hash_2 + ( + subpartition sp3 tablespace ts_subpart_hash_1, + subpartition sp4 + ) +); + +alter table t_subpart_hash_hash_13 add partition p5; +alter table t_subpart_hash_hash_13 add partition p6 tablespace ts_subpart_hash_2; +alter table t_subpart_hash_hash_13 add partition p7 tablespace ts_subpart_hash_2 +( + subpartition sp5, + subpartition sp6 tablespace ts_subpart_hash_1 +); +alter table t_subpart_hash_hash_13 add partition p8 +( + subpartition sp7, + subpartition sp8 tablespace ts_subpart_hash_1 
+); + +alter table t_subpart_hash_hash_13 modify partition p1 add subpartition p1_sp20; +alter table t_subpart_hash_hash_13 modify partition p1 add subpartition p1_sp21 tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_13 modify partition p2 add subpartition p2_sp22; +alter table t_subpart_hash_hash_13 modify partition p2 add subpartition p2_sp23 tablespace ts_subpart_hash_1; + + +-- error, /ӷûбռȨ +SET SESSION AUTHORIZATION user_subpart_hash PASSWORD 'Test@123'; +create table t_subpart_hash_hash_14 (id integer, age integer) +partition by hash (id) +subpartition by hash (age) +( +partition p1 + ( + subpartition sp1 + ) +); + +alter table t_subpart_hash_hash_14 add partition p2 tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_14 add partition p2 +( + subpartition sp2, + subpartition sp3 tablespace ts_subpart_hash_1 +); +alter table t_subpart_hash_hash_14 modify partition p1 add subpartition p1_sp2 tablespace ts_subpart_hash_1; + +drop table t_subpart_hash_hash_14; +RESET SESSION AUTHORIZATION; + +select p1.tablename, p1.relname, p1.parttype, p2.spcname tablespace_name +from schema_subpartition.v_subpartition p1 left join pg_tablespace p2 on p1.reltablespace = p2.oid +where p1.tablename = 't_subpart_hash_hash_13' +order by p1.parentid, p1.oid; + + + +---------------------------- +-- ɾ +---------------------------- +alter table t_subpart_range_hash_1 drop partition p6; +alter table t_subpart_range_hash_1 drop partition for (350); -- drop p5 +alter table t_subpart_range_hash_7 drop partition p3; +alter table t_subpart_range_hash_10 drop partition for (1, 'A'); -- drop p1 + +alter table t_subpart_list_hash_1 drop partition p6; +alter table t_subpart_list_hash_1 drop partition for (400); -- drop p5 +alter table t_subpart_list_hash_7 drop partition p3; +alter table t_subpart_list_hash_10 drop partition for (10); -- drop p1 + +alter table t_subpart_hash_hash_1 drop partition p6; +alter table t_subpart_hash_hash_1 drop partition for (4); -- drop p5 
+alter table t_subpart_hash_hash_7 drop partition p3; +alter table t_subpart_hash_hash_10 drop partition for (10); -- drop p1 +alter table t_subpart_hash_hash_13 drop partition p4; + + +-- ɾ +alter table t_subpart_range_hash_1 drop partition p_error; +alter table t_subpart_range_hash_7 drop partition for (9999); + +alter table t_subpart_list_hash_1 drop partition p_error; +alter table t_subpart_list_hash_7 drop partition for (9999); + +alter table t_subpart_hash_hash_1 drop partition p_error; +alter table t_subpart_hash_hash_7 drop partition for (9999); + + +-- ֻDEFAULT +alter table t_subpart_list_hash_10 drop partition p2; + + +-- ٱ1 +alter table t_subpart_range_hash_10 drop partition p3; -- ok +alter table t_subpart_range_hash_10 drop partition p2; -- error + +alter table t_subpart_list_hash_10 drop partition p3; -- ok +alter table t_subpart_list_hash_10 drop partition p2; -- error + +alter table t_subpart_hash_hash_10 drop partition p3; -- error +alter table t_subpart_hash_hash_10 drop partition p2; + + + +---------------------------- +-- ɾӷ +---------------------------- +alter table t_subpart_range_hash_1 drop subpartition sp1; +alter table t_subpart_range_hash_7 drop subpartition for (100, 101); -- drop sp2 + +alter table t_subpart_list_hash_1 drop subpartition sp1; +alter table t_subpart_list_hash_7 drop subpartition for (500, 101); -- drop sp2 + +alter table t_subpart_hash_hash_1 drop subpartition sp1; +alter table t_subpart_hash_hash_7 drop subpartition for (1, 9); -- drop sp2 +alter table t_subpart_hash_hash_13 drop subpartition sp2; +alter table t_subpart_hash_hash_13 drop subpartition for (4, 100); -- drop p5_sp1 + +-- ɾӷ +alter table t_subpart_range_hash_1 drop subpartition sp_error; +alter table t_subpart_range_hash_7 drop subpartition for (100, 1); + +alter table t_subpart_list_hash_1 drop subpartition sp_error; +alter table t_subpart_list_hash_7 drop subpartition for (500, 1); + +alter table t_subpart_hash_hash_1 drop subpartition sp_error; +alter 
table t_subpart_hash_hash_7 drop subpartition for (501, 1); + +-- ٱ1ӷ +alter table t_subpart_range_hash_7 drop subpartition sp1; +alter table t_subpart_list_hash_7 drop subpartition sp1; +alter table t_subpart_hash_hash_7 drop subpartition sp1; + + +-- 飬ɾӷ +select p1.tablename, p1.relname, p1.parttype, p1.partstrategy, +p1.parentid, p1.boundaries, p1.relfilenode, p1.reltoastrelid +from schema_subpartition.v_subpartition p1 +where p1.tablename in ('t_subpart_range_hash_1', 't_subpart_range_hash_7', 't_subpart_range_hash_10', + 't_subpart_list_hash_1', 't_subpart_list_hash_7', 't_subpart_list_hash_10', + 't_subpart_hash_hash_1', 't_subpart_hash_hash_7', 't_subpart_hash_hash_10'); + +-- 飬ɾӷıռ +select p1.tablename, p1.relname, p1.parttype, p2.spcname tablespace_name +from schema_subpartition.v_subpartition p1 left join pg_tablespace p2 on p1.reltablespace = p2.oid +where p1.tablename = 't_subpart_hash_hash_13' +order by p1.parentid, p1.oid; + + + +---------------------------- +-- ݲ +---------------------------- +select * from t_subpart_range_hash_1 partition (p2); +select * from t_subpart_range_hash_1 partition for (10); +select * from t_subpart_range_hash_1 subpartition (sp2); +select * from t_subpart_range_hash_1 subpartition for (50, 51); + +select * from t_subpart_list_hash_1 partition (p2); +select * from t_subpart_list_hash_1 partition for (10); +select * from t_subpart_list_hash_1 subpartition (sp2); +select * from t_subpart_list_hash_1 subpartition for (50, 51); + +select * from t_subpart_hash_hash_1 partition (p2); +select * from t_subpart_hash_hash_1 partition for (1); +select * from t_subpart_hash_hash_1 subpartition (sp2); +select * from t_subpart_hash_hash_1 subpartition for (51, 51); + + + +---------------------------- +-- ݸ +---------------------------- +update t_subpart_range_hash_1 partition (p2) set id = id + 10; +update t_subpart_range_hash_1 partition for (10) set id = id + 10; +update t_subpart_range_hash_1 subpartition (sp2) set id = id + 10; 
+update t_subpart_range_hash_1 subpartition for (50, 51) set id = id + 10; + +update t_subpart_list_hash_1 partition (p2) set id = id + 10; +update t_subpart_list_hash_1 partition for (10) set id = id + 10; +update t_subpart_list_hash_1 subpartition (sp2) set id = id + 10; +update t_subpart_list_hash_1 subpartition for (50, 51) set id = id + 10; + +update t_subpart_hash_hash_1 partition (p2) set id = id + 10; +update t_subpart_hash_hash_1 partition for (1) set id = id + 10; +update t_subpart_hash_hash_1 subpartition (sp2) set id = id + 10; +update t_subpart_hash_hash_1 subpartition for (51, 51) set id = id + 10; + + + +---------------------------- +-- ɾ +---------------------------- +delete from t_subpart_range_hash_1 partition (p2); +delete from t_subpart_range_hash_1 partition for (10); +delete from t_subpart_range_hash_1 subpartition (sp2); +delete from t_subpart_range_hash_1 subpartition for (50, 51); + +delete from t_subpart_list_hash_1 partition (p2); +delete from t_subpart_list_hash_1 partition for (10); +delete from t_subpart_list_hash_1 subpartition (sp2); +delete from t_subpart_list_hash_1 subpartition for (50, 51); + +delete from t_subpart_hash_hash_1 partition (p2); +delete from t_subpart_hash_hash_1 partition for (1); +delete from t_subpart_hash_hash_1 subpartition (sp2); +delete from t_subpart_hash_hash_1 subpartition for (51, 51); + + + +---------------------------- +-- ִмƻ +---------------------------- +-- range-hash +create table t_subpart_range_hash_20 (id integer, name text) +partition by range(name) +subpartition by hash(name) +( +partition p1 values less than ('e'), +partition p2 values less than ('k') + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values less than (MAXVALUE) + ( + subpartition sp3, + subpartition sp4 + ) +); +insert into t_subpart_range_hash_20 values (1,'a'); +insert into t_subpart_range_hash_20 values (2,'e'); +insert into t_subpart_range_hash_20 values (3,'g'); +insert into t_subpart_range_hash_20 values 
(4,'m'); +insert into t_subpart_range_hash_20 values (5,'r'); +insert into t_subpart_range_hash_20 values (6,NULL); + +explain(costs off) select * from t_subpart_range_hash_20; + +explain(costs off) select * from t_subpart_range_hash_20 where name is null; +select * from t_subpart_range_hash_20 where name is null; +explain(costs off) select * from t_subpart_range_hash_20 where name is not null; +select * from t_subpart_range_hash_20 where name is not null; + +explain(costs off) select * from t_subpart_range_hash_20 where name = 'e'; +select * from t_subpart_range_hash_20 where name = 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name > 'e'; +select * from t_subpart_range_hash_20 where name > 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name >= 'e'; +select * from t_subpart_range_hash_20 where name >= 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name < 'e'; +select * from t_subpart_range_hash_20 where name < 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name <= 'e'; +select * from t_subpart_range_hash_20 where name <= 'e'; +explain(costs off) select * from t_subpart_range_hash_20 where name <> 'e'; +select * from t_subpart_range_hash_20 where name <> 'e'; + +explain(costs off) select * from t_subpart_range_hash_20 where name = 'e' and name is null; +select * from t_subpart_range_hash_20 where name = 'e' and name is null; +explain(costs off) select * from t_subpart_range_hash_20 where name = 'e' or name is null; +select * from t_subpart_range_hash_20 where name = 'e' or name is null; + +explain(costs off) select * from t_subpart_range_hash_20 where name in ('r', NULL); +select * from t_subpart_range_hash_20 where name in ('r', NULL); +explain(costs off) select * from t_subpart_range_hash_20 where name = any(array['e', 'g']) or name in ('r', NULL); +select * from t_subpart_range_hash_20 where name = any(array['e', 'g']) or name in ('r', NULL); + + +-- list-hash +create table 
t_subpart_list_hash_20 (id integer, age integer, name text) +partition by list(age) +subpartition by hash(name) +( +partition p1 values (1, 2, 3), +partition p2 values (10, 20, 50, 60) + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 values (DEFAULT) + ( + subpartition sp3, + subpartition sp4 + ) +); +insert into t_subpart_list_hash_20 values (1, 1, NULL); +insert into t_subpart_list_hash_20 values (2, 20, 'b'); +insert into t_subpart_list_hash_20 values (3, 50, 'f'); +insert into t_subpart_list_hash_20 values (4, 100, NULL); +insert into t_subpart_list_hash_20 values (5, NULL, 'g'); +insert into t_subpart_list_hash_20 values (6, NULL, NULL); + +explain(costs off) select * from t_subpart_list_hash_20; + +explain(costs off) select * from t_subpart_list_hash_20 where age is null; +select * from t_subpart_list_hash_20 where age is null; +explain(costs off) select * from t_subpart_list_hash_20 where age is not null; +select * from t_subpart_list_hash_20 where age is not null; +explain(costs off) select * from t_subpart_list_hash_20 where name is null; +select * from t_subpart_list_hash_20 where name is null; +explain(costs off) select * from t_subpart_list_hash_20 where name is not null; +select * from t_subpart_list_hash_20 where name is not null; + +explain(costs off) select * from t_subpart_list_hash_20 where age is null and name is null; +select * from t_subpart_list_hash_20 where age is null and name is null; +explain(costs off) select * from t_subpart_list_hash_20 where age is null or name is null; +select * from t_subpart_list_hash_20 where age is null or name is null; + +explain(costs off) select * from t_subpart_list_hash_20 where age = 20; +select * from t_subpart_list_hash_20 where age = 20; +explain(costs off) select * from t_subpart_list_hash_20 where name = 'b'; +select * from t_subpart_list_hash_20 where name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 and name = 'b'; +select * from t_subpart_list_hash_20 
where age = 20 and name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 or name = 'b'; +select * from t_subpart_list_hash_20 where age = 20 or name = 'b'; + +explain(costs off) select * from t_subpart_list_hash_20 where age is null and name = 'b'; +select * from t_subpart_list_hash_20 where age is null and name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age is null or name = 'b'; +select * from t_subpart_list_hash_20 where age is null or name = 'b'; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 and name is null; +select * from t_subpart_list_hash_20 where age = 20 and name is null; +explain(costs off) select * from t_subpart_list_hash_20 where age = 20 or name is null; +select * from t_subpart_list_hash_20 where age = 20 or name is null; + +explain(costs off) select * from t_subpart_list_hash_20 where name = any(array['g', NULL]); +select * from t_subpart_list_hash_20 where name = any(array['g', NULL]); +explain(costs off) select * from t_subpart_list_hash_20 where age in (20, 200) and name = any(array['g', NULL]); +select * from t_subpart_list_hash_20 where age in (20, 200) and name = any(array['g', NULL]); + + +-- hash-hash +create table t_subpart_hash_hash_20 (id integer, name text, bd time) +partition by hash(name) +subpartition by hash(bd) +( +partition p1, +partition p2 + ( + subpartition sp1, + subpartition sp2 + ), +partition p3 + ( + subpartition sp3, + subpartition sp4 + ) +); +insert into t_subpart_hash_hash_20 values (1, 'a', '1:2:3'); +insert into t_subpart_hash_hash_20 values (2, 'g', NULL); +insert into t_subpart_hash_hash_20 values (3, 'h', '11:2:3'); +insert into t_subpart_hash_hash_20 values (4, 'o', NULL); +insert into t_subpart_hash_hash_20 values (5, 't', '21:0:0'); +insert into t_subpart_hash_hash_20 values (6, NULL, NULL); + +explain(costs off) select * from t_subpart_hash_hash_20; + +explain(costs off) select * from t_subpart_hash_hash_20 where name is null; 
+select * from t_subpart_hash_hash_20 where name is null; +explain(costs off) select * from t_subpart_hash_hash_20 where name is not null; +select * from t_subpart_hash_hash_20 where name is not null; +explain(costs off) select * from t_subpart_hash_hash_20 where bd is null; +select * from t_subpart_hash_hash_20 where bd is null; +explain(costs off) select * from t_subpart_hash_hash_20 where bd is not null; +select * from t_subpart_hash_hash_20 where bd is not null; +explain(costs off) select * from t_subpart_hash_hash_20 where name is null and bd is null; +select * from t_subpart_hash_hash_20 where name is null and bd is null; + +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g'; +select * from t_subpart_hash_hash_20 where name = 'g'; +explain(costs off) select * from t_subpart_hash_hash_20 where bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' and bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name = 'g' and bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' or bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name = 'g' or bd = '11:2:3'; + +explain(costs off) select * from t_subpart_hash_hash_20 where name is null and bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name is null and bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name is null or bd = '11:2:3'; +select * from t_subpart_hash_hash_20 where name is null or bd = '11:2:3'; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' and bd is null; +select * from t_subpart_hash_hash_20 where name = 'g' and bd is null; +explain(costs off) select * from t_subpart_hash_hash_20 where name = 'g' or bd is null; +select * from t_subpart_hash_hash_20 where name = 'g' or bd is null; + +explain(costs off) select * from t_subpart_hash_hash_20 where bd = any(array['11:2:3'::time, 
'21:0:0'::time]); +select * from t_subpart_hash_hash_20 where bd = any(array['11:2:3'::time, '21:0:0'::time]); +explain(costs off) select * from t_subpart_hash_hash_20 where name in ('g','o') and bd = any(array['11:2:3'::time, '21:0:0'::time]); +select * from t_subpart_hash_hash_20 where name in ('g','o') and bd = any(array['11:2:3'::time, '21:0:0'::time]); + + + +---------------------------- +-- TRUNCATE ӷ +---------------------------- +-- PARTITION [FOR] +alter table t_subpart_range_hash_1 truncate partition p1; +alter table t_subpart_range_hash_1 truncate partition for (10); +alter table t_subpart_range_hash_10 truncate partition p2; +alter table t_subpart_range_hash_10 truncate partition for (10, 'MAXVALUE'); + +alter table t_subpart_list_hash_1 truncate partition p1; +alter table t_subpart_list_hash_1 truncate partition for (10); +alter table t_subpart_list_hash_10 truncate partition p2; +alter table t_subpart_list_hash_10 truncate partition for (100); + +alter table t_subpart_hash_hash_1 truncate partition p1; +alter table t_subpart_hash_hash_1 truncate partition for (0); +alter table t_subpart_hash_hash_7 truncate partition p1; +alter table t_subpart_hash_hash_7 truncate partition for (100); +alter table t_subpart_hash_hash_10 truncate partition p2; +alter table t_subpart_hash_hash_10 truncate partition for (1); + +-- SUBPARTITION [FOR] +alter table t_subpart_range_hash_1 truncate subpartition p1_subpartdefault1; +alter table t_subpart_range_hash_1 truncate subpartition for (100, 51); +alter table t_subpart_range_hash_10 truncate subpartition sp2; +alter table t_subpart_range_hash_10 truncate subpartition for (10, 'MAXVALUE', 9); + +alter table t_subpart_list_hash_1 truncate subpartition p1_subpartdefault1; +alter table t_subpart_list_hash_1 truncate subpartition for (10, 51); +alter table t_subpart_list_hash_10 truncate subpartition sp2; +alter table t_subpart_list_hash_10 truncate subpartition for (100, 9); + +alter table t_subpart_hash_hash_1 truncate 
subpartition p1_subpartdefault1; +alter table t_subpart_hash_hash_1 truncate subpartition for (11, 51); +alter table t_subpart_hash_hash_7 truncate subpartition sp1; +alter table t_subpart_hash_hash_7 truncate subpartition for (101, 10); +alter table t_subpart_hash_hash_10 truncate subpartition sp2; +alter table t_subpart_hash_hash_10 truncate subpartition for (1, 7); + + +-- ӷ +alter table t_subpart_range_hash_1 truncate partition p_error; +alter table t_subpart_range_hash_1 truncate partition for (300); +alter table t_subpart_range_hash_1 truncate subpartition sp_error; +alter table t_subpart_range_hash_1 truncate subpartition for (10, 4); -- ok + +alter table t_subpart_list_hash_1 truncate partition p_error; +alter table t_subpart_list_hash_1 truncate partition for (999); +alter table t_subpart_list_hash_1 truncate subpartition sp_error; +alter table t_subpart_list_hash_1 truncate subpartition for (10, 4); -- ok + +alter table t_subpart_hash_hash_1 truncate partition p_error; +alter table t_subpart_hash_hash_1 truncate partition for (4); -- ok +alter table t_subpart_hash_hash_1 truncate subpartition sp_error; +alter table t_subpart_hash_hash_1 truncate subpartition for (11, 4); -- ok + + + +---------------------------- +-- 沿ֻ﷨ļ򵥼飬coredump幦ܻ֧ +---------------------------- +-- TODO SET ӷģ +alter table t_subpart_range_hash_1 set subpartition template +( + subpartition sp1, + subpartition sp2 +); + +alter table t_subpart_list_hash_1 set subpartition template +( + subpartition sp1, + subpartition sp2 +); + +alter table t_subpart_hash_hash_1 set subpartition template +( + subpartition sp1, + subpartition sp2 +); + + + +---------------------------- +-- TODO SPLIT [SUB]PARTITION [FOR] +---------------------------- +-- TODO SPLIT RANGE PARTITION [FOR] +alter table t_subpart_range_hash_2 split partition p1 at (5) into (partition p1_1, partition p1_2); +alter table t_subpart_range_hash_2 split partition for (50) at (50) into (partition p2_1, partition p2_2); +-- oracleһķΧָ 
+alter table t_subpart_range_hash_2 split partition p3 into (partition p3_1 end (200), partition p3_2 end (300), partition p3_3 end (400), partition p3_4 end (500), partition p3_5 end (MAXVALUE)); +alter table t_subpart_range_hash_2 split partition for (50) into (partition p2_2_1 values less than (60), partition p2_2_2 values less than (100)); + +-- error, RANGE ֧ VALUES ... INTO +alter table t_subpart_range_hash_2 split partition p1_1 values (5) into (partition p1_1_1, partition p1_1_2); + + +-- TODO SPLIT LIST PARTITION [FOR] +alter table t_subpart_list_hash_2 split partition p1 values (5) into (partition p1_1, partition p1_2); +alter table t_subpart_list_hash_2 split partition for (100) values (200) into (partition p1_1, partition p1_2); +alter table t_subpart_list_hash_2 split partition p2 into (partition p3_1 values (10), partition p3_2 values (20), partition p3_3 values (30), partition p3_4 values (40), partition p3_5 values (50)); + +-- error, LIST ֧ AT ... INTO +alter table t_subpart_list_hash_2 split partition p1 at (3) into (partition p1_1, partition p1_2); +alter table t_subpart_list_hash_2 split partition for (3) at (3) into (partition p1_1 values (10), partition p1_2 values (20.6789)); + + +-- HASH ֧SPLIT +alter table t_subpart_hash_hash_2 split partition p1 at (5) into (partition p1_1, partition p1_2); +alter table t_subpart_hash_hash_2 split partition for (0) at (5) into (partition p1_1, partition p1_2); +alter table t_subpart_hash_hash_2 split partition p2 values (5) into (partition p1_1, partition p1_2); +alter table t_subpart_hash_hash_2 split partition p3 into (partition p3_1 values less than (100), partition p3_2 values less than (200)); + +-- HASH ӷ֧SPLIT +alter table t_subpart_hash_hash_3 split subpartition sp1 at ('a') into (subpartition sp1_1, subpartition sp1_2); +alter table t_subpart_hash_hash_3 split subpartition for (100, '1') at ('a') into (subpartition sp2_1, subpartition sp2_2 ); +alter table t_subpart_hash_hash_3 split subpartition 
sp1 values ('a') into (subpartition sp1_1, subpartition sp1_2); +alter table t_subpart_hash_hash_3 split subpartition for (100, '1') values ('1', '2') into (subpartition sp2_1, subpartition sp2_2 ); +alter table t_subpart_hash_hash_3 split subpartition sp3 into (subpartition sp3_1 values ('A'), subpartition sp3_2 values ('B'), subpartition sp3_3 values ('C'), subpartition sp3_4 values ('D', 'E')); +alter table t_subpart_hash_hash_3 split subpartition for (300, '1') into (subpartition sp5_1 values ('1', '2', '3', '4', '5'), subpartition sp5_2 values ('A', 'B', 'C', 'D', 'E'), subpartition sp5_3 values (DEFAULT)); + + +-- TODO HASHҲжӦIJַ﷨ + + +-- TODO SPLIT INDEX PARTITION + + + +---------------------------- +-- TODO MERGE [SUB]PARTITIONS [FOR] +-- MERGE TO ֻ֧Rangeϲڵз +---------------------------- +-- TODO MERGE RANGE PARTITIONS [FOR] +alter table t_subpart_range_hash_1 merge partitions p1,p2 into partition p12; +alter table t_subpart_range_hash_1 merge partitions for (1), for (10), for (100), for (200) into partition p1234; +alter table t_subpart_range_hash_1 merge partitions p1 to p4 into partition p1234; + +-- TODO MERGE LIST PARTITION [FOR] +alter table t_subpart_list_hash_1 merge partitions p1,p2 into partition p12; +alter table t_subpart_list_hash_1 merge partitions for (1), for (10), for (70), for (222) into partition p1234; + +alter table t_subpart_list_hash_1 merge partitions p1 to p4 into partition p1234; -- error + +-- TODO Hash֧MERGE﷨ +alter table t_subpart_hash_hash_1 merge partitions p1,p1 into partition p12; +alter table t_subpart_hash_hash_1 merge partitions for (0), for (1) into partition p12; +alter table t_subpart_hash_hash_1 merge partitions p1 to p3 into partition p123; + +alter table t_subpart_hash_hash_1 merge subpartitions sp1,sp1 into subpartition sp12; +alter table t_subpart_hash_hash_1 merge subpartitions for (1, 0), for (1, 1) into subpartition p12; +alter table t_subpart_hash_hash_1 merge subpartitions sp1 to sp2 into subpartition sp12; + 
+ +-- TODO HASHҲжӦĺϲ﷨ + + + +---------------------------- +-- TODO EXCHANGE PARTITION [FOR] +---------------------------- +create table t_subpart_range_hash_8_exchange (like t_subpart_range_hash_8); +alter table t_subpart_range_hash_8_exchange add primary key (id, age, name); +create table t_subpart_list_hash_8_exchange (like t_subpart_list_hash_8); +alter table t_subpart_list_hash_8_exchange add primary key (id, age, name); +create table t_subpart_hash_hash_8_exchange (like t_subpart_hash_hash_8); +alter table t_subpart_hash_hash_8_exchange add primary key (id, age, name); + +alter table t_subpart_range_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_range_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_range_hash_8_exchange; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION p1 with table t_subpart_range_hash_8_exchange WITH VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION for (20, 'A') with table t_subpart_range_hash_8_exchange VERBOSE; +alter table t_subpart_range_hash_8 EXCHANGE PARTITION for (19, 'BBB') with table t_subpart_range_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_list_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_list_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_list_hash_8_exchange; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION p1 with table t_subpart_list_hash_8_exchange WITH VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION for (20) with table t_subpart_list_hash_8_exchange VERBOSE; +alter table t_subpart_list_hash_8 EXCHANGE PARTITION for (20)with table t_subpart_list_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_hash_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION (p1) with table t_subpart_hash_hash_8_exchange; 
+alter table t_subpart_hash_hash_8 EXCHANGE PARTITION p1 with table t_subpart_hash_hash_8_exchange WITH VALIDATION; +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION for (10) with table t_subpart_hash_hash_8_exchange VERBOSE; +alter table t_subpart_hash_hash_8 EXCHANGE PARTITION for (10) with table t_subpart_hash_hash_8_exchange WITH VALIDATION VERBOSE; + + + +---------------------------- +-- EXCHANGE SUBPARTITION [FOR] +---------------------------- +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_range_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_range_hash_8_exchange; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_range_hash_8_exchange WITH VALIDATION; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION for (20, 'A', '10') with table t_subpart_range_hash_8_exchange VERBOSE; +alter table t_subpart_range_hash_8 EXCHANGE SUBPARTITION for (19, 'BBB', '10') with table t_subpart_range_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_list_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_list_hash_8_exchange; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_list_hash_8_exchange WITH VALIDATION; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION for (20, '20') with table t_subpart_list_hash_8_exchange VERBOSE; +alter table t_subpart_list_hash_8 EXCHANGE SUBPARTITION for (20, '20') with table t_subpart_list_hash_8_exchange WITH VALIDATION VERBOSE; + +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_hash_hash_8_exchange WITHOUT VALIDATION; +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION 
p1_subpartdefault1 with table t_subpart_hash_hash_8_exchange; +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION p1_subpartdefault1 with table t_subpart_hash_hash_8_exchange WITH VALIDATION; +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION for (10, '20') with table t_subpart_hash_hash_8_exchange VERBOSE; +alter table t_subpart_hash_hash_8 EXCHANGE SUBPARTITION for (10, '20') with table t_subpart_hash_hash_8_exchange WITH VALIDATION VERBOSE; + + +drop table t_subpart_range_hash_8_exchange; +drop table t_subpart_list_hash_8_exchange; +drop table t_subpart_hash_hash_8_exchange; + + + +-- TODO List MODIFY ADD/DROP VALUES (...) +-- alter table xxx modify partition p1 add values (); +-- alter table xxx modify partition p1 drop values (); +-- alter table xxx modify subpartition p1 add values (); +-- alter table xxx modify subpartition p1 addropd values (); + + + +---------------------------- +-- TODO MOVE [SUB]PARTITION [FOR] +---------------------------- +alter table t_subpart_range_hash_10 move partition p1 tablespace ts_subpart_hash_1; +alter table t_subpart_range_hash_10 move partition for (10, 'MAXVALUE') tablespace ts_subpart_hash_1; +alter table t_subpart_range_hash_10 move subpartition sp2 tablespace ts_subpart_hash_2; +alter table t_subpart_range_hash_10 move subpartition for (10, 'MAXVALUE', '1') tablespace ts_subpart_hash_2; + +alter table t_subpart_list_hash_10 move partition p1 tablespace ts_subpart_hash_1; +alter table t_subpart_list_hash_10 move partition for (100) tablespace ts_subpart_hash_1; +alter table t_subpart_list_hash_10 move subpartition sp2 tablespace ts_subpart_hash_2; +alter table t_subpart_list_hash_10 move subpartition for (100, '1') tablespace ts_subpart_hash_2; + +alter table t_subpart_hash_hash_10 move partition p1 tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_10 move partition for (1) tablespace ts_subpart_hash_1; +alter table t_subpart_hash_hash_10 move subpartition sp2 tablespace ts_subpart_hash_2; +alter 
table t_subpart_hash_hash_10 move subpartition for (1, '1') tablespace ts_subpart_hash_2; + + + +---------------------------- +-- TODO ROW MOVEMENT +---------------------------- +alter table t_subpart_range_hash_10 enable row movement; +alter table t_subpart_range_hash_10 disable row movement; + +alter table t_subpart_list_hash_10 enable row movement; +alter table t_subpart_list_hash_10 disable row movement; + +alter table t_subpart_hash_hash_10 enable row movement; +alter table t_subpart_hash_hash_10 disable row movement; + + + +---------------------------- +-- ALTER INDEX ... UNUSABLE +-- ALTER INDEX ... REBUILD +-- ALTER INDEX ... MODIFY [SUB]PARTITION name UNUSABLE +-- ALTER INDEX ... REBUILD [SUB]PARTITION name +---------------------------- +alter index i_t_subpart_hash_hash_10_3 UNUSABLE; +-- طֻһԻùؼPARTITIONSUBPARTITION +-- ָķDZķoracleв +alter index i_t_subpart_hash_hash_10_3 REBUILD partition subp1_bd_idx_local; +alter index i_t_subpart_hash_hash_10_3 REBUILD subpartition subp3_bd_idx_local; + +alter index i_t_subpart_hash_hash_10_4 UNUSABLE; +alter index i_t_subpart_hash_hash_10_4 REBUILD partition subp1_index_local; -- error +alter index i_t_subpart_hash_hash_10_4 REBUILD subpartition subp3_index_local; -- error + +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; +select relname, relkind, parttype, indisusable from pg_class left join pg_index on pg_class.oid=indexrelid where pg_class.oid in ('i_t_subpart_hash_hash_10_3'::regclass, 'i_t_subpart_hash_hash_10_4'::regclass) order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +-- alter index i_t_subpart_hash_hash_10_3 REBUILD; +-- alter index i_t_subpart_hash_hash_10_4 REBUILD; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter index i_t_subpart_hash_hash_10_3 modify partition subp1_bd_idx_local unusable; +alter index 
i_t_subpart_hash_hash_10_3 modify subpartition subp3_bd_idx_local unusable; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; +select relname, relkind, parttype, indisusable from pg_class left join pg_index on pg_class.oid=indexrelid where pg_class.oid in ('i_t_subpart_hash_hash_10_3'::regclass, 'i_t_subpart_hash_hash_10_4'::regclass) order by relname; + + + +---------------------------- +-- ALTER TABLE ... MODIFY [SUB]PARTITION [FOR] ... [REBUILD] UNUSABLE LOCAL INDEXES +---------------------------- +alter table t_subpart_hash_hash_10 modify partition p1 unusable local indexes; +alter table t_subpart_hash_hash_10 modify partition for (3) unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter table t_subpart_hash_hash_10 modify partition p1 REBUILD unusable local indexes; +alter table t_subpart_hash_hash_10 modify partition for (3) REBUILD unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter table t_subpart_hash_hash_10 modify subpartition sp1 unusable local indexes; +alter table t_subpart_hash_hash_10 modify subpartition for (3, NULL) unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + +alter table t_subpart_hash_hash_10 modify subpartition sp1 REBUILD unusable local indexes; +alter table t_subpart_hash_hash_10 modify subpartition for 
(3, NULL) REBUILD unusable local indexes; +select relname, parttype, indisusable from pg_partition where parentid='i_t_subpart_hash_hash_10_3'::regclass order by relname; + +explain (costs off) +select * from t_subpart_hash_hash_10 where bd = '2999-01-01'; + + + +---------------------------- +-- TODO RENAME +---------------------------- +alter table t_subpart_hash_hash_10 rename partition p2 to p0; +alter table t_subpart_hash_hash_10 rename partition p0 to p2; + +alter table t_subpart_hash_hash_10 rename subpartition sp2 to sp0; +alter table t_subpart_hash_hash_10 rename subpartition sp0 to sp2; + + + +-- TODO ӷ + +-- TODO ϵͳͼ +select table_name,partitioning_type,subpartitioning_type,partition_count, +def_subpartition_count,partitioning_key_count,subpartitioning_key_count +from all_part_tables where lower(table_name) in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11') order by table_name; + +select (table_owner is not null) as has_owner,table_name,partition_name,subpartition_name from all_tab_subpartitions where lower(table_name) in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11') order by table_name,partition_name,subpartition_name; + +select (table_owner is not null) as has_owner,table_name,partition_name,subpartition_count from all_tab_partitions where lower(table_name) in ('t_subpart_range_hash_11', 't_subpart_list_hash_11', 't_subpart_hash_hash_11') order by table_name,partition_name; + + + +-- TODO д +CREATE TABLE t_subpart_cstore_hh (id integer, name varchar(30), db date) +with ( orientation = column ) +partition by hash(id) +subpartition by hash(db) +( +partition p1 +); + + + +---------------------------- +-- ERROR +---------------------------- +-- VALUES֧ӷ +create table t_subpart_error (id integer, name varchar(30)) +partition by VALUES(id) +subpartition by hash(id); +-- ֧VALUESӷ +create table t_subpart_error (id integer, name varchar(30)) +partition by hash(id) +subpartition by VALUES(name) +( 
+partition p1 +); + +create table t_subpart_interval (id integer, name varchar(30), db date) +partition by range(db) +INTERVAL ('1 day') +subpartition by hash(id) +( +partition p1 values less than ('2000-01-01') +); +-- ݲ֧intervalӷ +create table t_subpart_error (id integer, name varchar(30), db date) +partition by hash(id) +subpartition by range(db) +INTERVAL ('1 day') +( +partition p1 +); + + + +-- 顰롱ķӷͷΪ0 +select oid,relname from pg_class +where (relkind = 'r' and parttype != 'n' and oid not in (select distinct parentid from pg_partition where parttype='r')) + or (relkind = 'i' and parttype != 'n' and oid not in (select distinct parentid from pg_partition where parttype='x')); + +select p1.relname, p1.parttype, p1.parentid, p1.boundaries +from pg_partition p1 +where (p1.parttype = 'r' and p1.parentid not in (select oid from pg_class where relkind = 'r' and parttype != 'n')) -- pg_classĿ + or (p1.parttype = 'r' and not exists (select oid from pg_partition where parttype='p' and parentid=p1.parentid)) -- ڷ + or (p1.parttype = 'p' and not exists (select oid from pg_partition where parttype='r' and parentid=p1.parentid)) -- + or (p1.parttype = 'p' and exists (select oid from pg_class where parttype='s' and oid=p1.parentid) and not exists (select oid from pg_partition where parttype='s' and parentid=p1.oid)) -- ķӷ + or (p1.parttype = 's' and not exists (select oid from pg_partition where parttype='p' and oid=p1.parentid)) -- ڸ + or (p1.parttype = 'x' and p1.parentid not in (select oid from pg_class where relkind = 'i' and parttype != 'n')) -- + or (p1.indextblid != 0 and p1.indextblid not in (select oid from pg_partition where parttype != 'r')); -- + +drop index i_t_subpart_hash_hash_10_3, i_t_subpart_hash_hash_10_4; + +-- +-- drop table t_subpart_normal_table_hash, t_subpart_part_table_hash; +-- drop schema schema_vastbase_subpartition_hash cascade; +-- drop tablespace ts_subpart_hash_1; +-- drop tablespace ts_subpart_hash_2; +-- drop tablespace 
ts_subpart_hash_test_user; +-- drop user user_subpart_hash; diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_trigger.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_trigger.sql new file mode 100644 index 0000000000..0c11b1e25e --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_trigger.sql @@ -0,0 +1,109 @@ +create table employees(id int,salary int); + +create or replace trigger t +before insert or update of salary,id +or delete on employees +begin +case +when inserting then +dbms_output.put_line('inserting'); +when updating ('salary') then +dbms_output.put_line('updating salary'); +when updating ('id') then +dbms_output.put_line('updating id'); +when deleting then +dbms_output.put_line('deleting'); +end case; +end; +/ + +create table oldtab(id int,c1 char(8)); +create table newtab(id int,c1 int); + +create or replace trigger tri1 +after insert on oldtab +for each statement +begin +insert into newtab values(1,1),(2,2),(3,3); +end; +/ + +create or replace trigger tri2 +after update on oldtab +for each statement +begin +update newtab set c1=4 where id=2; +end; +/ + +create or replace trigger tri4 +after truncate on oldtab +for each statement +begin +insert into newtab values(4,4); +end; +/ + +create table oldtab2(id int,c1 char(8)); +create table newtab2(id int,c1 int); + +CREATE OR REPLACE FUNCTION func_tri21() +RETURNS TRIGGER AS $$ +BEGIN +insert into newtab2 values(1,1),(2,2),(3,3); +RETURN OLD; +END; +$$ +LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION func_tri22() +RETURNS TRIGGER AS $$ +BEGIN +update newtab2 set c1=4 where id=2; +RETURN OLD; +END; +$$ +LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION func_tri24() +RETURNS TRIGGER AS $$ +BEGIN +insert into newtab2 values(4,4); +RETURN OLD; +END; +$$ +LANGUAGE plpgsql; + +create trigger tri21 +after insert on oldtab2 +for each statement +execute procedure func_tri21(); + +create trigger tri22 +after update on oldtab2 +for each 
statement +execute procedure func_tri22(); + +create trigger tri24 +after truncate on oldtab2 +for each statement +execute procedure func_tri24(); + +create table t_trig_when(f1 boolean primary key, f2 text, f3 int, f4 date); +create or replace function dummy_update_func() returns trigger as $$ +begin + raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW; + return new; +end; +$$ language plpgsql; + +create trigger f1_trig_update after update of f1 on t_trig_when for each row when (not old.f1 and new.f1) execute procedure dummy_update_func('update'); + +alter table t_trig_when DISABLE TRIGGER f1_trig_update; +alter table t_trig_when ENABLE TRIGGER f1_trig_update; +alter table t_trig_when DISABLE TRIGGER f1_trig_update; +alter table t_trig_when ENABLE ALWAYS TRIGGER f1_trig_update; +alter table t_trig_when DISABLE TRIGGER ALL; +alter table t_trig_when ENABLE TRIGGER ALL; +alter table t_trig_when DISABLE TRIGGER USER ; +alter table t_trig_when ENABLE TRIGGER USER ; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_type.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_type.sql new file mode 100644 index 0000000000..458bf97c14 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_type.sql @@ -0,0 +1,2 @@ +create type atype as (id int, name text); +create type btype as object (id int, name text); \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_drop_type.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_drop_type.sql new file mode 100644 index 0000000000..348f776851 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_drop_type.sql @@ -0,0 +1,13 @@ +create table atable (id int, age int); + +create type atype as (id int, name text); +drop type atype; + +create type btype as object (id int, name text); +drop type btype; + +drop type 
typ_not_exit; + +drop type public.typ_not_exit; + +drop type schema_not_exit.typ_not_exit; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.setup new file mode 100644 index 0000000000..9a92a4a505 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.setup @@ -0,0 +1,20 @@ +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +tblspace="$subscription_dir/tmp_tblspace" +rm -rf $tblspace +mkdir -p $tblspace + + +tblspace_sub="$subscription_dir/tmp_tblspace_sub" +rm -rf $tblspace_sub +mkdir -p $tblspace_sub + +exec_sql $case_use_db $pub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts1 LOCATION '$tblspace/hw_subpartition_tablespace_ts1';" +exec_sql $case_use_db $pub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts2 LOCATION '$tblspace/hw_subpartition_tablespace_ts2';" +exec_sql $case_use_db $pub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts3 LOCATION '$tblspace/hw_subpartition_tablespace_ts3';" + +exec_sql $case_use_db $sub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts1 LOCATION '$tblspace_sub/hw_subpartition_tablespace_ts1';" +exec_sql $case_use_db $sub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts2 LOCATION '$tblspace_sub/hw_subpartition_tablespace_ts2';" +exec_sql $case_use_db $sub_node1_port "CREATE TABLESPACE hw_subpartition_tablespace_ts3 LOCATION '$tblspace_sub/hw_subpartition_tablespace_ts3';" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.sql new file mode 100644 index 0000000000..cf2588ea6d --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_subpartition_tablespace.sql @@ -0,0 +1,1052 @@ 
+--DROP SCHEMA hw_subpartition_tablespace CASCADE; +CREATE SCHEMA hw_subpartition_tablespace; +SET CURRENT_SCHEMA TO hw_subpartition_tablespace; + +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); +-- DROP TABLEt_range_range1; + +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 
VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); +-- DROP TABLEt_range_range2; + +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 
VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); +-- DROP TABLEt_range_list1; + +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION 
P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); +-- DROP TABLEt_range_list2; + +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); +-- DROP TABLEt_range_hash1; + +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE 
hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); +-- DROP TABLEt_range_hash2; + +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE 
hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); +-- DROP TABLEt_list_range1; + +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION 
P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); +-- DROP TABLEt_list_range2; + +--list-list +CREATE TABLE t_list_list1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); +-- DROP TABLEt_list_list1; + +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION 
BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); +-- DROP TABLEt_list_list2; + +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES 
(1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); +-- DROP TABLEt_list_hash1; + +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 
+ ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); +-- DROP TABLEt_list_hash2; + +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); +-- DROP TABLEt_hash_range1; + +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE 
hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); +-- DROP TABLEt_hash_range2; + +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE 
hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); +-- DROP TABLEt_hash_list1; + +CREATE TABLE t_hash_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 
4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); +-- DROP TABLEt_hash_list2; + +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_hash1'); +-- DROP TABLEt_hash_hash1; + +CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE 
hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_hash2'); +-- DROP TABLEt_hash_hash2; + +-- +----test add partition with tablespace---- +-- +--since the add subpartition define use the same code, we only test different partition type: range/list +--range-list +CREATE TABLE t_range_list3(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ) +); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 
9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1; +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_range_list3'); +-- DROP TABLEt_range_list3; + + +CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ) +); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION 
P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30); +SELECT pg_get_tabledef('t_range_list4'); +-- DROP TABLEt_range_list4; + +--list-hash +CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ) +); + +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1; +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25); +SELECT pg_get_tabledef('t_list_hash3'); +-- DROP TABLEt_list_hash3; + +CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION 
P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ) +); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30); +SELECT pg_get_tabledef('t_list_hash4'); +-- DROP TABLEt_list_hash4; + +-- +----test add subpartition with tablespace---- +-- +--list-range +CREATE TABLE t_list_range3(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + 
SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20); +SELECT pg_get_tabledef('t_list_range3'); +-- DROP TABLEt_list_range3; + +CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE 
hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); +-- DROP TABLEt_list_range4; + +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE 
hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); +-- DROP TABLEt_hash_list3; + +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 
5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); +-- DROP TABLEt_hash_list4; + +-- +----test create index with tablespace---- +-- +CREATE TABLE t_range_list(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + 
SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (DEFAULT) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts2 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); + +CREATE INDEX t_range_list_idx ON t_range_list(c1,c2) LOCAL +( + PARTITION idx_p1( + SUBPARTITION idx_p1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION idx_p1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION idx_p1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION idx_p1_4 + ), + PARTITION idx_p2 TABLESPACE hw_subpartition_tablespace_ts2( + SUBPARTITION idx_p2_1, + SUBPARTITION idx_p2_2, + SUBPARTITION idx_p2_3 + ), + PARTITION idx_p3 TABLESPACE hw_subpartition_tablespace_ts2( + SUBPARTITION idx_p3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION idx_p3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION idx_p3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION idx_p3_4 + ), + PARTITION idx_p4( + SUBPARTITION idx_p4_1, + SUBPARTITION idx_p4_2 TABLESPACE hw_subpartition_tablespace_ts2 + ), + PARTITION idx_p5 TABLESPACE hw_subpartition_tablespace_ts3( + SUBPARTITION idx_p5_1 + ), + PARTITION idx_p6( + 
SUBPARTITION idx_p6_1 TABLESPACE hw_subpartition_tablespace_ts2 + ) +) TABLESPACE hw_subpartition_tablespace_ts1; + + +SELECT p.relname, t.spcname FROM pg_partition p, pg_class c, pg_namespace n, pg_tablespace t +WHERE p.parentid = c.oid + AND c.relname='t_range_list_idx' + AND c.relnamespace=n.oid + AND n.nspname=CURRENT_SCHEMA + AND p.reltablespace = t.oid +ORDER BY p.relname; + +SELECT pg_get_indexdef('hw_subpartition_tablespace.t_range_list_idx'::regclass); + +alter index t_range_list_idx rebuild; +alter index t_range_list_idx rebuild PARTITION idx_p2_1; + +create table t_settbs(id int,c1 text,c2 text); +create index idx_t_settbs_id on t_settbs(id); +alter table t_settbs SET TABLESPACE hw_subpartition_tablespace_ts1; + +alter table t_settbs ALTER INDEX idx_t_settbs_id INVISIBLE; +alter table t_settbs ALTER INDEX idx_t_settbs_id VISIBLE; + +alter table t_settbs ENABLE ROW LEVEL SECURITY; +alter table t_settbs DISABLE ROW LEVEL SECURITY; + +alter table t_settbs FORCE ROW LEVEL SECURITY; +alter table t_settbs NO FORCE ROW LEVEL SECURITY; + +drop TABLE if exists list_range_sales; +CREATE TABLE list_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel1_customer4 VALUES LESS THAN (1200) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_customer1 VALUES LESS THAN (500), + SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_customer1 VALUES LESS THAN (1200) + ) +); + +ALTER TABLE 
list_range_sales SPLIT SUBPARTITION channel2_customer2 AT (800) +INTO (SUBPARTITION channel2_customer3 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION channel2_customer4 TABLESPACE hw_subpartition_tablespace_ts2); + +drop TABLE if exists range_list_sales; +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 NOT NULL, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES (DEFAULT) + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); + +ALTER TABLE range_list_sales SPLIT SUBPARTITION customer2_channel2 VALUES (7) +INTO (SUBPARTITION customer2_channel2_0 TABLESPACE hw_subpartition_tablespace_ts2 , + SUBPARTITION customer2_channel2_1 TABLESPACE hw_subpartition_tablespace_ts2); + +-- DROP TABLEt_range_list; + +--finish +drop tablespace hw_subpartition_tablespace_ts1; +drop tablespace hw_subpartition_tablespace_ts2; +drop tablespace hw_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts2' +\! 
rm -fr '@testtablespace@/hw_subpartition_tablespace_ts3' + +DROP SCHEMA hw_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_view_def.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_view_def.sql new file mode 100644 index 0000000000..1a5917bad8 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_view_def.sql @@ -0,0 +1,36 @@ +CREATE TABLE tb_class (id INT,class_name TEXT); +INSERT INTO tb_class (id,class_name) VALUES (1,'class_1'); +INSERT INTO tb_class (id,class_name) VALUES (2,'class_2'); + +CREATE TABLE tb_student (id INT,student_name TEXT,class_id INT); +INSERT INTO tb_student (id,student_name,class_id) VALUES (1,'li lei',1); +INSERT INTO tb_student (id,student_name,class_id) VALUES (2,'han meimei',1); +INSERT INTO tb_student (id,student_name,class_id) VALUES (3,'zhang xiaoming',2); +INSERT INTO tb_student (id,student_name,class_id) VALUES (4,'wang peng',2); + +CREATE VIEW vw_class AS SELECT * FROM tb_class; +CREATE VIEW vw_student AS SELECT * FROM tb_student; +CREATE VIEW vw_class_student AS SELECT c.class_name,s.student_name FROM tb_class c JOIN tb_student s ON c.id = s.class_id; +CREATE VIEW vw_class_1_student AS SELECT c.class_name,s.student_name FROM tb_class c JOIN tb_student s ON c.id = s.class_id WHERE c.id = 1; + +CREATE TABLE tb_order (id INT,order_product TEXT,order_time timestamptz); +INSERT INTO tb_order (id,order_product) VALUES (1,'football'); +INSERT INTO tb_order (id,order_product) VALUES (2,'baskball'); + +CREATE VIEW vw_order AS SELECT * FROM tb_order; +ALTER VIEW vw_order ALTER COLUMN order_time SET DEFAULT now(); + + +CREATE TABLE tb_address (id INT,address TEXT); +INSERT INTO tb_address (id,address) VALUES (1,'a_address'); +INSERT INTO tb_address (id,address) VALUES (2,'b_address'); + +CREATE VIEW vw_address AS SELECT * FROM tb_address; +ALTER VIEW vw_address RENAME TO vw_address_new; + +CREATE TABLE tb_book (id INT,book_name 
TEXT); +INSERT INTO tb_book (id,book_name) VALUES (1,'englisen'); +INSERT INTO tb_book (id,book_name) VALUES (2,'math'); + +CREATE VIEW vw_book AS SELECT * FROM tb_book; +DROP VIEW vw_book; \ No newline at end of file diff --git a/src/test/subscription/testcase/dump_expected/dump_db_puball.pub b/src/test/subscription/testcase/dump_expected/dump_db_puball.pub index db2ca2f77e..31a00f689f 100644 --- a/src/test/subscription/testcase/dump_expected/dump_db_puball.pub +++ b/src/test/subscription/testcase/dump_expected/dump_db_puball.pub @@ -9,7 +9,6 @@ SET standard_conforming_strings = on; SET check_function_bodies = false; SET session_replication_role = replica; SET client_min_messages = warning; -SET dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length'; SET search_path = public; @@ -44,7 +43,7 @@ COPY public.test1 (a, b) FROM stdin; -- Name: mypub; Type: PUBLICATION; Schema: -; Owner: gauss -- -CREATE PUBLICATION mypub FOR ALL TABLES WITH (publish = 'insert, update, delete',ddl = 'all'); +CREATE PUBLICATION mypub FOR ALL TABLES WITH (publish = 'insert, update, delete, truncate',ddl = 'all'); ALTER PUBLICATION mypub OWNER TO gauss; diff --git a/src/test/subscription/testcase/dump_expected/dump_db_puball.sub b/src/test/subscription/testcase/dump_expected/dump_db_puball.sub index 197bc05ea4..94bafc4483 100644 --- a/src/test/subscription/testcase/dump_expected/dump_db_puball.sub +++ b/src/test/subscription/testcase/dump_expected/dump_db_puball.sub @@ -9,7 +9,6 @@ SET standard_conforming_strings = on; SET check_function_bodies = false; SET session_replication_role = replica; SET client_min_messages = warning; -SET dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length'; SET search_path = public; diff --git a/src/test/subscription/testcase/dump_expected/dump_db_pubtable.pub b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.pub index 52c1c44159..f129e0789b 100644 --- 
a/src/test/subscription/testcase/dump_expected/dump_db_pubtable.pub +++ b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.pub @@ -9,7 +9,6 @@ SET standard_conforming_strings = on; SET check_function_bodies = false; SET session_replication_role = replica; SET client_min_messages = warning; -SET dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length'; SET search_path = public; diff --git a/src/test/subscription/testcase/dump_expected/dump_db_pubtable.sub b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.sub index 197bc05ea4..94bafc4483 100644 --- a/src/test/subscription/testcase/dump_expected/dump_db_pubtable.sub +++ b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.sub @@ -9,7 +9,6 @@ SET standard_conforming_strings = on; SET check_function_bodies = false; SET session_replication_role = replica; SET client_min_messages = warning; -SET dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length'; SET search_path = public; -- Gitee From cdb8f4d2774d717f3abfa9b1761b52e6fe0eb177 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Tue, 6 Aug 2024 21:41:22 +0800 Subject: [PATCH 163/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3timescaledb=E6=8F=92?= =?UTF-8?q?=E4=BB=B6,=E6=97=B6=E5=BA=8F=E8=A1=A8=E6=9F=A5=E7=9C=8B?= =?UTF-8?q?=E6=8C=87=E5=AE=9A=E7=9A=84SQL=E6=89=A7=E8=A1=8C=E8=AE=A1?= =?UTF-8?q?=E5=88=92=E4=B8=8D=E5=87=86=E7=A1=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/ruleutils.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index 74732fedb6..2fcbea44f9 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -8779,6 +8779,11 @@ static char* get_variable( schemaname = NULL; /* default assumptions */ refname = rte->eref->aliasname; + 
if (NULL != rte->relname && u_sess->hook_cxt.forTsdbHook) { + rte->relname = get_rel_name(rte->relid); + rte->eref->aliasname = rte->relname; + } + /* Exceptions occur only if the RTE is alias-less */ if (rte->alias == NULL) { if (rte->rtekind == RTE_RELATION || no_alias) { -- Gitee From b7c056bd95b5795c199bb0d617489236346d017f Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Fri, 9 Aug 2024 11:16:28 +0800 Subject: [PATCH 164/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Djdbc=20insert=20?= =?UTF-8?q?=E7=A9=BA=E5=80=BC=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/copyfuncs.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 1bf48d267e..5230e752bf 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -2576,7 +2576,7 @@ static Const* _copyConst(const Const* from) COPY_SCALAR_FIELD(constcollid); COPY_SCALAR_FIELD(constlen); - if (from->constbyval || from->constisnull) { + if (from->constbyval || from->constisnull || from->constlen == 0) { /* * passed by value so just copy the datum. Also, don't try to copy * struct when value is null! 
-- Gitee From a36694a29fc8608b56fb2f8f7c525ffb2f3b156b Mon Sep 17 00:00:00 2001 From: zhang_xubo <2578876417@qq.com> Date: Sat, 10 Aug 2024 09:12:47 +0800 Subject: [PATCH 165/347] =?UTF-8?q?=E6=9B=B4=E6=96=B0server=E5=8C=85?= =?UTF-8?q?=E5=90=8D=E5=90=8E=E7=BC=80=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- build/script/utils/common.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/script/utils/common.sh b/build/script/utils/common.sh index af3d3dcb7b..2d64317f05 100644 --- a/build/script/utils/common.sh +++ b/build/script/utils/common.sh @@ -148,6 +148,6 @@ declare release_file_list="${PLATFORM_ARCH}_${product_mode}_list" declare package_version_name="${version_number}-${os_name}${os_version}-${PLATFORM_ARCH}" declare libpq_package_name="${product_name}-Libpq-${package_version_name}.tar.gz" declare tools_package_name="${product_name}-Tools-${package_version_name}.tar.gz" -declare kernel_package_name="${product_name}-Server-${package_version_name}.tar.gz" +declare kernel_package_name="${product_name}-Server-${package_version_name}.tar.bz2" declare symbol_package_name="${product_name}-Symbol-${package_version_name}.tar.gz" declare sha256_name="${product_name}-Server-${package_version_name}.sha256" -- Gitee From b8992f5cff23dcc0691e58f9c56313b4d013482c Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Wed, 7 Aug 2024 18:34:01 +0800 Subject: [PATCH 166/347] =?UTF-8?q?=E5=9C=A8=E8=AF=8D=E6=B3=95=E4=B8=AD?= =?UTF-8?q?=E6=8F=90=E5=89=8D=E5=88=A4=E6=96=AD=20=E6=B5=AE=E7=82=B9?= =?UTF-8?q?=E6=95=B0=E5=B8=B8=E9=87=8F=E6=98=AF=E5=90=A6=E6=BA=A2=E5=87=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit (cherry picked from commit 8c1c0e4dd093fc885bcea9fec4e6b1db1c0e4688) --- src/common/backend/parser/scan.l | 46 ++- .../expected/expr_nantest_infinitetest.out | 4 +- src/test/regress/expected/float_literals.out | 268 
++++++++++++++++++ .../regress/expected/func_to_binary_float.out | 122 ++++---- src/test/regress/expected/hw_datatype.out | 20 +- src/test/regress/expected/hw_datatype_2.out | 20 +- .../output/copy_support_transform.source | 5 +- src/test/regress/sql/float_literals.sql | 76 +++++ src/test/regress/sql/func_to_binary_float.sql | 2 + 9 files changed, 488 insertions(+), 75 deletions(-) create mode 100644 src/test/regress/expected/float_literals.out create mode 100644 src/test/regress/sql/float_literals.sql diff --git a/src/common/backend/parser/scan.l b/src/common/backend/parser/scan.l index 8fa0db4d43..f258dd91a3 100755 --- a/src/common/backend/parser/scan.l +++ b/src/common/backend/parser/scan.l @@ -114,6 +114,7 @@ static void addunicode(pg_wchar c, yyscan_t yyscanner); static void set_is_delimiter_name(char* text, core_yyscan_t yyscanner ); static int process_decimal_float(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval); static int process_decimal_double(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval); +static char *handle_float_overflow(char *token, core_yyscan_t yyscanner); #define yyerror(msg) scanner_yyerror(msg, yyscanner) @@ -1040,7 +1041,9 @@ other . } {decimal} { SET_YYLLOC(); - yylval->str = pstrdup(yytext); + char* val = pstrdup(yytext); + val = handle_float_overflow(val, yyscanner); + yylval->str = pstrdup(val); yyextra->is_hint_str = false; return FCONST; } @@ -1109,7 +1112,9 @@ other . 
} {real} { SET_YYLLOC(); - yylval->str = pstrdup(yytext); + char* val = pstrdup(yytext); + val = handle_float_overflow(val, yyscanner); + yylval->str = pstrdup(val); yyextra->is_hint_str = false; return FCONST; } @@ -1600,7 +1605,9 @@ process_decimal_float(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lv { startlit(); addlit(token, len-1, yyscanner); - lval->str = litbufdup(yyscanner); + char *val = litbufdup(yyscanner); + val = handle_float_overflow(val, yyscanner); + lval->str = val; return FCONST_F; } @@ -1609,10 +1616,41 @@ process_decimal_double(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *l { startlit(); addlit(token, len-1, yyscanner); - lval->str = litbufdup(yyscanner); + char *val = litbufdup(yyscanner); + val = handle_float_overflow(val, yyscanner); + lval->str = val; return FCONST_D; } +/* prejudge whether float const overflow */ +static char * +handle_float_overflow(char *token, core_yyscan_t yyscanner) +{ + + double val; + char *endptr; + char *result = token; + + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { + return result; + } + + errno = 0; + val = strtod(result, &endptr); + + // if val < 1E-130, we assume it is 0 + if (val != 0 && log10(val) < -130) { + result = "0"; + } else if (errno == ERANGE) { + if (val == 0.0) { + result = "0"; + } else { + yyerror("number overflow"); + } + } + return result; +} + static unsigned int hexval(unsigned char c) { diff --git a/src/test/regress/expected/expr_nantest_infinitetest.out b/src/test/regress/expected/expr_nantest_infinitetest.out index 687a4e3dbf..156b093826 100644 --- a/src/test/regress/expected/expr_nantest_infinitetest.out +++ b/src/test/regress/expected/expr_nantest_infinitetest.out @@ -1646,7 +1646,9 @@ EXECUTE isnan_num(11, 1.23E-100); EXECUTE isnan_num(12, -1.79E+100); EXECUTE isnan_num(13, 1.79E+100); EXECUTE isnan_num(14, 1.79E+400); -ERROR: 
"17900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision +ERROR: number overflow at or near "1.79E+400" +LINE 1: EXECUTE isnan_num(14, 1.79E+400); + ^ EXECUTE isnan_num(15, CAST('NaN' as float8)); EXECUTE isnan_num(16, CAST('Inf' as float8)); SELECT * FROM tnf2 ORDER BY c1; diff --git a/src/test/regress/expected/float_literals.out b/src/test/regress/expected/float_literals.out new file mode 100644 index 0000000000..3864c7a739 --- /dev/null +++ b/src/test/regress/expected/float_literals.out @@ -0,0 +1,268 @@ +-- test float literals overflow in dbcompatibility A +create database float_literals dbcompatibility 'A'; +\c float_literals +SELECT 0.0; + ?column? +---------- + 0.0 +(1 row) + +SELECT -0.0; + ?column? +---------- + 0.0 +(1 row) + +SELECT 3.142596; + ?column? +---------- + 3.142596 +(1 row) + +SELECT -3.142596; + ?column? +----------- + -3.142596 +(1 row) + +SELECT 1.79E+400; +ERROR: number overflow at or near "1.79E+400" +LINE 1: SELECT 1.79E+400; + ^ +SELECT 1.79E-400; + ?column? +---------- + 0 +(1 row) + +SELECT -1.79E+400; +ERROR: number overflow at or near "1.79E+400" +LINE 1: SELECT -1.79E+400; + ^ +SELECT '0.0'; + ?column? +---------- + 0.0 +(1 row) + +SELECT '-0.0'; + ?column? +---------- + -0.0 +(1 row) + +SELECT '3.142596'; + ?column? +---------- + 3.142596 +(1 row) + +SELECT '-3.142596'; + ?column? +----------- + -3.142596 +(1 row) + +SELECT '1.79E+400'; + ?column? +----------- + 1.79E+400 +(1 row) + +SELECT '1.79E-400'; + ?column? +----------- + 1.79E-400 +(1 row) + +SELECT '-1.79E+400'; + ?column? 
+------------ + -1.79E+400 +(1 row) + +SELECT '0.0'::float8; + float8 +-------- + 0 +(1 row) + +SELECT '-0.0'::float8; + float8 +-------- + 0 +(1 row) + +SELECT '3.142596'::float8; + float8 +---------- + 3.142596 +(1 row) + +SELECT '-3.142596'::float8; + float8 +----------- + -3.142596 +(1 row) + +SELECT '1.79E+400'::float8; +ERROR: "1.79E+400" is out of range for type double precision +LINE 1: SELECT '1.79E+400'::float8; + ^ +CONTEXT: referenced column: float8 +SELECT '1.79E-400'::float8; +ERROR: "1.79E-400" is out of range for type double precision +LINE 1: SELECT '1.79E-400'::float8; + ^ +CONTEXT: referenced column: float8 +SELECT '-1.79E+400'::float8; +ERROR: "-1.79E+400" is out of range for type double precision +LINE 1: SELECT '-1.79E+400'::float8; + ^ +CONTEXT: referenced column: float8 +SELECT TO_BINARY_FLOAT(3.14 DEFAULT y ON CONVERSION ERROR); +ERROR: column "y" does not exist +LINE 1: SELECT TO_BINARY_FLOAT(3.14 DEFAULT y ON CONVERSION ERROR); + ^ +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(-3.14 DEFAULT + ON CONVERSION ERROR); +ERROR: syntax error at or near "ON CONVERSION ERROR" +LINE 1: SELECT TO_BINARY_FLOAT(-3.14 DEFAULT + ON CONVERSION ERROR); + ^ +SELECT TO_BINARY_FLOAT(3.14 DEFAULT - ON CONVERSION ERROR); +ERROR: syntax error at or near "ON CONVERSION ERROR" +LINE 1: SELECT TO_BINARY_FLOAT(3.14 DEFAULT - ON CONVERSION ERROR); + ^ +SELECT TO_BINARY_FLOAT(-3.14 DEFAULT * ON CONVERSION ERROR); +ERROR: syntax error at or near "*" +LINE 1: SELECT TO_BINARY_FLOAT(-3.14 DEFAULT * ON CONVERSION ERROR); + ^ +SELECT TO_BINARY_FLOAT(3.14 DEFAULT / ON CONVERSION ERROR); +ERROR: syntax error at or near "/" +LINE 1: SELECT TO_BINARY_FLOAT(3.14 DEFAULT / ON CONVERSION ERROR); + ^ +SELECT TO_BINARY_FLOAT('3.14' DEFAULT y ON CONVERSION ERROR); +ERROR: column "y" does not exist +LINE 1: SELECT TO_BINARY_FLOAT('3.14' DEFAULT y ON CONVERSION ERROR)... 
+ ^ +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('-3.14' DEFAULT + ON CONVERSION ERROR); +ERROR: syntax error at or near "ON CONVERSION ERROR" +LINE 1: SELECT TO_BINARY_FLOAT('-3.14' DEFAULT + ON CONVERSION ERROR... + ^ +SELECT TO_BINARY_FLOAT('3.14' DEFAULT - ON CONVERSION ERROR); +ERROR: syntax error at or near "ON CONVERSION ERROR" +LINE 1: SELECT TO_BINARY_FLOAT('3.14' DEFAULT - ON CONVERSION ERROR)... + ^ +SELECT TO_BINARY_FLOAT('-3.14' DEFAULT * ON CONVERSION ERROR); +ERROR: syntax error at or near "*" +LINE 1: SELECT TO_BINARY_FLOAT('-3.14' DEFAULT * ON CONVERSION ERROR... + ^ +SELECT TO_BINARY_FLOAT('3.14' DEFAULT / ON CONVERSION ERROR); +ERROR: syntax error at or near "/" +LINE 1: SELECT TO_BINARY_FLOAT('3.14' DEFAULT / ON CONVERSION ERROR)... + ^ +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT y ON CONVERSION ERROR); +ERROR: number overflow at or near "1.79E+400" +LINE 1: SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT y ON CONVERSION ERR... + ^ +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT + ON CONVERSION ERROR); +ERROR: number overflow at or near "1.79E+400" +LINE 1: SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT + ON CONVERSION ERR... + ^ +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT - ON CONVERSION ERROR); +ERROR: number overflow at or near "1.79E+400" +LINE 1: SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT - ON CONVERSION ERR... + ^ +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT * ON CONVERSION ERROR); +ERROR: number overflow at or near "1.79E+400" +LINE 1: SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT * ON CONVERSION ERR... + ^ +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT / ON CONVERSION ERROR); +ERROR: number overflow at or near "1.79E+400" +LINE 1: SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT / ON CONVERSION ERR... + ^ +SELECT TO_BINARY_FLOAT('1.79E+400' DEFAULT y ON CONVERSION ERROR); +ERROR: column "y" does not exist +LINE 1: SELECT TO_BINARY_FLOAT('1.79E+400' DEFAULT y ON CONVERSION E... 
+ ^ +CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT('1.79E-400' DEFAULT + ON CONVERSION ERROR); +ERROR: syntax error at or near "ON CONVERSION ERROR" +LINE 1: SELECT TO_BINARY_FLOAT('1.79E-400' DEFAULT + ON CONVERSION E... + ^ +SELECT TO_BINARY_FLOAT('-1.79E+400' DEFAULT - ON CONVERSION ERROR); +ERROR: syntax error at or near "ON CONVERSION ERROR" +LINE 1: SELECT TO_BINARY_FLOAT('-1.79E+400' DEFAULT - ON CONVERSION ... + ^ +SELECT TO_BINARY_FLOAT('-1.79E-400' DEFAULT * ON CONVERSION ERROR); +ERROR: syntax error at or near "*" +LINE 1: SELECT TO_BINARY_FLOAT('-1.79E-400' DEFAULT * ON CONVERSION ... + ^ +SELECT TO_BINARY_FLOAT('1.79E+400' DEFAULT / ON CONVERSION ERROR); +ERROR: syntax error at or near "/" +LINE 1: SELECT TO_BINARY_FLOAT('1.79E+400' DEFAULT / ON CONVERSION E... + ^ +CREATE TABLE t_float_literals (id int, c1 float8); +INSERT INTO t_float_literals VALUES (1, 0.0); +INSERT INTO t_float_literals VALUES (2, 3.14); +INSERT INTO t_float_literals VALUES (3, 3.14E+40); +INSERT INTO t_float_literals VALUES (4, -3.14E+40); +INSERT INTO t_float_literals VALUES (5, '3.14E+40'::float8); +INSERT INTO t_float_literals VALUES (6, '-3.14E+40'::float8); +INSERT INTO t_float_literals VALUES (7, 3.14E+400); +ERROR: number overflow at or near "3.14E+400" +LINE 1: INSERT INTO t_float_literals VALUES (7, 3.14E+400); + ^ +INSERT INTO t_float_literals VALUES (8, 3.14E-400); +INSERT INTO t_float_literals VALUES (9, -3.14E+400); +ERROR: number overflow at or near "3.14E+400" +LINE 1: INSERT INTO t_float_literals VALUES (9, -3.14E+400); + ^ +INSERT INTO t_float_literals VALUES (10, '3.14E+400'::float8); +ERROR: "3.14E+400" is out of range for type double precision +LINE 1: INSERT INTO t_float_literals VALUES (10, '3.14E+400'::float8... + ^ +INSERT INTO t_float_literals VALUES (11, '3.14E-400'::float8); +ERROR: "3.14E-400" is out of range for type double precision +LINE 1: INSERT INTO t_float_literals VALUES (11, '3.14E-400'::float8... 
+ ^ +INSERT INTO t_float_literals VALUES (12, '-3.14E+400'::float8); +ERROR: "-3.14E+400" is out of range for type double precision +LINE 1: INSERT INTO t_float_literals VALUES (12, '-3.14E+400'::float... + ^ +SELECT * FROM t_float_literals ORDER bY id; + id | c1 +----+----------- + 1 | 0 + 2 | 3.14 + 3 | 3.14e+40 + 4 | -3.14e+40 + 5 | 3.14e+40 + 6 | -3.14e+40 + 8 | 0 +(7 rows) + +UPDATE t_float_iterals SET c1 = 1.79E+400 WHERE id = 1; +ERROR: number overflow at or near "1.79E+400" +LINE 1: UPDATE t_float_iterals SET c1 = 1.79E+400 WHERE id = 1; + ^ +UPDATE t_float_iterals SET c1 = '1.79E+400'::float8 WHERE id = 2; +ERROR: relation "t_float_iterals" does not exist on datanode1 +LINE 1: UPDATE t_float_iterals SET c1 = '1.79E+400'::float8 WHERE id... + ^ +UPDATE t_float_iterals SET c1 = 1.79E+40 WHERE id = 3; +ERROR: relation "t_float_iterals" does not exist on datanode1 +LINE 1: UPDATE t_float_iterals SET c1 = 1.79E+40 WHERE id = 3; + ^ +UPDATE t_float_iterals SET c1 = '1.79E+40'::float8 WHERE id = 4; +ERROR: relation "t_float_iterals" does not exist on datanode1 +LINE 1: UPDATE t_float_iterals SET c1 = '1.79E+40'::float8 WHERE id ... 
+ ^ +SELECT * FROM t_float_iterals ORDER BY c1; +ERROR: relation "t_float_iterals" does not exist on datanode1 +LINE 1: SELECT * FROM t_float_iterals ORDER BY c1; + ^ +drop table t_float_literals; +\c regression +drop database float_literals; diff --git a/src/test/regress/expected/func_to_binary_float.out b/src/test/regress/expected/func_to_binary_float.out index 3fcd424021..2b8786d860 100644 --- a/src/test/regress/expected/func_to_binary_float.out +++ b/src/test/regress/expected/func_to_binary_float.out @@ -102,8 +102,9 @@ SELECT TO_BINARY_FLOAT(2.22507485850720E-100); (1 row) SELECT TO_BINARY_FLOAT(1.79769313486231E+310); -- error: overflow -ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: to_binary_float +ERROR: number overflow at or near "1.79769313486231E+310" +LINE 1: SELECT TO_BINARY_FLOAT(1.79769313486231E+310); + ^ SELECT TO_BINARY_FLOAT('1.79769313486231E+100'); to_binary_float ----------------- @@ -459,10 +460,9 @@ SELECT TO_BINARY_FLOAT('test' DEFAULT 3.04E+100 ON CONVERSION ERROR); -- test default column SELECT TO_BINARY_FLOAT(1.79E+309 DEFAULT y ON CONVERSION ERROR); -ERROR: column "y" does not exist +ERROR: number overflow at or near "1.79E+309" LINE 1: SELECT TO_BINARY_FLOAT(1.79E+309 DEFAULT y ON CONVERSION ERR... 
- ^ -CONTEXT: referenced column: to_binary_float + ^ SELECT TO_BINARY_FLOAT(c3 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; to_binary_float ----------------- @@ -532,11 +532,13 @@ SELECT TO_BINARY_FLOAT(3.14E+100 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER (8 rows) SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- overflow -ERROR: "31400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: to_binary_float +ERROR: number overflow at or near "3.14E+400" +LINE 1: SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT c3 ON CONVERSION ER... + ^ SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- overflow -ERROR: "31400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: to_binary_float +ERROR: number overflow at or near "3.14E+400" +LINE 1: SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT c4 ON CONVERSION ER... 
+ ^ SELECT TO_BINARY_FLOAT('3.14' DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error ERROR: default argument must be a literal or bind CONTEXT: referenced column: to_binary_float @@ -561,6 +563,14 @@ CONTEXT: referenced column: to_binary_float SELECT TO_BINARY_FLOAT(NULL DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error ERROR: default argument must be a literal or bind CONTEXT: referenced column: to_binary_float +SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT ab ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- overflow +ERROR: number overflow at or near "3.14E+400" +LINE 1: SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT ab ON CONVERSION ER... + ^ +SELECT TO_BINARY_FLOAT('3.14E+400 DEFAULT' ab ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error +ERROR: syntax error at or near "ab" +LINE 1: SELECT TO_BINARY_FLOAT('3.14E+400 DEFAULT' ab ON CONVERSION ... + ^ -- test overflow and null SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); to_binary_float @@ -587,8 +597,9 @@ SELECT TO_BINARY_FLOAT('2.22507485850720E-100' DEFAULT 3.14 ON CONVERSION ERROR) (1 row) SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT 3.14 ON CONVERSION ERROR); -- error: overflow -ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: to_binary_float +ERROR: number overflow at or near "1.79769313486231E+310" +LINE 1: SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT 3.14 ON... 
+ ^ SELECT TO_BINARY_FLOAT('1.79769313486231E+310' DEFAULT 3.14 ON CONVERSION ERROR); -- inf to_binary_float ----------------- @@ -608,8 +619,9 @@ SELECT TO_BINARY_FLOAT(3.14 DEFAULT '1.79769313486231E+100' ON CONVERSION ERROR) (1 row) SELECT TO_BINARY_FLOAT(3.14 DEFAULT 1.79769313486231E+310 ON CONVERSION ERROR); -- error: overflow -ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: to_binary_float +ERROR: number overflow at or near "1.79769313486231E+310" +LINE 1: SELECT TO_BINARY_FLOAT(3.14 DEFAULT 1.79769313486231E+310 ON... + ^ SELECT TO_BINARY_FLOAT(3.14 DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR); to_binary_float ----------------- @@ -629,8 +641,9 @@ SELECT TO_BINARY_FLOAT('1.79769313486231E+100' DEFAULT NULL ON CONVERSION ERROR) (1 row) SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT NULL ON CONVERSION ERROR); -- error: overflow -ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: to_binary_float +ERROR: number overflow at or near "1.79769313486231E+310" +LINE 1: SELECT TO_BINARY_FLOAT(1.79769313486231E+310 DEFAULT NULL ON... 
+ ^ SELECT TO_BINARY_FLOAT('1.79769313486231E+310' DEFAULT NULL ON CONVERSION ERROR); -- inf to_binary_float ----------------- @@ -650,8 +663,9 @@ SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+100' ON CONVERSION ERROR) (1 row) SELECT TO_BINARY_FLOAT(NULL DEFAULT 1.79769313486231E+310 ON CONVERSION ERROR); -- error: overflow -ERROR: "17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: to_binary_float +ERROR: number overflow at or near "1.79769313486231E+310" +LINE 1: SELECT TO_BINARY_FLOAT(NULL DEFAULT 1.79769313486231E+310 ON... + ^ SELECT TO_BINARY_FLOAT(NULL DEFAULT '1.79769313486231E+310' ON CONVERSION ERROR); -- NULL to_binary_float ----------------- @@ -746,9 +760,21 @@ EXECUTE default_param_text2(13, NULL, 3.14); EXECUTE default_param_text2(14, 1.79769313486231E+100, 3.14); EXECUTE default_param_text2(15, 3.14, 1.79769313486231E+100); EXECUTE default_param_text2(16, 1.79769313486231E+400, 3.14); +ERROR: number overflow at or near "1.79769313486231E+400" +LINE 1: EXECUTE default_param_text2(16, 1.79769313486231E+400, 3.14)... + ^ EXECUTE default_param_text2(17, 3.14, 1.79769313486231E+400); +ERROR: number overflow at or near "1.79769313486231E+400" +LINE 1: EXECUTE default_param_text2(17, 3.14, 1.79769313486231E+400)... + ^ EXECUTE default_param_text2(18, 1.79769313486231E+400, NULL); +ERROR: number overflow at or near "1.79769313486231E+400" +LINE 1: EXECUTE default_param_text2(18, 1.79769313486231E+400, NULL)... + ^ EXECUTE default_param_text2(19, NULL, 1.79769313486231E+400); +ERROR: number overflow at or near "1.79769313486231E+400" +LINE 1: EXECUTE default_param_text2(19, NULL, 1.79769313486231E+400)... 
+ ^ PREPARE default_param_text_num(int, text, float8) AS INSERT INTO t2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); ERROR: relation "t2" does not exist on datanode1 LINE 1: ..._param_text_num(int, text, float8) AS INSERT INTO t2 VALUES ... @@ -759,43 +785,43 @@ EXECUTE default_param_text2(22, '1.79769313486231E+100', 6.666666); EXECUTE default_param_text2(23, '6.666666', 1.79769313486231E+100); EXECUTE default_param_text2(24, '1.79769313486231E+400', 6.666666); EXECUTE default_param_text2(25, '6.666666', 1.79769313486231E+400); +ERROR: number overflow at or near "1.79769313486231E+400" +LINE 1: EXECUTE default_param_text2(25, '6.666666', 1.79769313486231... + ^ PREPARE default_param_num_text(int, float8, text) AS INSERT INTO tbf2 VALUES ($1, CONCAT('TO_BINARY_FLOAT(', $2, ' DEFAULT ', $3, ' ON CONVERSION ERROR)'), TO_BINARY_FLOAT($2 DEFAULT $3 ON CONVERSION ERROR)); EXECUTE default_param_text2(26, 1.79769313486231E+100, '6.666666'); EXECUTE default_param_text2(27, 6.666666, '1.79769313486231E+100'); EXECUTE default_param_text2(28, 1.79769313486231E+400, '6.666666'); +ERROR: number overflow at or near "1.79769313486231E+400" +LINE 1: EXECUTE default_param_text2(28, 1.79769313486231E+400, '6.66... 
+ ^ EXECUTE default_param_text2(29, 6.666666, '1.79769313486231E+400'); SELECT * FROM tbf2 ORDER BY c1; - c1 | func_info | res -----+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------- - 1 | TO_BINARY_FLOAT(3.14 DEFAULT 6.66 ON CONVERSION ERROR) | 3.14 - 2 | TO_BINARY_FLOAT(3.14# DEFAULT 6.66 ON CONVERSION ERROR) | 6.66 - 4 | TO_BINARY_FLOAT( -3.14 DEFAULT 6.66 ON CONVERSION ERROR) | -3.14 - 5 | TO_BINARY_FLOAT( DEFAULT 6.66 ON CONVERSION ERROR) | - 6 | TO_BINARY_FLOAT(6.66 DEFAULT ON CONVERSION ERROR) | 6.66 - 7 | TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT ON CONVERSION ERROR) | Infinity - 8 | TO_BINARY_FLOAT( DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR) | - 9 | TO_BINARY_FLOAT(1.79769313486231E+400 DEFAULT ON CONVERSION ERROR) | Infinity - 10 | TO_BINARY_FLOAT( DEFAULT 1.79769313486231E+400 ON CONVERSION ERROR) | - 11 | TO_BINARY_FLOAT(3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 - 12 | TO_BINARY_FLOAT(3.14 DEFAULT ON CONVERSION ERROR) | 3.14 - 13 | TO_BINARY_FLOAT( DEFAULT 3.14 ON CONVERSION ERROR) | - 14 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 3.14 ON CONVERSION ERROR) | Infinity - 15 | TO_BINARY_FLOAT(3.14 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 3.14 - 16 | 
TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 3.14 ON CONVERSION ERROR) | Infinity - 17 | TO_BINARY_FLOAT(3.14 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 3.14 - 18 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT ON CONVERSION ERROR) | Infinity - 19 | TO_BINARY_FLOAT( DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | - 20 | TO_BINARY_FLOAT(3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 - 21 | TO_BINARY_FLOAT( +3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 
- 22 | TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity - 23 | TO_BINARY_FLOAT(6.666666 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 6.66667 - 24 | TO_BINARY_FLOAT(1.79769313486231E+400 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity - 25 | TO_BINARY_FLOAT(6.666666 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 6.66667 - 26 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity - 27 | TO_BINARY_FLOAT(6.666666 DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR) | 6.66667 - 28 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity - 29 | TO_BINARY_FLOAT(6.666666 DEFAULT 1.79769313486231E+400 ON CONVERSION ERROR) | 6.66667 -(28 rows) + c1 | func_info | res +----+-------------------------------------------------------------------------------------------------------------------------------------------------------------+---------- + 1 | TO_BINARY_FLOAT(3.14 DEFAULT 6.66 ON CONVERSION ERROR) | 3.14 + 2 | TO_BINARY_FLOAT(3.14# DEFAULT 6.66 ON CONVERSION 
ERROR) | 6.66 + 4 | TO_BINARY_FLOAT( -3.14 DEFAULT 6.66 ON CONVERSION ERROR) | -3.14 + 5 | TO_BINARY_FLOAT( DEFAULT 6.66 ON CONVERSION ERROR) | + 6 | TO_BINARY_FLOAT(6.66 DEFAULT ON CONVERSION ERROR) | 6.66 + 7 | TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT ON CONVERSION ERROR) | Infinity + 8 | TO_BINARY_FLOAT( DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR) | + 9 | TO_BINARY_FLOAT(1.79769313486231E+400 DEFAULT ON CONVERSION ERROR) | Infinity + 10 | TO_BINARY_FLOAT( DEFAULT 1.79769313486231E+400 ON CONVERSION ERROR) | + 11 | TO_BINARY_FLOAT(3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 + 12 | TO_BINARY_FLOAT(3.14 DEFAULT ON CONVERSION ERROR) | 3.14 + 13 | TO_BINARY_FLOAT( DEFAULT 3.14 ON CONVERSION ERROR) | + 14 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 3.14 ON CONVERSION ERROR) | Infinity + 15 | TO_BINARY_FLOAT(3.14 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 3.14 + 20 | TO_BINARY_FLOAT(3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 + 21 | TO_BINARY_FLOAT( +3.14 DEFAULT 6.666666 ON CONVERSION ERROR) | 3.14 + 22 | TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity + 23 | TO_BINARY_FLOAT(6.666666 DEFAULT 17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ON CONVERSION ERROR) | 6.66667 + 24 | TO_BINARY_FLOAT(1.79769313486231E+400 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity + 26 | TO_BINARY_FLOAT(17976931348623100000000000000000000000000000000000000000000000000000000000000000000000000000000000000 DEFAULT 6.666666 ON CONVERSION ERROR) | Infinity + 27 | TO_BINARY_FLOAT(6.666666 DEFAULT 1.79769313486231E+100 ON CONVERSION ERROR) | 6.66667 + 29 | TO_BINARY_FLOAT(6.666666 DEFAULT 1.79769313486231E+400 ON CONVERSION ERROR) | 6.66667 +(22 rows) DROP TABLE tbf; DROP TABLE tbf2; diff --git 
a/src/test/regress/expected/hw_datatype.out b/src/test/regress/expected/hw_datatype.out index 8b1a4c1e94..2ad8c0da06 100644 --- a/src/test/regress/expected/hw_datatype.out +++ b/src/test/regress/expected/hw_datatype.out @@ -222,19 +222,19 @@ CREATE TABLE test_type( INSERT INTO test_type VALUES(15.23448); INSERT INTO test_type VALUES(1E-323); INSERT INTO test_type VALUES(1E-324); ---?ERROR.* -CONTEXT: referenced column: my_double INSERT INTO test_type VALUES(1E+308); INSERT INTO test_type VALUES(1E+309); -ERROR: "1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: my_double +ERROR: number overflow at or near "1E+309" +LINE 1: INSERT INTO test_type VALUES(1E+309); + ^ SELECT * FROM test_type order by 1; - my_double ------------------------ - 9.88131291682493e-324 - 15.23448 - 1e+308 -(3 rows) + my_double +----------- + 0 + 0 + 15.23448 + 1e+308 +(4 rows) DROP TABLE test_type; /* g.Type BINARY_INTEGER */ diff --git a/src/test/regress/expected/hw_datatype_2.out b/src/test/regress/expected/hw_datatype_2.out index 9ebab0daa3..b922236c0a 100644 --- a/src/test/regress/expected/hw_datatype_2.out +++ b/src/test/regress/expected/hw_datatype_2.out @@ -663,19 +663,19 @@ CREATE TABLE test_type( INSERT INTO test_type VALUES(15.23448); INSERT INTO test_type VALUES(1E-323); INSERT INTO test_type VALUES(1E-324); ---?ERROR.* -CONTEXT: referenced column: my_double INSERT INTO test_type VALUES(1E+308); INSERT INTO test_type VALUES(1E+309); -ERROR: 
"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: referenced column: my_double +ERROR: number overflow at or near "1E+309" +LINE 1: INSERT INTO test_type VALUES(1E+309); + ^ SELECT * FROM test_type order by 1; - my_double ------------------------ - 9.88131291682493e-324 - 15.23448 - 1e+308 -(3 rows) + my_double +----------- + 0 + 0 + 15.23448 + 1e+308 +(4 rows) DROP TABLE test_type; /* g.Type BINARY_INTEGER */ diff --git a/src/test/regress/output/copy_support_transform.source b/src/test/regress/output/copy_support_transform.source index aebea5376c..7cbf0155cf 100644 --- a/src/test/regress/output/copy_support_transform.source +++ b/src/test/regress/output/copy_support_transform.source @@ -45,8 +45,9 @@ copy test from '@abs_srcdir@/data/copy_support_transform.data' transform (mes1 c ERROR: value too long for type character(3) CONTEXT: COPY test, line 1, column mes1: "mmoo" copy test from '@abs_srcdir@/data/copy_support_transform.data' transform (mes1 text AS mes1 || mes2, mes2 float8 AS mes2 + 1E400, mes3 timestamp with time zone AS date_trunc('year', mes3)); -ERROR: "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" is out of range for type double precision -CONTEXT: COPY test, line 1: "1 mmoo 12.6789 Thu Jan 01 15:04:28 1970 PST 32767" +ERROR: number overflow at or near "1E400" +LINE 1: ...(mes1 text AS mes1 
|| mes2, mes2 float8 AS mes2 + 1E400, mes... + ^ copy test from '@abs_srcdir@/data/copy_support_transform.data' transform (mes5 text AS mes1 || mes2, mes2 float8 AS mes2 + 1, mes3 timestamp with time zone AS date_trunc('year', mes3)); ERROR: column "mes5" of relation "test" does not exist copy test from '@abs_srcdir@/data/copy_support_transform.data' transform (mes1 text AS mes5 || mes2, mes2 float8 AS mes2 + 1, mes3 timestamp with time zone AS date_trunc('year', mes3)); diff --git a/src/test/regress/sql/float_literals.sql b/src/test/regress/sql/float_literals.sql new file mode 100644 index 0000000000..26d5feffb6 --- /dev/null +++ b/src/test/regress/sql/float_literals.sql @@ -0,0 +1,76 @@ +-- test float literals overflow in dbcompatibility A +create database float_literals dbcompatibility 'A'; +\c float_literals + +SELECT 0.0; +SELECT -0.0; +SELECT 3.142596; +SELECT -3.142596; +SELECT 1.79E+400; +SELECT 1.79E-400; +SELECT -1.79E+400; + +SELECT '0.0'; +SELECT '-0.0'; +SELECT '3.142596'; +SELECT '-3.142596'; +SELECT '1.79E+400'; +SELECT '1.79E-400'; +SELECT '-1.79E+400'; + +SELECT '0.0'::float8; +SELECT '-0.0'::float8; +SELECT '3.142596'::float8; +SELECT '-3.142596'::float8; +SELECT '1.79E+400'::float8; +SELECT '1.79E-400'::float8; +SELECT '-1.79E+400'::float8; + +SELECT TO_BINARY_FLOAT(3.14 DEFAULT y ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(-3.14 DEFAULT + ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(3.14 DEFAULT - ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(-3.14 DEFAULT * ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(3.14 DEFAULT / ON CONVERSION ERROR); + +SELECT TO_BINARY_FLOAT('3.14' DEFAULT y ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('-3.14' DEFAULT + ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('3.14' DEFAULT - ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('-3.14' DEFAULT * ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('3.14' DEFAULT / ON CONVERSION ERROR); + +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT y ON CONVERSION ERROR); +SELECT 
TO_BINARY_FLOAT(1.79E+400 DEFAULT + ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT - ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT * ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT(1.79E+400 DEFAULT / ON CONVERSION ERROR); + +SELECT TO_BINARY_FLOAT('1.79E+400' DEFAULT y ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('1.79E-400' DEFAULT + ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('-1.79E+400' DEFAULT - ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('-1.79E-400' DEFAULT * ON CONVERSION ERROR); +SELECT TO_BINARY_FLOAT('1.79E+400' DEFAULT / ON CONVERSION ERROR); + +CREATE TABLE t_float_literals (id int, c1 float8); +INSERT INTO t_float_literals VALUES (1, 0.0); +INSERT INTO t_float_literals VALUES (2, 3.14); +INSERT INTO t_float_literals VALUES (3, 3.14E+40); +INSERT INTO t_float_literals VALUES (4, -3.14E+40); +INSERT INTO t_float_literals VALUES (5, '3.14E+40'::float8); +INSERT INTO t_float_literals VALUES (6, '-3.14E+40'::float8); +INSERT INTO t_float_literals VALUES (7, 3.14E+400); +INSERT INTO t_float_literals VALUES (8, 3.14E-400); +INSERT INTO t_float_literals VALUES (9, -3.14E+400); +INSERT INTO t_float_literals VALUES (10, '3.14E+400'::float8); +INSERT INTO t_float_literals VALUES (11, '3.14E-400'::float8); +INSERT INTO t_float_literals VALUES (12, '-3.14E+400'::float8); +SELECT * FROM t_float_literals ORDER bY id; + +UPDATE t_float_iterals SET c1 = 1.79E+400 WHERE id = 1; +UPDATE t_float_iterals SET c1 = '1.79E+400'::float8 WHERE id = 2; +UPDATE t_float_iterals SET c1 = 1.79E+40 WHERE id = 3; +UPDATE t_float_iterals SET c1 = '1.79E+40'::float8 WHERE id = 4; +SELECT * FROM t_float_iterals ORDER BY c1; + +drop table t_float_literals; +\c regression +drop database float_literals; \ No newline at end of file diff --git a/src/test/regress/sql/func_to_binary_float.sql b/src/test/regress/sql/func_to_binary_float.sql index 625f527cc6..1ae372e891 100644 --- a/src/test/regress/sql/func_to_binary_float.sql +++ 
b/src/test/regress/sql/func_to_binary_float.sql @@ -96,6 +96,8 @@ SELECT TO_BINARY_FLOAT('3.14E+400' DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDE SELECT TO_BINARY_FLOAT('3.14E+400' DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER By c1; -- error SELECT TO_BINARY_FLOAT(NULL DEFAULT c3 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error SELECT TO_BINARY_FLOAT(NULL DEFAULT c4 ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error +SELECT TO_BINARY_FLOAT(3.14E+400 DEFAULT ab ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- overflow +SELECT TO_BINARY_FLOAT('3.14E+400 DEFAULT' ab ON CONVERSION ERROR) FROM tbf ORDER BY c1; -- error -- test overflow and null SELECT TO_BINARY_FLOAT(1.79769313486231E+100 DEFAULT 3.14 ON CONVERSION ERROR); -- Gitee From 3d5f50b0126eaf0d5f1c25407bb5776789870b97 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Mon, 12 Aug 2024 11:15:52 +0800 Subject: [PATCH 167/347] =?UTF-8?q?=E6=94=AF=E6=8C=81=E6=89=93=E5=BC=80out?= =?UTF-8?q?=5Fparam=5Ffunc=5Foverload=E5=87=BD=E6=95=B0=E9=87=8D=E8=BD=BD?= =?UTF-8?q?=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/namespace.cpp | 4 + src/common/backend/catalog/pg_proc.cpp | 43 --- src/common/backend/parser/parse_func.cpp | 49 ++- src/common/pl/plpgsql/src/gram.y | 73 ++++- src/test/regress/expected/hw_package.out | 11 +- src/test/regress/expected/out_param_func.out | 15 +- .../expected/out_param_func_overload.out | 298 ++++++++++++++++++ src/test/regress/parallel_schedule0A | 2 +- .../regress/sql/out_param_func_overload.sql | 244 ++++++++++++++ 9 files changed, 642 insertions(+), 97 deletions(-) create mode 100644 src/test/regress/expected/out_param_func_overload.out create mode 100644 src/test/regress/sql/out_param_func_overload.sql diff --git a/src/common/backend/catalog/namespace.cpp b/src/common/backend/catalog/namespace.cpp index 9565b927a4..5283da0631 100644 --- 
a/src/common/backend/catalog/namespace.cpp +++ b/src/common/backend/catalog/namespace.cpp @@ -1656,6 +1656,10 @@ FuncCandidateList FuncnameGetCandidates(List* names, int nargs, List* argnames, if (pkg_oid != package_oid) { continue; } + Datum pro_is_private_datum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_proisprivate, &isNull); + if (!isNull && !u_sess->is_autonomous_session && DatumGetBool(pro_is_private_datum)) { + continue; + } namespaceId = GetPackageNamespace(pkg_oid); } else if (caller_pkg_oid == package_oid) { if (pkgname != NULL) { diff --git a/src/common/backend/catalog/pg_proc.cpp b/src/common/backend/catalog/pg_proc.cpp index 946275a755..d30119793a 100644 --- a/src/common/backend/catalog/pg_proc.cpp +++ b/src/common/backend/catalog/pg_proc.cpp @@ -1521,49 +1521,6 @@ ObjectAddress ProcedureCreate(const char* procedureName, Oid procNamespace, Oid ObjectIdGetDatum(procNamespace)); #endif } -#ifndef ENABLE_MULTIPLE_NODES - if (enable_out_param_override() && !u_sess->attr.attr_common.IsInplaceUpgrade && !IsInitdb && !proIsProcedure && - IsPlpgsqlLanguageOid(languageObjectId)) { - bool findOutParamFunc = false; - CatCList *catlist = NULL; - if (t_thrd.proc->workingVersionNum < 92470) { - catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(procedureName)); - } else { - catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(procedureName)); - } - for (int i = 0; i < catlist->n_members; ++i) { - HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); - Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(proctup); - bool isNull = false; - Datum packageOidDatum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_packageid, &isNull); - Oid packageOid = InvalidOid; - if (!isNull) { - packageOid = DatumGetObjectId(packageOidDatum); - } - if (packageOid == propackageid && procform->pronamespace == procNamespace) { - isNull = false; - (void)SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_proallargtypes, &isNull); - if (!isNull) { - 
findOutParamFunc = true; - break; - } - } - } - - ReleaseSysCacheList(catlist); - if (existOutParam) { - if (!HeapTupleIsValid(oldtup) && findOutParamFunc) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - (errmsg("\"%s\" functions with plpgsql language and out params are not supported Overloaded.", - procedureName), - errdetail("N/A."), - errcause("functions with plpgsql language and out params are not supported Overloaded."), - erraction("Drop function before create function.")))); - } - } - } -#endif if (HeapTupleIsValid(oldtup)) { /* There is one; okay to replace it? */ bool isNull = false; diff --git a/src/common/backend/parser/parse_func.cpp b/src/common/backend/parser/parse_func.cpp index dbb514d093..e9eed9439e 100644 --- a/src/common/backend/parser/parse_func.cpp +++ b/src/common/backend/parser/parse_func.cpp @@ -1493,42 +1493,31 @@ FuncCandidateList sort_candidate_func_list(FuncCandidateList oldCandidates) candidates[index++] = cur; cur = cur->next; } - - FuncCandidateList sortedCandidates = NULL; - FuncCandidateList lastCandidate = NULL; - for (int i = 0; i < size; i++) { - if (candidates[i] == NULL) { - continue; - } - int smallestIndex = i; - for (int j = 0; j < size; j++) { - FuncCandidateList cur2 = candidates[j]; - if (cur2 != NULL && candidates[smallestIndex]->allArgNum > cur2->allArgNum) { - smallestIndex = j; - } - } - FuncCandidateList smallest = candidates[smallestIndex]; - if (lastCandidate == NULL) { - lastCandidate = smallest; - sortedCandidates = smallest; - } else { - lastCandidate->next = smallest; - lastCandidate = lastCandidate->next; - smallest->next = NULL; + int i, j; + FuncCandidateList temp; + for (i = 0; i < index - 1; i++) { + for (j = 0; j < index - (i + 1); j++) { + if (candidates[j]->allArgNum > candidates[j + 1]->allArgNum) { + temp = candidates[j]; + candidates[j] = candidates[j + 1]; + candidates[j + 1] = temp; + } } - candidates[smallestIndex] = NULL; } - - for (int i = 0; i < size; i++) { - if 
(candidates[i] != NULL) { - lastCandidate->next = candidates[i]; - lastCandidate = lastCandidate->next; + + FuncCandidateList sorted_candidates = candidates[0]; + FuncCandidateList next_candidates = sorted_candidates; + for (i = 0; i < index; i++) { + candidates[i]->next = NULL; + if (i != 0) { + next_candidates->next = candidates[i]; + next_candidates = next_candidates->next; } } - lastCandidate->next = NULL; + pfree(candidates); - return sortedCandidates; + return sorted_candidates; } /* func_get_detail() diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 9e2c0a79f7..9a72b649c9 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -228,6 +228,7 @@ static void checkFuncName(List* funcname); static void IsInPublicNamespace(char* varname); static void CheckDuplicateCondition (char* name); static void SetErrorState(); +static bool oid_is_function(Oid funcid, bool* isSystemObj); static void AddNamespaceIfNeed(int dno, char* ident); static void AddNamespaceIfPkgVar(const char* ident, IdentifierLookup save_IdentifierLookup); bool plpgsql_is_token_keyword(int tok); @@ -7230,7 +7231,8 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate return NULL; } - if (clist->next) + bool isSystemObj = false; + if (clist->next || (clist->next && enable_out_param_override() && oid_is_function(clist->oid, &isSystemObj))) { multi_func = true; char* schemaname = NULL; @@ -7311,7 +7313,7 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate is_assign = false; } /* has any "out" parameters, user execsql stmt */ - if (is_assign) + if (is_assign && !(clist->next && enable_out_param_override() && oid_is_function(clist->oid, &isSystemObj))) { appendStringInfoString(&func_inparas, "SELECT "); } @@ -7633,6 +7635,12 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate appendStringInfoString(&func_inparas, "=>"); (void)yylex(); 
yylex_outparam(fieldnames, varnos, nfields, &row, &rec, &tok, &varno, true, true); + if (varno != -1 && enable_out_param_override() && oid_is_function(clist->oid, &isSystemObj)) { + int dtype = u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[varno]->dtype; + if (dtype == PLPGSQL_DTYPE_ROW) { + out_param_dno = yylval.wdatum.dno; + } + } plpgsql_push_back_token(tok); expr = read_sql_construct(',', ')', 0, ",|)", "", true, false, false, NULL, &tok); appendStringInfoString(&func_inparas, expr->query); @@ -7645,6 +7653,12 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate else { yylex_outparam(fieldnames, varnos, nfields, &row, &rec, &tok, &varno, true, true); + if (varno != -1 && enable_out_param_override() && oid_is_function(clist->oid, &isSystemObj)) { + int dtype = u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[varno]->dtype; + if (dtype == PLPGSQL_DTYPE_ROW) { + out_param_dno = yylval.wdatum.dno; + } + } plpgsql_push_back_token(tok); yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); namedarg[nfields] = false; @@ -7766,8 +7780,10 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate narg = procStruct->pronargs; ReleaseSysCache(proctup); - for (int k = 0; k < all_arg; k++) { - check_tableofindex_args(varnos[k], p_argtypes[k]); + if (!(enable_out_param_override() && oid_is_function(clist->oid, &isSystemObj))) { + for (int k = 0; k < all_arg; k++) { + check_tableofindex_args(varnos[k], p_argtypes[k]); + } } /* if there is no "out" parameters ,use perform stmt,others use exesql */ @@ -8166,14 +8182,15 @@ is_function(const char *name, bool is_assign, bool no_parenthesis, List* funcNam } return false; } - else if (clist->next) + bool isSystemObj = false; + if (clist->next && !(enable_out_param_override() && oid_is_function(clist->oid, & isSystemObj))) { if (is_assign) return false; else return true; } - else + while(clist) { proctup = SearchSysCache(PROCOID, 
ObjectIdGetDatum(clist->oid), @@ -8211,14 +8228,18 @@ is_function(const char *name, bool is_assign, bool no_parenthesis, List* funcNam } } } + ReleaseSysCache(proctup); + if (have_inoutargs && is_function_with_plpgsql_language_and_outparam(clist->oid)) + return true; + + if (!have_outargs && is_assign && + !(clist->next && enable_out_param_override() && oid_is_function(clist->oid, &isSystemObj))) + return false; + + clist = clist->next; } - if (have_inoutargs && is_function_with_plpgsql_language_and_outparam(clist->oid)) - return true; - if (!have_outargs && is_assign) - return false; - return true;/* passed all test */ } return false; @@ -8250,6 +8271,36 @@ static void checkFuncName(List* funcname) (void)MemoryContextSwitchTo(colCxt); } +static bool oid_is_function(Oid funcid, bool* isSystemObj) +{ + HeapTuple proctup = NULL; + if (OidIsValid(funcid)) { + proctup = SearchSysCache(PROCOID, ObjectIdGetDatum(funcid), 0, 0, 0); + if (HeapTupleIsValid(proctup)) { + bool isNull; + Datum protypeDatum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prokind, &isNull); + char protype; + if (!isNull) { + protype = DatumGetChar(protypeDatum); + } else { + protype = PROKIND_FUNCTION; + } + Datum pronamespaceDatum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_pronamespace, &isNull); + Oid pronamespace = DatumGetObjectId(pronamespaceDatum); + Assert(OidIsValid(pronamespace)); + ReleaseSysCache(proctup); + if (IsPackageSchemaOid(pronamespace) || IsSystemNamespace(pronamespace)) { + *isSystemObj = true; + return false; + } + if (PROC_IS_FUNC(protype)) { + return true; + } + } + } + return false; +} + /* * @brief is_datatype * check if a given type is a datatype diff --git a/src/test/regress/expected/hw_package.out b/src/test/regress/expected/hw_package.out index 4f8fdf738e..10568d3dff 100644 --- a/src/test/regress/expected/hw_package.out +++ b/src/test/regress/expected/hw_package.out @@ -467,8 +467,13 @@ function autonomous_f_150_2_private(pnum1 int) return int end; end 
autonomous_pkg_150_2; / +ERROR: function "autonomous_pkg_150_1.autonomous_f_150_1_private" doesn't exist +CONTEXT: compilation of PL/pgSQL package near line 2 select autonomous_pkg_150_2.autonomous_f_150_2_private(1); -ERROR: not support call package private function or procedure +ERROR: function autonomous_pkg_150_2.autonomous_f_150_2_private(integer) does not exist +LINE 1: select autonomous_pkg_150_2.autonomous_f_150_2_private(1); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. CONTEXT: referenced column: autonomous_f_150_2_private drop table if exists au_pkg; create table au_pkg(id int,name varchar); @@ -1667,9 +1672,7 @@ drop table if exists test.emp_t; drop schema if exists test cascade; drop table if exists au_pkg; drop package autonomous_pkg_150_2; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to function public.autonomous_f_150_2(integer) -drop cascades to function public.autonomous_f_150_2_private(integer) +NOTICE: drop cascades to function public.autonomous_f_150_2(integer) drop package autonomous_pkg_150_1; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to function public.autonomous_f_150_1(integer) diff --git a/src/test/regress/expected/out_param_func.out b/src/test/regress/expected/out_param_func.out index e1ad69effb..edf5c9e89f 100644 --- a/src/test/regress/expected/out_param_func.out +++ b/src/test/regress/expected/out_param_func.out @@ -1042,8 +1042,6 @@ DECLARE return c; END; $$ LANGUAGE 'plpgsql' NOT FENCED; -ERROR: "func8_1" functions with plpgsql language and out params are not supported Overloaded. -DETAIL: N/A. --8.2 同一schema、package下,不允许存在同名的plpgsql语言的out出参函数,但可以replace CREATE or replace FUNCTION func8_2(in a integer, out b integer) RETURNS int @@ -1068,8 +1066,6 @@ DECLARE return c; END; $$ LANGUAGE 'plpgsql' NOT FENCED; -ERROR: "func8_2" functions with plpgsql language and out params are not supported Overloaded. -DETAIL: N/A. 
CREATE or replace FUNCTION func8_2(in a integer, out b integer) RETURNS int AS $$ @@ -1089,8 +1085,6 @@ function func8_2(in a int, out b int, out d integer) return int; end pck8_2; / -ERROR: "func8_2" functions with plpgsql language and out params are not supported Overloaded. -DETAIL: N/A. --8.3 同一schema、package下,允许存在同名的psql语言的不带out出参函数 CREATE or replace FUNCTION func8_3(in a integer) RETURNS int @@ -1222,7 +1216,7 @@ drop function v_func1; --clean reset behavior_compat_options; drop schema out_param_schema cascade; -NOTICE: drop cascades to 24 other objects +NOTICE: drop cascades to 29 other objects DETAIL: drop cascades to function func1(integer) drop cascades to function func1_1(integer,integer) drop cascades to function func2(integer) @@ -1239,7 +1233,12 @@ drop cascades to function out_param_schema.func7_1(integer) drop cascades to function out_param_schema.func7_2(integer) drop cascades to function out_param_schema.func8_1(integer) drop cascades to function out_param_schema.func8_1(integer) -drop cascades to function func8_2(integer) +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* drop cascades to function func8_3(integer) drop cascades to function func8_3(integer,integer) --?.* diff --git a/src/test/regress/expected/out_param_func_overload.out b/src/test/regress/expected/out_param_func_overload.out new file mode 100644 index 0000000000..8c76cecb5d --- /dev/null +++ b/src/test/regress/expected/out_param_func_overload.out @@ -0,0 +1,298 @@ +create schema out_param_func_overload; +set current_schema= out_param_func_overload; +set behavior_compat_options='proc_outparam_override'; +-- 0 +create or replace package pkg_type +as +function func(i1 in int, o1 out number, o2 out varchar2) return number; +function func(i1 in int, i2 in int, o1 out number, o2 out varchar2) return number; +end pkg_type; +/ +create or replace package body pkg_type +is + function func(i1 in int, o1 out number, o2 out varchar2) + return number + is + BEGIN + raise notice 'func(i1 in int, o1 out 
number, o2 out varchar2)'; + o1 := 12.34; + o2 := 'test1'; + return 12.34; + end; + function func(i1 in int, i2 in int, o1 out number, o2 out varchar2) + return number + is + begin + raise notice 'func(i1 in int, i2 in int, o1 out number, o2 out varchar2)'; + o1 := 43.21; + o2 := 'test2'; + return 43.21; + end; +end pkg_type; +/ +DECLARE +ii1 int := -1; +ii2 int := -1; +oo1 number := -1; +oo2 varchar2 := ''; +rr1 number := -1; +begin + rr1 := pkg_type.func(ii1, ii2, oo1, oo2); + raise notice 'pkg_type ii1:%', ii1; + raise notice 'pkg_type ii2:%', ii2; + raise notice 'pkg_type oo1:%', oo1; + raise notice 'pkg_type oo2:%', oo2; + raise notice 'pkg_type rr1:%', rr1; +END; +/ +NOTICE: func(i1 in int, i2 in int, o1 out number, o2 out varchar2) +CONTEXT: SQL statement "CALL pkg_type.func(ii1,ii2,oo1,oo2)" +PL/pgSQL function inline_code_block line 7 at assignment +NOTICE: pkg_type ii1:-1 +NOTICE: pkg_type ii2:-1 +NOTICE: pkg_type oo1:43.21 +NOTICE: pkg_type oo2:test2 +NOTICE: pkg_type rr1:43.21 +DECLARE +ii1 int := -1; +ii2 int := -1; +oo1 number := -1; +oo2 varchar2 := ''; +rr1 number := -1; +begin + rr1 := pkg_type.func(ii1, oo1, oo2); + raise notice 'pkg_type ii1:%', ii1; + raise notice 'pkg_type ii2:%', ii2; + raise notice 'pkg_type oo1:%', oo1; + raise notice 'pkg_type oo2:%', oo2; + raise notice 'pkg_type rr1:%', rr1; +END; +/ +NOTICE: func(i1 in int, o1 out number, o2 out varchar2) +CONTEXT: SQL statement "CALL pkg_type.func(ii1,oo1,oo2)" +PL/pgSQL function inline_code_block line 7 at assignment +NOTICE: pkg_type ii1:-1 +NOTICE: pkg_type ii2:-1 +NOTICE: pkg_type oo1:12.34 +NOTICE: pkg_type oo2:test1 +NOTICE: pkg_type rr1:12.34 +drop package pkg_type; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function out_param_func_overload.func(integer) +drop cascades to function out_param_func_overload.func(integer,integer) +-- 1 private +create or replace package pkg_type as +function func(a varchar2) return varchar2; +end pkg_type; +/ +create or 
replace package body pkg_type as +function func(a varchar2) return varchar2 +as +b varchar2(5) :='var'; +BEGIN +return b; +end; +function func(a integer) return integer +as +b integer := 2; +BEGIN +return b; +end; +end pkg_type; +/ +select pkg_type.func(1); + func +------ + var +(1 row) + +drop package pkg_type; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function out_param_func_overload.func(character varying) +drop cascades to function out_param_func_overload.func(integer) +-- test overload +create or replace package pkg_type is + function func(a int) return int; + function func(a int, b out int) return int; +end pkg_type; +/ +create or replace package body pkg_type +is + function func(a int) + return int + is + BEGIN + raise notice 'func(a int)'; + return 1; + end; + function func(a int, b out int) + return int + is + begin + b := 1; + raise notice 'func(a int, b out int)'; + return 2; + end; +end pkg_type; +/ +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ +NOTICE: func(a int) +CONTEXT: SQL statement "CALL pkg_type.func(a)" +PL/pgSQL function inline_code_block line 5 at assignment +NOTICE: pkg_type a:-1 +NOTICE: pkg_type b:-1 +NOTICE: pkg_type c:1 +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a, b); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ +NOTICE: func(a int, b out int) +CONTEXT: SQL statement "CALL pkg_type.func(a,b)" +PL/pgSQL function inline_code_block line 5 at assignment +NOTICE: pkg_type a:-1 +NOTICE: pkg_type b:1 +NOTICE: pkg_type c:2 +drop package pkg_type; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function out_param_func_overload.func(integer) +drop cascades to function out_param_func_overload.func(integer) +-- test overload with out +create or replace 
package pkg_type is + function func(a out int) return int; + function func(a int, b out int) return int; +end pkg_type; +/ +create or replace package body pkg_type +is + function func(a out int) + return int + is + BEGIN + raise notice 'func(a out int)'; + a := 1; + return 2; + end; + function func(a int, b out int) + return int + is + begin + raise notice 'func(a int, b out int)'; + b := 1; + return 3; + end; +end pkg_type; +/ +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a, b); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ +NOTICE: func(a int, b out int) +CONTEXT: SQL statement "CALL pkg_type.func(a,b)" +PL/pgSQL function inline_code_block line 5 at assignment +NOTICE: pkg_type a:-1 +NOTICE: pkg_type b:1 +NOTICE: pkg_type c:3 +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ +NOTICE: func(a out int) +CONTEXT: SQL statement "CALL pkg_type.func(a)" +PL/pgSQL function inline_code_block line 5 at assignment +NOTICE: pkg_type a:1 +NOTICE: pkg_type b:-1 +NOTICE: pkg_type c:2 +drop package pkg_type; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function out_param_func_overload.func() +drop cascades to function out_param_func_overload.func(integer) +create or replace package pkg_type is + function func(a int, b out int) return int; + function func2(a int, b out int) return int; +end pkg_type; +/ +create or replace package body pkg_type +is + function func(a int, b out int) + return int + is + BEGIN + b := 1; + raise notice 'func(a int, b out int)'; + return 1; + end; + function func2(a int, b out int) + return int + is + begin + b := 2; + raise notice 'func2(a int, b out int): b:%', b; + func(a, b); + raise notice 'func2(a int, b out int): b:%', b; + return 2; + end; +end pkg_type; +/ +DECLARE +a int := -1; +b 
int := -1; +c int := -1; +begin + c := pkg_type.func2(a, b); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ +NOTICE: func2(a int, b out int): b:2 +CONTEXT: SQL statement "CALL pkg_type.func2(a,b)" +PL/pgSQL function inline_code_block line 5 at assignment +NOTICE: func(a int, b out int) +CONTEXT: SQL statement "CALL func(a,b)" +PL/pgSQL function func2(integer) line 4 at SQL statement +SQL statement "CALL pkg_type.func2(a,b)" +PL/pgSQL function inline_code_block line 5 at assignment +NOTICE: func2(a int, b out int): b:1 +CONTEXT: SQL statement "CALL pkg_type.func2(a,b)" +PL/pgSQL function inline_code_block line 5 at assignment +NOTICE: pkg_type a:-1 +NOTICE: pkg_type b:1 +NOTICE: pkg_type c:2 +drop package pkg_type; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function out_param_func_overload.func(integer) +drop cascades to function out_param_func_overload.func2(integer) +--clean +reset behavior_compat_options; +drop schema out_param_func_overload cascade; diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 819c7cb6e0..3c9ed4c488 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -64,7 +64,7 @@ test: select_into_user_defined_variables test: select_into_file test: gs_dump_package trigger_dump -test: out_param_func +test: out_param_func out_param_func_overload #test: sqlcode_cursor test: gs_dump_tableconstraint diff --git a/src/test/regress/sql/out_param_func_overload.sql b/src/test/regress/sql/out_param_func_overload.sql new file mode 100644 index 0000000000..06e4480318 --- /dev/null +++ b/src/test/regress/sql/out_param_func_overload.sql @@ -0,0 +1,244 @@ +create schema out_param_func_overload; +set current_schema= out_param_func_overload; +set behavior_compat_options='proc_outparam_override'; + + +-- 0 +create or replace package pkg_type +as +function func(i1 in int, o1 out number, o2 out 
varchar2) return number; +function func(i1 in int, i2 in int, o1 out number, o2 out varchar2) return number; +end pkg_type; +/ + +create or replace package body pkg_type +is + function func(i1 in int, o1 out number, o2 out varchar2) + return number + is + BEGIN + raise notice 'func(i1 in int, o1 out number, o2 out varchar2)'; + o1 := 12.34; + o2 := 'test1'; + return 12.34; + end; + function func(i1 in int, i2 in int, o1 out number, o2 out varchar2) + return number + is + begin + raise notice 'func(i1 in int, i2 in int, o1 out number, o2 out varchar2)'; + o1 := 43.21; + o2 := 'test2'; + return 43.21; + end; +end pkg_type; +/ + +DECLARE +ii1 int := -1; +ii2 int := -1; +oo1 number := -1; +oo2 varchar2 := ''; +rr1 number := -1; +begin + rr1 := pkg_type.func(ii1, ii2, oo1, oo2); + raise notice 'pkg_type ii1:%', ii1; + raise notice 'pkg_type ii2:%', ii2; + raise notice 'pkg_type oo1:%', oo1; + raise notice 'pkg_type oo2:%', oo2; + raise notice 'pkg_type rr1:%', rr1; +END; +/ + +DECLARE +ii1 int := -1; +ii2 int := -1; +oo1 number := -1; +oo2 varchar2 := ''; +rr1 number := -1; +begin + rr1 := pkg_type.func(ii1, oo1, oo2); + raise notice 'pkg_type ii1:%', ii1; + raise notice 'pkg_type ii2:%', ii2; + raise notice 'pkg_type oo1:%', oo1; + raise notice 'pkg_type oo2:%', oo2; + raise notice 'pkg_type rr1:%', rr1; +END; +/ +drop package pkg_type; + +-- 1 private +create or replace package pkg_type as +function func(a varchar2) return varchar2; +end pkg_type; +/ +create or replace package body pkg_type as +function func(a varchar2) return varchar2 +as +b varchar2(5) :='var'; +BEGIN +return b; +end; +function func(a integer) return integer +as +b integer := 2; +BEGIN +return b; +end; +end pkg_type; +/ +select pkg_type.func(1); +drop package pkg_type; + +-- test overload +create or replace package pkg_type is + function func(a int) return int; + function func(a int, b out int) return int; +end pkg_type; +/ +create or replace package body pkg_type +is + function func(a int) + return 
int + is + BEGIN + raise notice 'func(a int)'; + return 1; + end; + function func(a int, b out int) + return int + is + begin + b := 1; + raise notice 'func(a int, b out int)'; + return 2; + end; +end pkg_type; +/ + +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ + +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a, b); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ +drop package pkg_type; + + +-- test overload with out +create or replace package pkg_type is + function func(a out int) return int; + function func(a int, b out int) return int; +end pkg_type; +/ + +create or replace package body pkg_type +is + function func(a out int) + return int + is + BEGIN + raise notice 'func(a out int)'; + a := 1; + return 2; + end; + function func(a int, b out int) + return int + is + begin + raise notice 'func(a int, b out int)'; + b := 1; + return 3; + end; +end pkg_type; +/ + +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a, b); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ + +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func(a); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ +drop package pkg_type; + + +create or replace package pkg_type is + function func(a int, b out int) return int; + function func2(a int, b out int) return int; +end pkg_type; +/ +create or replace package body pkg_type +is + function func(a int, b out int) + return int + is + BEGIN + b := 1; + raise notice 'func(a int, b out int)'; + return 1; + end; + function func2(a int, b out int) + return int + is + begin + b := 2; + raise notice 'func2(a int, b out int): b:%', b; + func(a, b); 
+ raise notice 'func2(a int, b out int): b:%', b; + return 2; + end; +end pkg_type; +/ + +DECLARE +a int := -1; +b int := -1; +c int := -1; +begin + c := pkg_type.func2(a, b); + raise notice 'pkg_type a:%', a; + raise notice 'pkg_type b:%', b; + raise notice 'pkg_type c:%', c; +END; +/ + +drop package pkg_type; + +--clean +reset behavior_compat_options; + +drop schema out_param_func_overload cascade; -- Gitee From 8af10b32e41c5bbc86e9e6df21d34e2459b25789 Mon Sep 17 00:00:00 2001 From: hwhbj Date: Fri, 9 Aug 2024 16:51:20 +0800 Subject: [PATCH 168/347] =?UTF-8?q?=E5=88=A0=E9=99=A4=E5=86=97=E4=BD=99?= =?UTF-8?q?=E7=9A=84server=5Fsupport=5Ftrace=EF=BC=8C=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E4=BD=BF=E7=94=A8enable=5Frecord=5Fnettime=E6=9B=BF=E4=BB=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/builtin_funcs.ini | 2 +- src/common/backend/utils/misc/guc.cpp | 16 +--------------- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index f8c0ec6aab..59af98d644 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -11091,7 +11091,7 @@ AddFuncGroup( AddFuncGroup( "standby_statement_history", 2, AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(72, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(72, 'i', 'o', 'o', 'o', 'o', 'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(72, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown", "parent_query_id", "net_trans_time"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), 
_17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(73, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(73, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(73, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown", "parent_query_id", "net_trans_time"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), 
_29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(73, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(73, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(73, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", 
"details", "is_slow_sql", "trace_id", "advise", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown", "parent_query_id", "net_trans_time"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "statement_detail_decode", 1, diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index eda659f416..33b089a0f4 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -2104,6 +2104,7 @@ static void InitConfigureNamesBool() STATS_COLLECTOR, gettext_noop("Enable record network time"), NULL, + GUC_REPORT }, &u_sess->attr.attr_common.enable_record_nettime, false, @@ -4590,7 +4591,6 @@ static void InitializeGUCOptionsFromEnvironment(void); static void InitializeOneGUCOption(struct config_generic* gconf); static void push_old_value(struct config_generic* gconf, GucAction action); static void ReportGUCOption(struct config_generic* record); -static void ReportTraceOption(); static void reapply_stacked_values(struct config_generic* variable, struct config_string* pHolder, GucStack* stack, const char* curvalue, GucContext curscontext, GucSource cursource); static void ShowGUCConfigOption(const char* name, DestReceiver* dest); @@ -6689,23 +6689,9 @@ void BeginReportingGUCOptions(void) if (conf->flags & GUC_REPORT) ReportGUCOption(conf); } - if (u_sess->attr.attr_common.enable_record_nettime) { - ReportTraceOption(); - } } -/* - * notify client connection driver support trace, low version server does not send this message. 
- */ -static void ReportTraceOption() { - StringInfoData msgbuf; - pq_beginmessage(&msgbuf, 'S'); - pq_sendstring(&msgbuf, "server_support_trace"); - pq_sendstring(&msgbuf, "1"); - pq_endmessage(&msgbuf); -} - /* * ReportGUCOption: if appropriate, transmit option value to frontend */ -- Gitee From 4fa2725d228d844be716a7223e17dda10d463b87 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Mon, 8 Jul 2024 16:53:48 +0800 Subject: [PATCH 169/347] =?UTF-8?q?=E7=A6=81=E6=AD=A2=E6=97=A0=E6=9D=83?= =?UTF-8?q?=E9=99=90=E7=94=A8=E6=88=B7=E6=89=93=E5=BC=80=E5=A4=A7=E5=AF=B9?= =?UTF-8?q?=E8=B1=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/libpq/be-fsstubs.cpp | 1 + .../storage/large_object/inv_api.cpp | 24 +++++++-- .../expected/large_object_permission.out | 51 +++++++++++++++++++ src/test/regress/parallel_schedule0B | 2 +- .../regress/sql/large_object_permission.sql | 22 ++++++++ 5 files changed, 96 insertions(+), 4 deletions(-) create mode 100644 src/test/regress/expected/large_object_permission.out create mode 100644 src/test/regress/sql/large_object_permission.sql diff --git a/src/common/backend/libpq/be-fsstubs.cpp b/src/common/backend/libpq/be-fsstubs.cpp index 28e3bca08c..b06c949132 100644 --- a/src/common/backend/libpq/be-fsstubs.cpp +++ b/src/common/backend/libpq/be-fsstubs.cpp @@ -82,6 +82,7 @@ Datum lo_open(PG_FUNCTION_ARGS) CreateFSContext(); lobjDesc = inv_open(lobjId, mode, u_sess->libpq_cxt.fscxt); if (lobjDesc == NULL) { /* lookup failed */ + elog(DEBUG4, "could not open large object %u", lobjId); PG_RETURN_INT32(-1); } diff --git a/src/gausskernel/storage/large_object/inv_api.cpp b/src/gausskernel/storage/large_object/inv_api.cpp index 34798874e3..112c849ca2 100644 --- a/src/gausskernel/storage/large_object/inv_api.cpp +++ b/src/gausskernel/storage/large_object/inv_api.cpp @@ -249,7 +249,21 @@ LargeObjectDesc* inv_open(Oid lobjId, int flags, MemoryContext mcxt) /* Can't use LargeObjectExists 
here because it always uses SnapshotNow */ if (!myLargeObjectExists(lobjId, snapshot)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("large object %u does not exist", lobjId))); - + + /* Apply permission checks, again specifying snapshot */ + if ((descflags & IFS_RDLOCK) != 0) { + if (!u_sess->attr.attr_sql.lo_compat_privileges && + pg_largeobject_aclcheck_snapshot(lobjId, GetUserId(), ACL_SELECT, snapshot) != ACLCHECK_OK) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied for large object %u", lobjId))); + } + if ((descflags & IFS_WRLOCK) != 0) { + if (!u_sess->attr.attr_sql.lo_compat_privileges && + pg_largeobject_aclcheck_snapshot(lobjId, GetUserId(), ACL_UPDATE, snapshot) != ACLCHECK_OK) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied for large object %u", lobjId))); + } + /* * We must register the snapshot in TopTransaction's resowner, because * it must stay alive until the LO is closed rather than until the @@ -542,7 +556,9 @@ int inv_write(LargeObjectDesc* obj_desc, const char* buf, int nbytes) Assert(buf != NULL); /* enforce writability because snapshot is probably wrong otherwise */ - Assert(obj_desc->flags & IFS_WRLOCK); + if ((obj_desc->flags & IFS_WRLOCK) == 0) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied for large object %u", obj_desc->id))); if (nbytes <= 0) { return 0; @@ -722,7 +738,9 @@ void inv_truncate(LargeObjectDesc* obj_desc, int64 len) Assert(PointerIsValid(obj_desc)); /* enforce writability because snapshot is probably wrong otherwise */ - Assert(obj_desc->flags & IFS_WRLOCK); + if ((obj_desc->flags & IFS_WRLOCK) == 0) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied for large object %u", obj_desc->id))); /* * use errmsg_internal here because we don't want to expose INT64_FORMAT diff --git a/src/test/regress/expected/large_object_permission.out 
b/src/test/regress/expected/large_object_permission.out new file mode 100644 index 0000000000..9f9cb7312b --- /dev/null +++ b/src/test/regress/expected/large_object_permission.out @@ -0,0 +1,51 @@ +create database large_object_test_db; +\c large_object_test_db +SELECT lo_create(100); + lo_create +----------- + 100 +(1 row) + +CREATE USER u1 with password 'qwer@1234'; +CREATE USER u2 with password 'qwer@1234'; +GRANT SELECT ON LARGE OBJECT 100 to u1; +SET SESSION AUTHORIZATION u1 PASSWORD 'qwer@1234'; +SELECT SESSION_USER, CURRENT_USER; + session_user | current_user +--------------+-------------- + u1 | u1 +(1 row) + +select lo_open(100, CAST(x'20000' | x'40000' AS integer)); +ERROR: permission denied for large object 100 +CONTEXT: referenced column: lo_open +select lo_open(100, CAST(x'40000' AS integer)); + lo_open +--------- + 0 +(1 row) + +SET SESSION AUTHORIZATION u2 PASSWORD 'qwer@1234'; +SELECT SESSION_USER, CURRENT_USER; + session_user | current_user +--------------+-------------- + u2 | u2 +(1 row) + +select lo_open(100, CAST(x'20000' | x'40000' AS integer)); +ERROR: permission denied for large object 100 +CONTEXT: referenced column: lo_open +select lo_open(100, CAST(x'40000' AS integer)); +ERROR: permission denied for large object 100 +CONTEXT: referenced column: lo_open +\c regression +reset session AUTHORIZATION; +SELECT SESSION_USER, CURRENT_USER; + session_user | current_user +--------------+-------------- +--?.* +(1 row) + +drop database large_object_test_db; +drop user u1; +drop user u2; diff --git a/src/test/regress/parallel_schedule0B b/src/test/regress/parallel_schedule0B index 727e170ce3..54763a70b4 100644 --- a/src/test/regress/parallel_schedule0B +++ b/src/test/regress/parallel_schedule0B @@ -295,7 +295,7 @@ test: plpgsql_reset_session plpgsql_nested_array_and_record #test: plpgsql_depend test: plpgsql_depend/plpgsql_depend_type plpgsql_depend/plpgsql_pkg_dependency plpgsql_depend/plpgsql_recompile plpgsql_depend/plpgsql_pkg_variable_dependency 
plpgsql_depend/plpgsql_depend_reftype #test: plancache limit rangefuncs prepare -test: returning largeobject +test: returning largeobject large_object_permission test: hw_explain_pretty1 hw_explain_pretty2 hw_explain_pretty3 test: goto test: equivalence_class diff --git a/src/test/regress/sql/large_object_permission.sql b/src/test/regress/sql/large_object_permission.sql new file mode 100644 index 0000000000..8b43bd0974 --- /dev/null +++ b/src/test/regress/sql/large_object_permission.sql @@ -0,0 +1,22 @@ +create database large_object_test_db; +\c large_object_test_db +SELECT lo_create(100); +CREATE USER u1 with password 'qwer@1234'; +CREATE USER u2 with password 'qwer@1234'; +GRANT SELECT ON LARGE OBJECT 100 to u1; +SET SESSION AUTHORIZATION u1 PASSWORD 'qwer@1234'; +SELECT SESSION_USER, CURRENT_USER; +select lo_open(100, CAST(x'20000' | x'40000' AS integer)); +select lo_open(100, CAST(x'40000' AS integer)); +SET SESSION AUTHORIZATION u2 PASSWORD 'qwer@1234'; +SELECT SESSION_USER, CURRENT_USER; +select lo_open(100, CAST(x'20000' | x'40000' AS integer)); +select lo_open(100, CAST(x'40000' AS integer)); + +\c regression +reset session AUTHORIZATION; +SELECT SESSION_USER, CURRENT_USER; +drop database large_object_test_db; +drop user u1; +drop user u2; + -- Gitee From 0d1f8df897d0032bfa5e1be2ce198cafb857f183 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Thu, 27 Jun 2024 17:19:32 +0800 Subject: [PATCH 170/347] =?UTF-8?q?=E6=A3=80=E6=B5=8B=E5=AD=97=E7=AC=A6?= =?UTF-8?q?=E4=B8=B2=E9=95=BF=E5=BA=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_exec.cpp | 38 +++++- .../expected/plpgsql_cursor_rowtype.out | 128 ++++++++++++++++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 121 +++++++++++++++++ 3 files changed, 282 insertions(+), 5 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index c4acf98074..a11c75befa 100644 --- 
a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1158,6 +1158,29 @@ static TupleDesc get_cursor_tupledesc_exec(PLpgSQL_expr* expr, bool isOnlySelect return tupleDesc; } +static void rowtype_column_len_check(Form_pg_attribute tattr, HeapTuple var_tup, TupleDesc var_tupdesc, Oid valtype, int fnum) +{ + if (tattr->atttypid == VARCHAROID || tattr->atttypid == CHAROID) { + int maxlen = tattr->atttypmod - VARHDRSZ; + if (valtype == VARCHAROID|| + valtype == NVARCHAR2OID|| + valtype == CHAROID|| + valtype == UNKNOWNOID|| + valtype == CSTRINGOID|| + valtype == TEXTOID|| + valtype == NAMEOID) { + char *val = SPI_getvalue(var_tup, var_tupdesc, fnum + 1); + if (val && maxlen > 0) { + int valLen = strlen(val); + if (valLen > maxlen) { + ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type character varying(%d)", maxlen))); + } + } + } + } +} + static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *datum, PLpgSQL_function *func) { bool *replaces = NULL; @@ -1207,15 +1230,17 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d bool isnull; Oid valtype; int32 valtypmod; - Oid reqtype = TupleDescAttr(new_tupdesc, fnum)->atttypid; + Form_pg_attribute tattr = TupleDescAttr(new_tupdesc, fnum); + Form_pg_attribute attr = TupleDescAttr(rec->tupdesc, anum); + Oid reqtype = tattr->atttypid; - while (anum < new_natts && TupleDescAttr(rec->tupdesc, anum)->attisdropped) + while (anum < new_natts && attr->attisdropped) { anum++; /* skip dropped column in tuple */ - + } if (anum < new_natts) { value = SPI_getbinval(rec->tup, rec->tupdesc, anum + 1, &isnull); - valtype = TupleDescAttr(rec->tupdesc, anum)->atttypid; - valtypmod = TupleDescAttr(rec->tupdesc, anum)->atttypmod; + valtype = attr->atttypid; + valtypmod = attr->atttypmod; anum++; } else { /* When source value is missing */ @@ -1225,6 +1250,7 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate 
*estate, PLpgSQL_datum *d errdetail("%s check is active.", "strict_multi_assignment"), errhint("Make sure the query returns the exact list of columns."))); } + rowtype_column_len_check(tattr, rec->tup, rec->tupdesc, valtype, anum); newvalues[fnum] = exec_simple_cast_value(estate, value, valtype, reqtype, valtypmod, isnull); newnulls[fnum] = isnull; } @@ -9482,6 +9508,7 @@ static void exec_move_row_from_fields(PLpgSQL_execstate *estate, PLpgSQL_datum * /* Walk over destination columns */ for (fnum = 0; fnum < vtd_natts; fnum++) { Form_pg_attribute attr = TupleDescAttr(var_tupdesc, fnum); + Form_pg_attribute tattr = TupleDescAttr(tupdesc, fnum); Datum value; bool isnull; Oid valtype; @@ -9504,6 +9531,7 @@ static void exec_move_row_from_fields(PLpgSQL_execstate *estate, PLpgSQL_datum * errdetail("%s check is active.", "strict_multi_assignment"), errhint("Make sure the query returns the exact list of columns."))); } + rowtype_column_len_check(tattr, var_tup, var_tupdesc, valtype, fnum); newvalues[fnum] = exec_simple_cast_value(estate, value, valtype, reqtype, valtypmod, isnull); newnulls[fnum] = isnull; } diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 2d33e99bab..bf9a23285a 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -187,6 +187,134 @@ INFO: 2 (1 row) +-- test: max len +drop table if exists t1; +NOTICE: table "t1" does not exist, skipping +create table t1(col1 tinyint primary key,col2 varchar(10)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1" +declare + cursor case1 is select * from t1; + source case1%rowtype:=(200,'abcdeabcedone'); +begin + raise notice '% , %',source.col1,source.col2; +end; +/ +ERROR: value too long for type character varying(10) +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +declare + 
cursor case1 is select * from t1; + source case1%rowtype; +begin + source:=(200,'abcdeabcedone'); + raise notice '% , %',source.col1,source.col2; +end; +/ +ERROR: value too long for type character varying(10) +CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment +drop table if exists t1; +-- test:pkg head max len +create table test12(col1 varchar(10), col2 varchar(10)); +insert into test12 values ('a', 'aa'); +insert into test12 values ('a', 'aa'); +insert into test12 values ('b', 'bb'); +create table test22(col1 varchar2, col2 varchar2); +insert into test22 values ('dsasdad6sad','d6sasdadsad'); +create or replace package pck3p is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype:=('dsasdad6sad','d6sasdadsad'); +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck3p; +/ +ERROR: value too long for type character varying(10) +CONTEXT: PL/pgSQL function inline_code_block line 2 during statement block local variable initialization +create or replace package body pck3p is +procedure ppp1() is +cursor cur2 is +select col1,col2 from test12; +begin +open cur2; +fetch cur2 into var1; +ppp2(var1); +raise info '%', var1.col1; +end; + +procedure ppp2(a cur1%rowtype) is +begin + a.col1:='dsasdadsad'; + raise info '%', a.col1; +end; +end pck3p; +/ +ERROR: package spec not found +call pck3p.ppp1(); +ERROR: schema "pck3p" does not exist +-- test:pkg body max len +create or replace package pck3p is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype:=('GJHGH','TYUTD'); +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck3p; +/ +create or replace package body pck3p is +procedure ppp1() is +cursor cur2 is +select col1,col2 from test12; +begin +open cur2; +fetch cur2 into var1; +ppp2(var1); +raise info '%', var1.col1; +end; + +procedure ppp2(a cur1%rowtype) is +begin + a.col1:='dsasdaGJHGdsad'; + raise info '%', a.col1; +end; +end pck3p; +/ +call pck3p.ppp1(); +ERROR: value too long for type character varying(10) +--?.* +SQL 
statement "CALL ppp2(var1)" +PL/pgSQL function ppp1() line 6 at PERFORM +-- test:cursor fetch max len +create or replace package pck3p is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype:=('GJHGH','TYUTD'); +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck3p; +/ +create or replace package body pck3p is +procedure ppp1() is +cursor cur2 is +select col1,col2 from test22; +begin +open cur2; +fetch cur2 into var1; +ppp2(var1); +raise info '%', var1.col1; +end; + +procedure ppp2(a cur1%rowtype) is +begin + a.col1:='dsasdaGJHGdsad'; + raise info '%', a.col1; +end; +end pck3p; +/ +call pck3p.ppp1(); +ERROR: value too long for type character varying(10) +CONTEXT: PL/pgSQL function ppp1() line 5 at FETCH +drop package pck3p; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_cursor_rowtype.ppp1() +--?.* +drop table test12; +drop table test22; create table test12(col1 varchar2,col2 varchar2); insert into test12 values ('a', 'aa'); insert into test12 values ('b', 'bb'); diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index e6b3d09208..99aa5ddbfb 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -173,6 +173,127 @@ end; call pro_cursor_no_args_2(); +-- test: max len +drop table if exists t1; +create table t1(col1 tinyint primary key,col2 varchar(10)); + +declare + cursor case1 is select * from t1; + source case1%rowtype:=(200,'abcdeabcedone'); +begin + raise notice '% , %',source.col1,source.col2; +end; +/ + +declare + cursor case1 is select * from t1; + source case1%rowtype; +begin + source:=(200,'abcdeabcedone'); + raise notice '% , %',source.col1,source.col2; +end; +/ +drop table if exists t1; +-- test:pkg head max len +create table test12(col1 varchar(10), col2 varchar(10)); +insert into test12 values ('a', 'aa'); +insert into test12 values ('a', 'aa'); +insert into test12 values ('b', 
'bb'); +create table test22(col1 varchar2, col2 varchar2); +insert into test22 values ('dsasdad6sad','d6sasdadsad'); + +create or replace package pck3p is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype:=('dsasdad6sad','d6sasdadsad'); +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck3p; +/ + +create or replace package body pck3p is +procedure ppp1() is +cursor cur2 is +select col1,col2 from test12; +begin +open cur2; +fetch cur2 into var1; +ppp2(var1); +raise info '%', var1.col1; +end; + +procedure ppp2(a cur1%rowtype) is +begin + a.col1:='dsasdadsad'; + raise info '%', a.col1; +end; +end pck3p; +/ + +call pck3p.ppp1(); + +-- test:pkg body max len +create or replace package pck3p is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype:=('GJHGH','TYUTD'); +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck3p; +/ + +create or replace package body pck3p is +procedure ppp1() is +cursor cur2 is +select col1,col2 from test12; +begin +open cur2; +fetch cur2 into var1; +ppp2(var1); +raise info '%', var1.col1; +end; + +procedure ppp2(a cur1%rowtype) is +begin + a.col1:='dsasdaGJHGdsad'; + raise info '%', a.col1; +end; +end pck3p; +/ + +call pck3p.ppp1(); + +-- test:cursor fetch max len +create or replace package pck3p is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype:=('GJHGH','TYUTD'); +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck3p; +/ + +create or replace package body pck3p is +procedure ppp1() is +cursor cur2 is +select col1,col2 from test22; +begin +open cur2; +fetch cur2 into var1; +ppp2(var1); +raise info '%', var1.col1; +end; + +procedure ppp2(a cur1%rowtype) is +begin + a.col1:='dsasdaGJHGdsad'; + raise info '%', a.col1; +end; +end pck3p; +/ + +call pck3p.ppp1(); +drop package pck3p; +drop table test12; +drop table test22; + create table test12(col1 varchar2,col2 varchar2); insert into test12 values ('a', 'aa'); insert into test12 values ('b', 'bb'); -- Gitee From 
94fce5b3255f3eaacd922f1d99fa03a3ed4ffb60 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Mon, 12 Aug 2024 16:53:23 +0800 Subject: [PATCH 171/347] =?UTF-8?q?=E7=A6=81=E6=AD=A2=E8=8E=B7=E5=8F=96?= =?UTF-8?q?=E5=B5=8C=E5=A5=97=E6=B8=B8=E6=A0=87=E7=9A=84rowtype?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_exec.cpp | 12 ++ .../regress/expected/cursor_expression.out | 126 ++++++------------ .../expected/plpgsql_cursor_rowtype.out | 30 +++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 29 ++++ 4 files changed, 114 insertions(+), 83 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index c4acf98074..cbe3024c1e 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1106,6 +1106,7 @@ static TupleDesc get_cursor_tupledesc_exec(PLpgSQL_expr* expr, bool isOnlySelect Node *parsetree = (Node *)lfirst(cell); t_thrd.postgres_cxt.cur_command_tag = transform_node_tag(parsetree); if (nodeTag(parsetree) == T_SelectStmt) { + ListCell* target_lc = NULL; if (checkSelectIntoParse((SelectStmt*)parsetree)) { list_free_deep(parsetreeList); ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -1114,6 +1115,17 @@ static TupleDesc get_cursor_tupledesc_exec(PLpgSQL_expr* expr, bool isOnlySelect errcause("feature not supported"), erraction("modify the query"))); } + SelectStmt *select_stmt = (SelectStmt*)parsetree; + foreach(target_lc, select_stmt->targetList) { + Node *res_node = (Node*)lfirst(target_lc); + if (res_node->type == T_ResTarget) { + ResTarget* res_target = (ResTarget*)res_node; + if (res_target->val->type == T_CursorExpression) { + ereport(ERROR, (errcode(ERRCODE_PLPGSQL_ERROR), + errmsg("obtain rowtype form nested cursor is not supported"))); + } + } + } } else { if (isOnlySelect) { expr->func->pre_parse_trig = temp_pre_parse_trig; diff --git 
a/src/test/regress/expected/cursor_expression.out b/src/test/regress/expected/cursor_expression.out index a0b5eb6815..fe470f5703 100644 --- a/src/test/regress/expected/cursor_expression.out +++ b/src/test/regress/expected/cursor_expression.out @@ -701,15 +701,9 @@ begin end; / call test_cursor_2(); -NOTICE: zhangsan -NOTICE: lisi -NOTICE: wangwu -NOTICE: heliu - test_cursor_2 ---------------- - -(1 row) - +ERROR: execute failed when parse the query: SELECT e.name as name, CURSOR(SELECT e1.name FROM employees e1)abc FROM employees e +DETAIL: obtain rowtype form nested cursor is not supported +CONTEXT: PL/pgSQL function test_cursor_2() during initialization of execution state drop procedure test_cursor_2; create or replace procedure test_cursor_2 as @@ -725,15 +719,9 @@ begin end; / call test_cursor_2(); -NOTICE: zhangsan -NOTICE: lisi -NOTICE: wangwu -NOTICE: heliu - test_cursor_2 ---------------- - -(1 row) - +ERROR: execute failed when parse the query: SELECT e.name as name, CURSOR(SELECT e1.name FROM employees e1)abc FROM employees e +DETAIL: obtain rowtype form nested cursor is not supported +CONTEXT: PL/pgSQL function test_cursor_2() during initialization of execution state drop procedure test_cursor_2; create or replace procedure test_cursor_2 as @@ -751,15 +739,9 @@ begin end; / call test_cursor_2(); -NOTICE: name : zhangsan -NOTICE: name : lisi -NOTICE: name : wangwu -NOTICE: name : heliu - test_cursor_2 ---------------- - -(1 row) - +ERROR: execute failed when parse the query: SELECT e.name as name, CURSOR(SELECT e1.name FROM employees e1)abc FROM employees e +DETAIL: obtain rowtype form nested cursor is not supported +CONTEXT: PL/pgSQL function test_cursor_2() during initialization of execution state drop procedure test_cursor_2; create or replace procedure test_cursor_2 as @@ -781,31 +763,9 @@ begin end; / call test_cursor_2(); -NOTICE: zhangsan -NOTICE: last_name : zhangsan -NOTICE: last_name : lisi -NOTICE: last_name : wangwu -NOTICE: last_name : heliu 
-NOTICE: lisi -NOTICE: last_name : zhangsan -NOTICE: last_name : lisi -NOTICE: last_name : wangwu -NOTICE: last_name : heliu -NOTICE: wangwu -NOTICE: last_name : zhangsan -NOTICE: last_name : lisi -NOTICE: last_name : wangwu -NOTICE: last_name : heliu -NOTICE: heliu -NOTICE: last_name : zhangsan -NOTICE: last_name : lisi -NOTICE: last_name : wangwu -NOTICE: last_name : heliu - test_cursor_2 ---------------- - -(1 row) - +ERROR: execute failed when parse the query: SELECT e.name as name, CURSOR(SELECT e1.name FROM employees e1)abc FROM employees e +DETAIL: obtain rowtype form nested cursor is not supported +CONTEXT: PL/pgSQL function test_cursor_2() during initialization of execution state drop procedure test_cursor_2; -- create table t_cursor_0011_01(department_id int, department_name varchar(50)); @@ -845,59 +805,59 @@ begin end; / call pro_cursor_0011_02(); -NOTICE: department_name : 3 -NOTICE: department employees info : pro +NOTICE: department_name : 3 +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department_name : 3 -NOTICE: department employees info : pro +NOTICE: department_name : 3 +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 1 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department employees info : pro +NOTICE: department employees info : pro NOTICE: employees_id_var : 2 -NOTICE: department_name : 2 -NOTICE: department employees info : rd +NOTICE: department_name : 2 +NOTICE: department 
employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 2 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 2 -NOTICE: department_name : 2 -NOTICE: department employees info : rd +NOTICE: department_name : 2 +NOTICE: department employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 1 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 2 -NOTICE: department employees info : rd +NOTICE: department employees info : rd NOTICE: employees_id_var : 2 -NOTICE: department_name : 1 -NOTICE: department employees info : sale +NOTICE: department_name : 1 +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 -NOTICE: department_name : 1 -NOTICE: department employees info : sale +NOTICE: department_name : 1 +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 1 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 -NOTICE: department employees info : sale +NOTICE: department employees info : sale NOTICE: employees_id_var : 2 pro_cursor_0011_02 -------------------- @@ -958,7 +918,7 @@ NOTICE: --?duration.* CONTEXT: PL/pgSQL function 
inline_code_block line 8 at FETCH -NOTICE: company_name : zhangsan +NOTICE: company_name : zhangsan NOTICE: QueryPlan diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 2d33e99bab..0389d0278d 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -1179,6 +1179,36 @@ INFO: after loop: drop package pck_for; NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +drop table test1; +ERROR: table "test1" does not exist +drop table test2; +ERROR: table "test2" does not exist +create table test1(id int, name varchar, job varchar); +create table test2(id int, age int); +insert into test1 values (1, 'zhang', 'worker'),(2, 'li', 'teacher'),(3, 'wang', 'engineer'); +insert into test2 values (1, 20),(2, 30),(3, 40); +DECLARE + CURSOR c1 IS SELECT t.age, CURSOR(SELECT name FROM test1 t1 where t1.id = t.id) abc FROM test2 t;-- 在匿名块中使用游标表达式样例 + age_temp int; + name_temp varchar; + type emp_cur_type is ref cursor; + c2 emp_cur_type; + source c1%rowtype; +BEGIN + OPEN c1; + loop + fetch c1 into source; + exit when c1%notfound; + raise notice '%',source; + end loop; + close c1; +END; +/ +ERROR: execute failed when parse the query: SELECT t.age, CURSOR(SELECT name FROM test1 t1 where t1.id = t.id)abc FROM test2 t +DETAIL: obtain rowtype form nested cursor is not supported +CONTEXT: PL/pgSQL function inline_code_block during initialization of execution state +drop table test1; +drop table test2; -- (c) select only one col create or replace package pck_for is temp_result int; diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index e6b3d09208..4eb337f1a5 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -883,6 +883,35 @@ end pck_for; call pck_for.p1(); drop package pck_for; +drop table test1; +drop table 
test2; + +create table test1(id int, name varchar, job varchar); +create table test2(id int, age int); +insert into test1 values (1, 'zhang', 'worker'),(2, 'li', 'teacher'),(3, 'wang', 'engineer'); +insert into test2 values (1, 20),(2, 30),(3, 40); + +DECLARE + CURSOR c1 IS SELECT t.age, CURSOR(SELECT name FROM test1 t1 where t1.id = t.id) abc FROM test2 t;-- 在匿名块中使用游标表达式样例 + age_temp int; + name_temp varchar; + type emp_cur_type is ref cursor; + c2 emp_cur_type; + source c1%rowtype; +BEGIN + OPEN c1; + loop + fetch c1 into source; + exit when c1%notfound; + raise notice '%',source; + end loop; + close c1; +END; +/ + +drop table test1; +drop table test2; + -- (c) select only one col create or replace package pck_for is temp_result int; -- Gitee From c93f5e203aed3cf71eb8cdab5781c2fbab0a6641 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cyaoxin=E2=80=9D?= Date: Fri, 9 Aug 2024 18:05:32 +0800 Subject: [PATCH 172/347] =?UTF-8?q?1=E3=80=81fix=20move=20partitions=202?= =?UTF-8?q?=E3=80=81optimit=20vacuum=20full?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/tablecmds.cpp | 91 ++++++---- .../storage/access/heap/rewriteheap.cpp | 50 +++--- .../storage/access/rmgrdesc/uheapdesc.cpp | 14 +- .../storage/access/table/tableam.cpp | 3 - .../access/transam/extreme_rto/dispatcher.cpp | 3 +- .../transam/parallel_recovery/dispatcher.cpp | 3 +- .../access/ustore/knl_uextremeredo.cpp | 29 ++++ .../storage/access/ustore/knl_uheap.cpp | 37 ++++ .../storage/access/ustore/knl_undoaction.cpp | 162 ++++++++++++++++++ .../storage/access/ustore/knl_uredo.cpp | 11 ++ .../storage/access/ustore/knl_uvisibility.cpp | 2 +- .../storage/replication/logical/decode.cpp | 1 + .../replication/logical/logical_parse.cpp | 1 + src/include/access/ustore/knl_uheap.h | 2 + src/include/access/ustore/knl_undorequest.h | 16 +- src/include/access/ustore/knl_uredo.h | 1 + src/include/access/xlogproc.h | 4 + 17 files changed, 363 
insertions(+), 67 deletions(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 4fe2a236ca..93868211d5 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -34,6 +34,7 @@ #include "access/tableam.h" #include "access/ustore/knl_uheap.h" #include "access/ustore/knl_uscan.h" +#include "access/ustore/knl_undorequest.h" #include "access/multixact.h" #include "catalog/catalog.h" #include "catalog/dependency.h" @@ -20647,24 +20648,28 @@ static void copy_relation_data(Relation rel, SMgrRelation* dstptr, ForkNumber fo UnlockReleaseBuffer(buf); } else { - /* - * WAL-log the copied page. Unfortunately we don't know what kind of a - * page this is, so we have to log the full page including any unused - * space. - */ - if (use_wal) { - log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false, &tde_info); - } - - if (RelationisEncryptEnable(rel)) { - bufToWrite = PageDataEncryptIfNeed(page, &tde_info, true); + if (RelationIsUstoreFormat(rel)) { + if (ExecuteUndoActionsPageForPartition(rel, dst, forkNum, blkno, blkno, ROLLBACK_OP_FOR_MOVE_TBLSPC)) { + *dstptr = dst = smgropen(newFileNode, backendId); + src = rel->rd_smgr; + } } else { - bufToWrite = page; + /* + * WAL-log the copied page. Unfortunately we don't know what kind of a + * page this is, so we have to log the full page including any unused + * space. 
+ */ + if (use_wal) { + log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false, &tde_info); + } + if (RelationisEncryptEnable(rel)) { + bufToWrite = PageDataEncryptIfNeed(page, &tde_info, true); + } else { + bufToWrite = page; + } + PageSetChecksumInplace((Page)bufToWrite, blkno); + smgrextend(dst, forkNum, blkno, bufToWrite, false); } - - PageSetChecksumInplace((Page)bufToWrite, blkno); - - smgrextend(dst, forkNum, blkno, bufToWrite, true); } } @@ -20879,25 +20884,40 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char MarkBufferDirty(buf); UnlockReleaseBuffer(buf); } else { - /* - * XLOG stuff - * Retry to open smgr in case it is cloesd when we process SI messages - */ RelationOpenSmgr(dest); - if (use_wal) { - log_newpage(&dest->rd_smgr->smgr_rnode.node, forkNum, dest_blkno, page, true, &tde_info); - } - - if (RelationisEncryptEnable(src)) { - bufToWrite = PageDataEncryptIfNeed(page, &tde_info, true); + if (RelationIsUstoreFormat(src)) { + PartitionToastInfo pToastInfo = {InvalidOid, InvalidOid, InvalidOid, InvalidOid, NULL, NULL}; + if (OidIsValid(destToastOid)) { + pToastInfo.srcPartTupleOid = srcToastOid; + pToastInfo.destPartTupleOid = destToastOid; + pToastInfo.tupDesc = srcTupleDesc; + pToastInfo.chunkIdHashTable = chunkIdHashTable; + } else if (RelationIsToast(dest)) { + pToastInfo.tupDesc = src->rd_att; + pToastInfo.srcToastRelOid = src->rd_id; + pToastInfo.destToastRelOid = dest->rd_id; + pToastInfo.chunkIdHashTable = chunkIdHashTable; + } + if (ExecuteUndoActionsPageForPartition(src, dest->rd_smgr, forkNum, src_blkno, dest_blkno, + ROLLBACK_OP_FOR_MERGE_PARTITION, &pToastInfo)) { + RelationOpenSmgr(dest); + } } else { - bufToWrite = page; - } + if (use_wal) { + log_newpage(&dest->rd_smgr->smgr_rnode.node, forkNum, dest_blkno, page, true, &tde_info); + } + + if (RelationisEncryptEnable(src)) { + bufToWrite = PageDataEncryptIfNeed(page, &tde_info, true); + } else { + bufToWrite = page; + } - /* heap block mix in the 
block number to checksum. need recalculate */ - PageSetChecksumInplace((Page)bufToWrite, dest_blkno); + /* heap block mix in the block number to checksum. need recalculate */ + PageSetChecksumInplace((Page)bufToWrite, dest_blkno); - smgrextend(dest->rd_smgr, forkNum, dest_blkno, bufToWrite, true); + smgrextend(dest->rd_smgr, forkNum, dest_blkno, bufToWrite, false); + } } } @@ -27409,6 +27429,10 @@ static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd) return; } + if (RelationIsUstoreFormat(partTableRel)) { + PreventTransactionChain(true, "ALTER TALBE MERGE PARITITON"); + } + /* the source partitions, must be at least 2, to merge into 1 partition */ if (partNum < 2) { ereport(ERROR, @@ -27943,6 +27967,11 @@ static void ExecUndoActionsPageForRelation(Relation rel) return; } + for (BlockNumber blkno = 0; blkno < srcHeapBlocks; blkno ++) { + ExecuteUndoActionsPageForPartition(rel, rel->rd_smgr, MAIN_FORKNUM, blkno, blkno, + ROLLBACK_OP_FOR_EXCHANGE_PARTITION); + } + RelationCloseSmgr(rel); } diff --git a/src/gausskernel/storage/access/heap/rewriteheap.cpp b/src/gausskernel/storage/access/heap/rewriteheap.cpp index 5c7231abb9..0b430f5b70 100644 --- a/src/gausskernel/storage/access/heap/rewriteheap.cpp +++ b/src/gausskernel/storage/access/heap/rewriteheap.cpp @@ -294,8 +294,11 @@ RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap, Transactio * even new_heap is a partitional relation, its rd_rel is copied from its pareent * relation. 
so don't worry the compress property about new_heap; */ - if (!RelationIsUstoreFormat(old_heap)) + if (!RelationIsUstoreFormat(old_heap)) { state->rs_doCmprFlag = RowRelationIsCompressed(new_heap); + } else { + state->rs_doCmprFlag = false; + } if (state->rs_doCmprFlag) { state->rs_compressor = New(rw_cxt) PageCompress(new_heap, rw_cxt); @@ -320,21 +323,23 @@ RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap, Transactio state->rs_size = 0; } - /* Initialize hash tables used to track update chains */ - errorno = memset_s(&hash_ctl, sizeof(hash_ctl), 0, sizeof(hash_ctl)); - securec_check(errorno, "", ""); - hash_ctl.keysize = sizeof(TidHashKey); - hash_ctl.entrysize = sizeof(UnresolvedTupData); - hash_ctl.hcxt = state->rs_cxt; - hash_ctl.hash = tag_hash; + if (!RelationIsUstoreFormat(old_heap)) { + /* Initialize hash tables used to track update chains */ + errorno = memset_s(&hash_ctl, sizeof(hash_ctl), 0, sizeof(hash_ctl)); + securec_check(errorno, "", ""); + hash_ctl.keysize = sizeof(TidHashKey); + hash_ctl.entrysize = sizeof(UnresolvedTupData); + hash_ctl.hcxt = state->rs_cxt; + hash_ctl.hash = tag_hash; - state->rs_unresolved_tups = hash_create("Rewrite / Unresolved ctids", 128, /* arbitrary initial size */ - &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + state->rs_unresolved_tups = hash_create("Rewrite / Unresolved ctids", 128, /* arbitrary initial size */ + &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - hash_ctl.entrysize = sizeof(OldToNewMappingData); + hash_ctl.entrysize = sizeof(OldToNewMappingData); - state->rs_old_new_tid_map = hash_create("Rewrite / Old to new tid map", 128, /* arbitrary initial size */ - &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + state->rs_old_new_tid_map = hash_create("Rewrite / Old to new tid map", 128, /* arbitrary initial size */ + &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + } (void)MemoryContextSwitchTo(old_cxt); @@ -367,7 +372,11 @@ static void 
rewrite_write_one_page(RewriteState state, Page page) STORAGE_SPACE_OPERATION(state->rs_new_rel, BLCKSZ); if (state->rs_use_wal) { - log_newpage(&state->rs_new_rel->rd_node, MAIN_FORKNUM, state->rs_blockno, page, true, &tde_info); + if (!RelationIsUstoreFormat(state->rs_old_rel)) { + log_newpage(&state->rs_new_rel->rd_node, MAIN_FORKNUM, state->rs_blockno, page, true, &tde_info); + } else { + LogUHeapNewPage(&state->rs_new_rel->rd_node, MAIN_FORKNUM, state->rs_blockno, page, true, &tde_info); + } } RelationOpenSmgr(state->rs_new_rel); @@ -416,13 +425,14 @@ void end_heap_rewrite(RewriteState state) * Write any remaining tuples in the UnresolvedTups table. If we have any * left, they should in fact be dead, but let's err on the safe side. */ - hash_seq_init(&seq_status, state->rs_unresolved_tups); + if (!RelationIsUstoreFormat(state->rs_old_rel)) { + hash_seq_init(&seq_status, state->rs_unresolved_tups); - while ((unresolved = (UnresolvedTupData *)hash_seq_search(&seq_status)) != NULL) { - ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid); - raw_heap_insert(state, unresolved->tuple); + while ((unresolved = (UnresolvedTupData *)hash_seq_search(&seq_status)) != NULL) { + ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid); + raw_heap_insert(state, unresolved->tuple); + } } - /* Write the last page, if any */ if (state->rs_buffer_valid) { rewrite_write_one_page(state, state->rs_buffer); @@ -1379,7 +1389,7 @@ static void RawUHeapInsert(RewriteState state, UHeapTuple tup) if (!state->rs_buffer_valid) { UHeapPageHeaderData *uheappage = (UHeapPageHeaderData *)page; /* Initialize a new empty page */ - UPageInit(page, BLCKSZ, UHEAP_SPECIAL_SIZE, RelationGetInitTd(state->rs_new_rel)); + UPageInit(page, BLCKSZ, UHEAP_SPECIAL_SIZE, UHEAP_MIN_TD); uheappage->pd_xid_base = u_sess->utils_cxt.RecentXmin - FirstNormalTransactionId; uheappage->pd_multi_base = 0; state->rs_buffer_valid = true; diff --git a/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp 
b/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp index 70a65c413c..560bd3a4f3 100644 --- a/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp @@ -66,28 +66,22 @@ const char* uheap_type_name(uint8 subtype) switch (info) { case XLOG_UHEAP_INSERT: return "uheap_insert"; - break; case XLOG_UHEAP_DELETE: return "uheap_delete"; - break; case XLOG_UHEAP_UPDATE: return "uheap_update"; - break; case XLOG_UHEAP_FREEZE_TD_SLOT: return "uheap_freeze"; - break; case XLOG_UHEAP_INVALID_TD_SLOT: return "uheap_invalid_slot"; - break; case XLOG_UHEAP_CLEAN: return "uheap_clean"; - break; case XLOG_UHEAP_MULTI_INSERT: return "uheap_multi_insert"; - break; + case XLOG_UHEAP_NEW_PAGE: + return "uheap_new_page"; default: return "unknown_type"; - break; } } @@ -472,6 +466,10 @@ void UHeapDesc(StringInfo buf, XLogReaderState *record) } break; } + case XLOG_UHEAP_NEW_PAGE: { + appendStringInfo(buf, "new uheap page"); + break; + } default: appendStringInfo(buf, "UNKNOWN"); } diff --git a/src/gausskernel/storage/access/table/tableam.cpp b/src/gausskernel/storage/access/table/tableam.cpp index 200ba28a1e..25b54e8c4f 100644 --- a/src/gausskernel/storage/access/table/tableam.cpp +++ b/src/gausskernel/storage/access/table/tableam.cpp @@ -750,9 +750,6 @@ bool UHeapamTopsPageGetItem(Relation rel, Tuple tuple, Page page, OffsetNumber t return false; } uDiskTuple = (UHeapDiskTuple)UPageGetRowData(page, uTupleItem); - /* set freeze options for rows in merging file */ - UHeapTupleHeaderSetTDSlot(uDiskTuple, UHEAPTUP_SLOT_FROZEN); - ((UHeapTuple)tuple)->disk_tuple = uDiskTuple; ((UHeapTuple)tuple)->disk_tuple_size = RowPtrGetLen(uTupleItem); ((UHeapTuple)tuple)->tupTableType = UHEAP_TUPLE; diff --git a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp index c63c0aea78..b10e835362 100755 --- 
a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp @@ -209,7 +209,7 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { #ifdef ENABLE_MOT {DispatchMotRecord, NULL, RM_MOT_ID, 0, 0}, #endif - { DispatchUHeapRecord, RmgrRecordInfoValid, RM_UHEAP_ID, XLOG_UHEAP_INSERT, XLOG_UHEAP_MULTI_INSERT }, + { DispatchUHeapRecord, RmgrRecordInfoValid, RM_UHEAP_ID, XLOG_UHEAP_INSERT, XLOG_UHEAP_NEW_PAGE }, { DispatchUHeap2Record, RmgrRecordInfoValid, RM_UHEAP2_ID, XLOG_UHEAP2_BASE_SHIFT, XLOG_UHEAP2_EXTEND_TD_SLOTS }, { DispatchUHeapUndoRecord, RmgrRecordInfoValid, RM_UNDOLOG_ID, XLOG_UNDO_EXTEND, XLOG_UNDO_DISCARD }, { DispatchUndoActionRecord, RmgrRecordInfoValid, RM_UHEAPUNDO_ID, @@ -1599,6 +1599,7 @@ static void GetUndoSlotIds(XLogReaderState *record) } case XLOG_UHEAP_FREEZE_TD_SLOT: case XLOG_UHEAP_INVALID_TD_SLOT: + case XLOG_UHEAP_NEW_PAGE: case XLOG_UHEAP_CLEAN: { /* No undo actions to redo */ return; diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp index 81151b609f..5763320478 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp @@ -202,7 +202,7 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { #ifdef ENABLE_MOT { DispatchMotRecord, NULL, RM_MOT_ID, 0, 0}, #endif - { DispatchUHeapRecord, RmgrRecordInfoValid, RM_UHEAP_ID, XLOG_UHEAP_INSERT, XLOG_UHEAP_MULTI_INSERT }, + { DispatchUHeapRecord, RmgrRecordInfoValid, RM_UHEAP_ID, XLOG_UHEAP_INSERT, XLOG_UHEAP_NEW_PAGE }, { DispatchUHeap2Record, RmgrRecordInfoValid, RM_UHEAP2_ID, XLOG_UHEAP2_BASE_SHIFT, XLOG_UHEAP2_EXTEND_TD_SLOTS }, { DispatchUHeapUndoRecord, RmgrRecordInfoValid, RM_UNDOLOG_ID, XLOG_UNDO_EXTEND, XLOG_UNDO_DISCARD }, { DispatchUndoActionRecord, RmgrRecordInfoValid, 
RM_UHEAPUNDO_ID, @@ -2310,6 +2310,7 @@ static bool DispatchUHeapRecord(XLogReaderState *record, List *expectedTLIs, Tim } case XLOG_UHEAP_FREEZE_TD_SLOT: case XLOG_UHEAP_INVALID_TD_SLOT: + case XLOG_UHEAP_NEW_PAGE: case XLOG_UHEAP_CLEAN: { GetWorkersIdWithOutUndoBuffer(record); hasUndoAction = false; diff --git a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp index 48d5bd9ee5..15527a16db 100644 --- a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp @@ -229,6 +229,19 @@ static XLogRecParseState *UHeapXlogCleanParseBlock(XLogReaderState *record, uint return recordstatehead; } +static XLogRecParseState *UHeapXlogNewPageParseBlock(XLogReaderState *record, uint32 *blocknum) +{ + XLogRecParseState *recordstatehead = NULL; + + *blocknum = 1; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + if (recordstatehead == NULL) { + return NULL; + } + XLogRecSetBlockDataState(record, UHEAP_NEWPAGE_ORIG_BLOCK_NUM, recordstatehead); + return recordstatehead; +} + XLogRecParseState *UHeapRedoParseToBlock(XLogReaderState *record, uint32 *blocknum) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; @@ -262,6 +275,9 @@ XLogRecParseState *UHeapRedoParseToBlock(XLogReaderState *record, uint32 *blockn case XLOG_UHEAP_MULTI_INSERT: recordblockstate = UHeapXlogMultiInsertParseBlock(record, blocknum); break; + case XLOG_UHEAP_NEW_PAGE: + recordblockstate = UHeapXlogNewPageParseBlock(record, blocknum); + break; default: ereport(PANIC, (errmsg("UHeapRedoParseToBlock: unknown op code %u", info))); } @@ -1486,6 +1502,16 @@ static void UHeapXlogCleanBlock(XLogBlockHead *blockhead, XLogBlockDataParse *bl } } +static void UHeapXlogNewpageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, + RedoBufferInfo *bufferinfo) +{ + XLogBlockDataParse *datadecode = blockdatarec; + XLogRedoAction action = 
XLogCheckBlockDataRedoAction(datadecode, bufferinfo); + if (action != BLK_RESTORED) + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UHeapXlogNewpageBlock unexpected result when restoring backup block"))); +} + void UHeapRedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo) { uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; @@ -1512,6 +1538,9 @@ void UHeapRedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatar case XLOG_UHEAP_CLEAN: UHeapXlogCleanBlock(blockhead, blockdatarec, bufferinfo); break; + case XLOG_UHEAP_NEW_PAGE: + UHeapXlogNewpageBlock(blockhead, blockdatarec, bufferinfo); + break; default: ereport(PANIC, (errmsg("UHeapRedoDataBlock: unknown op code %u", info))); } diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 1e5983cc9e..220b17284b 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -24,6 +24,7 @@ #include "storage/lmgr.h" #include "storage/lock/lock.h" #include "storage/freespace.h" +#include "storage/smgr/segment_internal.h" #include "access/sysattr.h" #include "access/tuptoaster.h" #include "access/xact.h" @@ -5264,6 +5265,42 @@ XLogRecPtr LogUHeapClean(Relation reln, Buffer buffer, OffsetNumber target_offnu return recptr; } +XLogRecPtr LogUHeapNewPage(RelFileNode* rnode, ForkNumber forkNum, BlockNumber blkno, Page page, bool page_std, + TdeInfo* tdeinfo) +{ + int flags; + XLogRecPtr recptr; + if (IsSegmentFileNode(*rnode)) { + /* + * Make sure extents in the segment are created before this xlog, otherwise Standby does not know where to + * read the new page when replaying this xlog. 
+ */ + seg_preextend(*rnode, forkNum, blkno); + } + + /* NO ELOG(ERROR) from here till newpage op is logged */ + START_CRIT_SECTION(); + flags = REGBUF_FORCE_IMAGE; + if (page_std) { + flags |= REGBUF_STANDARD; + } + + XLogBeginInsert(); + XLogRegisterBlock(0, rnode, forkNum, blkno, page, flags, NULL, tdeinfo); + recptr = XLogInsert(RM_UHEAP_ID, XLOG_UHEAP_NEW_PAGE); + + /* + * The page may be uninitialized. If so, we can't set the LSN and TLI + * because that would corrupt the page. + */ + if (!PageIsNew(page)) { + PageSetLSN(page, recptr); + } + + END_CRIT_SECTION(); + return recptr; +} + /* * UHeapExecPendingUndoActions - apply any pending rollback on the input buffer * diff --git a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp index ac98c2aa7a..bfff75ed32 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp @@ -17,6 +17,9 @@ #include "knl/knl_variable.h" #include "access/heapam.h" +#include "access/tableam.h" +#include "access/transam.h" +#include "access/xact.h" #include "access/ustore/knl_uheap.h" #include "access/ustore/knl_undorequest.h" #include "access/ustore/knl_uredo.h" @@ -36,6 +39,18 @@ static UHeapDiskTuple CopyTupleFromUndoRecord(UndoRecord *undorecord, Buffer buf static void RestoreXactFromUndoRecord(UndoRecord *undorecord, Buffer buffer, UHeapDiskTuple tuple); static void LogUHeapUndoActions(UHeapUndoActionWALInfo *walInfo, Relation rel); +struct ChunkIdHashKey { + Oid toastTableOid; + Oid oldChunkId; +}; + +struct OldToNewChunkIdMappingData { + ChunkIdHashKey key; + Oid newChunkId; +}; + +typedef OldToNewChunkIdMappingData* OldToNewChunkIdMapping; + static int GetUndoApplySize() { uint64 undoApplySize = (uint64)u_sess->attr.attr_memory.maintenance_work_mem * 1024L; @@ -1059,3 +1074,150 @@ bool UHeapUndoActionsFindRelidByRelfilenode(RelFileNode *relfilenode, Oid *reloi *partitionoid = partitionId; 
    return true;
 }
+
+bool ExecuteUndoActionsPageForPartition(Relation src, SMgrRelation dest, ForkNumber forkNum, BlockNumber srcBlkno,
+    BlockNumber destBlkno, RollBackTypeForAlterTable opType, PartitionToastInfo *toastInfo)
+{
+    if (src == NULL || dest == NULL || srcBlkno == InvalidBlockNumber || destBlkno == InvalidBlockNumber) {
+        ereport(LOG, (errmodule(MOD_USTORE), errmsg("Do not need to rollback for partition.")));
+        return false;
+    }
+    RelationOpenSmgr(src); /* open smgr only after src is known to be non-NULL */
+    if (src->rd_smgr == NULL) { ereport(LOG, (errmodule(MOD_USTORE), errmsg("Do not need to rollback for partition."))); return false; }
+
+    errno_t rc = EOK;
+    char *bufToWrite = NULL;
+    RelFileNode srcNode = src->rd_smgr->smgr_rnode.node;
+    RelFileNode destNode = dest->smgr_rnode.node;
+    BackendId destBackEnd = dest->smgr_rnode.backend;
+    bool sameRel = (dest == src->rd_smgr);
+    bool rollBacked = false;
+    Buffer buffer = ReadBufferWithoutRelcache(src->rd_smgr->smgr_rnode.node, forkNum, srcBlkno,
+        RBM_NORMAL, NULL, NULL);
+    Page mPage = BufferGetPage(buffer);
+    int numSlots = GetTDCount((UHeapPageHeaderData *)mPage);
+    TD *tdSlots = (TD *)PageGetTDPointer(mPage);
+    UndoRecPtr *urecptr = (UndoRecPtr *)palloc0(numSlots * sizeof(UndoRecPtr));
+    TransactionId *fxid = (TransactionId *)palloc0(numSlots * sizeof(TransactionId));
+    int nAborted = 0;
+
+    for (int slotNo = 0; slotNo < numSlots; slotNo++) {
+        TransactionId xid = tdSlots[slotNo].xactid;
+        if (!TransactionIdIsValid(xid) || TransactionIdIsCurrentTransactionId(xid) ||
+            UHeapTransactionIdDidCommit(xid)) {
+            continue;
+        }
+        urecptr[nAborted] = tdSlots[slotNo].undo_record_ptr;
+        fxid[nAborted] = xid;
+        nAborted++;
+    }
+    if (nAborted > 0) {
+        for (int i = 0; i < nAborted; i++) {
+            ExecuteUndoActionsPage(urecptr[i], src, buffer, fxid[i]);
+        }
+        rollBacked = true;
+        RelationOpenSmgr(src);
+        if (sameRel) {
+            dest = src->rd_smgr;
+        } else {
+            dest = smgropen(destNode, destBackEnd);
+        }
+        if (!RelFileNodeEquals(srcNode, src->rd_smgr->smgr_rnode.node) ||
+            !RelFileNodeEquals(destNode, dest->smgr_rnode.node)) {
+            ereport(ERROR, (errmodule(MOD_USTORE),
+                
errmsg("Relfilenode not equal, skip rollback.")));
+        }
+    }
+
+    if (opType == ROLLBACK_OP_FOR_EXCHANGE_PARTITION && !rollBacked) {
+        ReleaseBuffer(buffer);
+        pfree_ext(urecptr);
+        pfree_ext(fxid);
+        return rollBacked;
+    }
+
+    Page page = (Page)palloc0(BLCKSZ);
+    rc = memcpy_s(page, BLCKSZ, mPage, BLCKSZ);
+    securec_check(rc, "\0", "\0");
+
+    if (toastInfo != NULL) {
+        HeapTupleData tuple;
+        OffsetNumber tupleNo = 0;
+        OffsetNumber tupleNum = tableam_tops_page_get_max_offsetnumber(src, page);
+        for (tupleNo = FirstOffsetNumber; tupleNo <= tupleNum; tupleNo++) {
+            if (!tableam_tops_page_get_item(src, &tuple, page, tupleNo, InvalidBlockNumber)) {
+                continue;
+            }
+            ChunkIdHashKey hashkey;
+            OldToNewChunkIdMapping mapping = NULL;
+
+            if (OidIsValid(toastInfo->destPartTupleOid)) {
+                Assert(toastInfo->tupDesc != NULL);
+                int numAttrs = toastInfo->tupDesc->natts;
+                Datum values[numAttrs];
+                bool isNull[numAttrs];
+                tableam_tops_deform_tuple(&tuple, toastInfo->tupDesc, values, isNull);
+                for (int i = 0; i < numAttrs; i++) {
+                    struct varlena* value = (struct varlena*)DatumGetPointer(values[i]);
+                    if (toastInfo->tupDesc->attrs[i].attlen == -1 && !isNull[i] && VARATT_IS_EXTERNAL(value)) {
+                        struct varatt_external* toastPointer = NULL;
+                        toastPointer = (varatt_external*)(VARDATA_EXTERNAL((varattrib_1b_e*)(value)));
+                        toastPointer->va_toastrelid = toastInfo->destPartTupleOid;
+                        rc = memset_s(&hashkey, sizeof(hashkey), 0, sizeof(hashkey));
+                        securec_check(rc, "\0", "\0");
+                        hashkey.toastTableOid = toastInfo->srcPartTupleOid;
+                        hashkey.oldChunkId = toastPointer->va_valueid;
+                        mapping = (OldToNewChunkIdMapping)hash_search(toastInfo->chunkIdHashTable,
+                            &hashkey, HASH_FIND, NULL);
+
+                        if (PointerIsValid(mapping)) {
+                            toastPointer->va_valueid = mapping->newChunkId;
+                        }
+                    }
+                }
+            } else if (OidIsValid(toastInfo->destToastRelOid)) {
+                /* For >1GB CLOB/BLOB toast data, the first chunk's chunk_data (attr 3) holds a toast pointer. */
+                Datum values[3];
+                bool isNull[3];
+                tableam_tops_deform_tuple(&tuple, 
toastInfo->tupDesc, values, isNull); + struct varlena* value = (struct varlena*)DatumGetPointer(values[2]); + if (!isNull[2] && VARATT_IS_EXTERNAL_ONDISK_B(value)) { + struct varatt_external* toastPointer = NULL; + toastPointer = (varatt_external*)(VARDATA_EXTERNAL((varattrib_1b_e*)(value))); + Assert(toastPointer->va_toastrelid == toastInfo->srcToastRelOid); + toastPointer->va_toastrelid = toastInfo->destToastRelOid; + rc = memset_s(&hashkey, sizeof(hashkey), 0, sizeof(hashkey)); + securec_check(rc, "\0", "\0"); + hashkey.toastTableOid = toastInfo->srcToastRelOid; + hashkey.oldChunkId = toastPointer->va_valueid; + mapping = (OldToNewChunkIdMapping)hash_search(toastInfo->chunkIdHashTable, + &hashkey, HASH_FIND, NULL); + + if (PointerIsValid(mapping)) { + toastPointer->va_valueid = mapping->newChunkId; + } + } + } + } + } + + bufToWrite = (char *) palloc0(BLCKSZ); + rc = memcpy_s(bufToWrite, BLCKSZ, page, BLCKSZ); + securec_check(rc, "\0", "\0"); + if (XLogIsNeeded() && RelationNeedsWAL(src)) { + (void) LogUHeapNewPage(&dest->smgr_rnode.node, forkNum, destBlkno, bufToWrite, false, NULL); + } + PageSetChecksumInplace((Page)bufToWrite, destBlkno); + if (opType != ROLLBACK_OP_FOR_EXCHANGE_PARTITION) { + smgrextend(dest, forkNum, destBlkno, bufToWrite, false); + } else { + smgrwrite(dest, forkNum, destBlkno, bufToWrite, false); + } + + ReleaseBuffer(buffer); + pfree_ext(urecptr); + pfree_ext(fxid); + pfree_ext(page); + pfree_ext(bufToWrite); + return rollBacked; +} diff --git a/src/gausskernel/storage/access/ustore/knl_uredo.cpp b/src/gausskernel/storage/access/ustore/knl_uredo.cpp index 0f82723e4c..69c4059d5a 100644 --- a/src/gausskernel/storage/access/ustore/knl_uredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uredo.cpp @@ -1827,6 +1827,14 @@ static void UHeapXlogFreeze(XLogReaderState *record) UnlockReleaseBuffer(buffer.buf); } } +void UHeapXlogNewPage(XLogReaderState *record) +{ + RedoBufferInfo buffer = { 0 }; + if (XLogReadBufferForRedo(record, 
UHEAP_NEWPAGE_ORIG_BLOCK_NUM, &buffer) != BLK_RESTORED) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("unexpected result when restoring backup block"))); + } + UnlockReleaseBuffer(buffer.buf); +} void UHeapRedo(XLogReaderState *record) { @@ -1858,6 +1866,9 @@ void UHeapRedo(XLogReaderState *record) case XLOG_UHEAP_MULTI_INSERT: UHeapXlogMultiInsert(record); break; + case XLOG_UHEAP_NEW_PAGE: + UHeapXlogNewPage(record); + break; default: ereport(PANIC, (errmsg("UHeapRedo: unknown op code %u", (uint8)info))); } diff --git a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp index b5340c9984..170277d225 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp @@ -949,7 +949,7 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot savedTdSlot = tdinfo.td_slot; } if (tdinfo.cid == InvalidCommandId) { - ereport(PANIC, (errmodule(MOD_USTORE), errmsg( + ereport(ERROR, (errmodule(MOD_USTORE), errmsg( "invalid cid! " "LogInfo: undo state %d, tuple flag %u, tupXid %lu. " "OldTd: tdxid %lu, tdid %d, undoptr %lu. NewTd: tdxid %lu, tdid %d, undoptr %lu. 
" diff --git a/src/gausskernel/storage/replication/logical/decode.cpp b/src/gausskernel/storage/replication/logical/decode.cpp index e69e336362..87edfdeabb 100644 --- a/src/gausskernel/storage/replication/logical/decode.cpp +++ b/src/gausskernel/storage/replication/logical/decode.cpp @@ -933,6 +933,7 @@ static void DecodeUheapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_UHEAP_FREEZE_TD_SLOT: case XLOG_UHEAP_INVALID_TD_SLOT: case XLOG_UHEAP_CLEAN: + case XLOG_UHEAP_NEW_PAGE: break; case XLOG_UHEAP_MULTI_INSERT: diff --git a/src/gausskernel/storage/replication/logical/logical_parse.cpp b/src/gausskernel/storage/replication/logical/logical_parse.cpp index 9f0972ec83..aea5196bf5 100644 --- a/src/gausskernel/storage/replication/logical/logical_parse.cpp +++ b/src/gausskernel/storage/replication/logical/logical_parse.cpp @@ -515,6 +515,7 @@ void ParseUheapOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, Pa case XLOG_UHEAP_FREEZE_TD_SLOT: case XLOG_UHEAP_INVALID_TD_SLOT: case XLOG_UHEAP_CLEAN: + case XLOG_UHEAP_NEW_PAGE: break; case XLOG_UHEAP_MULTI_INSERT: diff --git a/src/include/access/ustore/knl_uheap.h b/src/include/access/ustore/knl_uheap.h index 56482806c8..7a025c3d26 100644 --- a/src/include/access/ustore/knl_uheap.h +++ b/src/include/access/ustore/knl_uheap.h @@ -178,6 +178,8 @@ bool UHeapExecPendingUndoActions(Relation rel, Buffer buffer, TransactionId xwai extern XLogRecPtr LogUHeapClean(Relation reln, Buffer buffer, OffsetNumber target_offnum, Size space_required, OffsetNumber *nowdeleted, int ndeleted, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, OffsetNumber *nowfixed, uint16 *fixedlen, uint16 nfixed, TransactionId latestRemovedXid, bool pruned); +extern XLogRecPtr LogUHeapNewPage(RelFileNode* rnode, ForkNumber forkNum, BlockNumber blkno, Page page, bool page_std, + TdeInfo* tdeinfo = NULL); extern void SimpleUHeapDelete(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot** 
oldslot = NULL, TransactionId* tmfdXmin = NULL); diff --git a/src/include/access/ustore/knl_undorequest.h b/src/include/access/ustore/knl_undorequest.h index 13dc651634..9e740465f6 100644 --- a/src/include/access/ustore/knl_undorequest.h +++ b/src/include/access/ustore/knl_undorequest.h @@ -57,11 +57,20 @@ typedef enum RollbackReturnType { ROLLBACK_OK_NOEXIST = -2 } RollbackReturnType; -typedef enum RollBackTypeForAlterTabl { +typedef enum RollBackTypeForAlterTable { ROLLBACK_OP_FOR_MOVE_TBLSPC = 0, ROLLBACK_OP_FOR_MERGE_PARTITION = 1, ROLLBACK_OP_FOR_EXCHANGE_PARTITION = 2 -} RollBackTypeForAlterTabl; +} RollBackTypeForAlterTable; + +typedef struct PartitionToastInfo { + Oid srcPartTupleOid; + Oid destPartTupleOid; + Oid srcToastRelOid; + Oid destToastRelOid; + TupleDesc tupDesc; + HTAB *chunkIdHashTable; +} PartitionToastInfo; Size AsyncRollbackRequestsHashSize(void); Size AsyncRollbackHashShmemSize(void); @@ -85,4 +94,7 @@ void ExecuteUndoForInsertRecovery(Buffer buffer, OffsetNumber off, TransactionId bool UHeapUndoActionsOpenRelation(Oid reloid, Oid partitionoid, UndoRelationData *relationData); void UHeapUndoActionsCloseRelation(UndoRelationData *relationData); bool UHeapUndoActionsFindRelidByRelfilenode(RelFileNode *relfilenode, Oid *reloid, Oid *partitionoid); +bool ExecuteUndoActionsPageForPartition(Relation src, SMgrRelation dest, ForkNumber forkNum, BlockNumber srcBlkno, + BlockNumber destBlkno, RollBackTypeForAlterTable opType, PartitionToastInfo *toastInfo = NULL); + #endif diff --git a/src/include/access/ustore/knl_uredo.h b/src/include/access/ustore/knl_uredo.h index 36af9a63d7..ee3d237798 100644 --- a/src/include/access/ustore/knl_uredo.h +++ b/src/include/access/ustore/knl_uredo.h @@ -39,6 +39,7 @@ #define XLOG_UHEAP_INVALID_TD_SLOT 0x40 #define XLOG_UHEAP_CLEAN 0x50 #define XLOG_UHEAP_MULTI_INSERT 0x60 +#define XLOG_UHEAP_NEW_PAGE 0x70 #define XLOG_UHEAP_OPMASK 0x70 /* * When we insert 1st item on new page in INSERT, UPDATE, HOT_UPDATE, diff --git 
a/src/include/access/xlogproc.h b/src/include/access/xlogproc.h index 285376d9ac..0b1c3a4ed7 100755 --- a/src/include/access/xlogproc.h +++ b/src/include/access/xlogproc.h @@ -809,6 +809,10 @@ typedef enum { UHEAP_CLEAN_ORIG_BLOCK_NUM = 0 } XLogUHeapCleanBlockEnum; +typedef enum { + UHEAP_NEWPAGE_ORIG_BLOCK_NUM = 0 +} XLogUHeapNewPageBlockEnum; + typedef enum { UHEAP2_ORIG_BLOCK_NUM = 0 } XLogUHeap2BlockEnum; -- Gitee From bffec6fd1890a4472260677581d54aea1933e36a Mon Sep 17 00:00:00 2001 From: wang-mingxuanHedgehog <504013468@qq.com> Date: Fri, 9 Aug 2024 16:29:45 +0800 Subject: [PATCH 173/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9\d=E6=9F=A5=E8=AF=A2?= =?UTF-8?q?=E8=A1=A8=E4=BF=A1=E6=81=AF=E7=9A=84storage=5Ftype=E5=A4=A7?= =?UTF-8?q?=E5=B0=8F=E5=86=99=E7=9A=84=E4=B8=8D=E4=B8=80=E8=87=B4=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/indexcmds.cpp | 2 +- .../optimizer/commands/tablecmds.cpp | 2 +- src/include/utils/rel_gs.h | 1 + src/test/regress/expected/alter_table_003.out | 76 +++++++++---------- .../expected/alter_table_modify_ustore.out | 28 +++---- .../expected/function_get_table_def.out | 2 +- src/test/regress/expected/generated_col.out | 2 +- src/test/regress/expected/inherits01.out | 48 ++++++------ .../expected/optimizing_index_scan_ustore.out | 2 +- src/test/regress/expected/prefixkey_index.out | 20 ++--- 10 files changed, 92 insertions(+), 91 deletions(-) diff --git a/src/gausskernel/optimizer/commands/indexcmds.cpp b/src/gausskernel/optimizer/commands/indexcmds.cpp index 7d09b42ee4..07bd4f351b 100644 --- a/src/gausskernel/optimizer/commands/indexcmds.cpp +++ b/src/gausskernel/optimizer/commands/indexcmds.cpp @@ -1284,7 +1284,7 @@ ObjectAddress DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, CheckPredicate((Expr*)stmt->whereClause); if (RelationIsUstoreFormat(rel)) { - DefElem* def = makeDefElem("storage_type", 
(Node*)makeString(TABLE_ACCESS_METHOD_USTORE)); + DefElem* def = makeDefElem("storage_type", (Node*)makeString(TABLE_ACCESS_METHOD_USTORE_LOWER)); if (stmt->options == NULL) { stmt->options = list_make1(def); } else { diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 4fe2a236ca..e0326478d0 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -1318,7 +1318,7 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt res = list_make2(def1, def2); if (g_instance.attr.attr_storage.enable_ustore && u_sess->attr.attr_sql.enable_default_ustore_table && relkind != RELKIND_MATVIEW && !IsSystemNamespace(relnamespace) && !assignedStorageType) { - DefElem* def3 = makeDefElem("storage_type", (Node*)makeString(TABLE_ACCESS_METHOD_USTORE)); + DefElem* def3 = makeDefElem("storage_type", (Node*)makeString(TABLE_ACCESS_METHOD_USTORE_LOWER)); res = lappend(res, def3); } } else { diff --git a/src/include/utils/rel_gs.h b/src/include/utils/rel_gs.h index 2aea5ded9d..252d44f81c 100644 --- a/src/include/utils/rel_gs.h +++ b/src/include/utils/rel_gs.h @@ -184,6 +184,7 @@ typedef struct RelationMetaData { */ #define TABLE_ACCESS_METHOD_ASTORE "ASTORE" #define TABLE_ACCESS_METHOD_USTORE "USTORE" +#define TABLE_ACCESS_METHOD_USTORE_LOWER "ustore" #define FILESYSTEM_GENERAL "general" #define FILESYSTEM_HDFS "hdfs" diff --git a/src/test/regress/expected/alter_table_003.out b/src/test/regress/expected/alter_table_003.out index 040bc368ea..e45fce0fec 100644 --- a/src/test/regress/expected/alter_table_003.out +++ b/src/test/regress/expected/alter_table_003.out @@ -1315,11 +1315,11 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f9_key" for tab f5 | integer | | plain | | f3 | integer | not null | plain | | Indexes: - "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=USTORE) TABLESPACE pg_default + "t_pri_pkey" 
PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=ustore) TABLESPACE pg_default Referenced by: TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint where conrelid = (select oid from pg_class where relname = 't_pri') order by conname; @@ -1342,16 +1342,16 @@ select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_c f6 | integer | | plain | | f7 | integer | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f8) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f3_f4_key" UNIQUE CONSTRAINT, ubtree (f3, f4) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f9_key" UNIQUE CONSTRAINT, ubtree (f9) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f8) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f3_f4_key" UNIQUE CONSTRAINT, ubtree (f3, f4) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f9_key" UNIQUE CONSTRAINT, ubtree (f9) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "t1_f5_check" CHECK (f5 = 10) Foreign-key constraints: "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint where conrelid = (select oid from pg_class where relname = 't1') order by conname; @@ -3138,7 +3138,7 @@ alter table t1 add f8 int, add f9 text first, add f10 float after f3; f5 | boolean | | plain | | f8 
| integer | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE @@ -3201,7 +3201,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for f5 | boolean | | plain | | f6 | text | | extended | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE @@ -3240,7 +3240,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for f5 | boolean | | plain | | f6 | text | | extended | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE @@ -3281,7 +3281,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for f5 | boolean | | plain | | f6 | text | | extended | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE @@ -3318,7 +3318,7 @@ alter table t1 add f6 int first, add f7 float after f3; f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, 
storage_type=ustore, compression=no @@ -3354,9 +3354,9 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for tab f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -3393,8 +3393,8 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for tab f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -3432,8 +3432,8 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for tab f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=ustore) TABLESPACE 
pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -3625,7 +3625,7 @@ alter table t1 add f6 text first, add f7 float after f3; f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE @@ -3666,7 +3666,7 @@ ERROR: Incorrect column definition, there can be only one auto_increment column f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -3709,7 +3709,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f6) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -4318,11 +4318,11 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f9_key" for tab f5 | integer | | plain | | f3 | integer | not null | plain | | Indexes: - "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=USTORE) TABLESPACE pg_default + "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=ustore) TABLESPACE pg_default Referenced by: TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore select conname, contype, conkey, confkey, conbin, consrc, conincluding 
from pg_constraint where conrelid = (select oid from pg_class where relname = 't_pri') order by conname; @@ -4345,16 +4345,16 @@ select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_c f6 | integer | | plain | | f7 | integer | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f8) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_f9_key" UNIQUE CONSTRAINT, ubtree (f9) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_lower_abs_key" UNIQUE CONSTRAINT, ubtree (lower(f3::text), abs(f4)) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f8) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_f9_key" UNIQUE CONSTRAINT, ubtree (f9) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_lower_abs_key" UNIQUE CONSTRAINT, ubtree (lower(f3::text), abs(f4)) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "t1_f5_check" CHECK (f5 = 10) Foreign-key constraints: "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint where conrelid = (select oid from pg_class where relname = 't1') order by conname; @@ -6516,7 +6516,7 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for tab f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -6550,7 +6550,7 @@ insert into t1(f2, f3, 
f4, f5) values('a', '2022-11-08 19:56:10.158564', x'41', f4 | bit(8) | | extended | | f5 | boolean | | plain | | Indexes: - "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -7214,11 +7214,11 @@ NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f4_f5_f6_f7_key" f f2 | integer | not null | plain | | f3 | integer | not null | plain | | Indexes: - "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=USTORE) TABLESPACE pg_default + "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=ustore) TABLESPACE pg_default Referenced by: TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore \d+ t1 Table "public.t1" @@ -7232,14 +7232,14 @@ Options: orientation=row, compression=no, storage_type=USTORE f6 | integer | | plain | | f7 | integer | | plain | | Indexes: - "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=USTORE) TABLESPACE pg_default - "t1_lower_abs_key" UNIQUE CONSTRAINT, ubtree (lower(f3::text), abs(f4)) WITH (storage_type=USTORE) TABLESPACE pg_default + "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=ustore) TABLESPACE pg_default + "t1_lower_abs_key" UNIQUE CONSTRAINT, ubtree (lower(f3::text), abs(f4)) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "t1_f5_check" CHECK (f5 = 10) Foreign-key constraints: "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint where conrelid = (select oid 
from pg_class where relname = 't_pri') order by conname; @@ -7272,7 +7272,7 @@ Indexes: Referenced by: TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore \d+ t1 Table "public.t1" @@ -7293,7 +7293,7 @@ Check constraints: Foreign-key constraints: "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3) Has OIDs: no -Options: orientation=row, compression=no, storage_type=USTORE +Options: orientation=row, compression=no, storage_type=ustore select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint where conrelid = (select oid from pg_class where relname = 't_pri') order by conname; diff --git a/src/test/regress/expected/alter_table_modify_ustore.out b/src/test/regress/expected/alter_table_modify_ustore.out index 8ac13d64a2..77f21f37a6 100644 --- a/src/test/regress/expected/alter_table_modify_ustore.out +++ b/src/test/regress/expected/alter_table_modify_ustore.out @@ -37,7 +37,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modif a | integer | | plain | | b | integer | not null AUTO_INCREMENT | plain | | Indexes: - "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default + "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -51,7 +51,7 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_b_k b | character varying(8) | not null | extended | | Indexes: "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default - "test_at_modify_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default + "test_at_modify_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: 
no Options: orientation=row, storage_type=ustore, compression=no @@ -1077,7 +1077,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_chang a | integer | | plain | | b1 | integer | not null AUTO_INCREMENT | plain | | Indexes: - "test_at_change_pkey" PRIMARY KEY, ubtree (b1) WITH (storage_type=USTORE) TABLESPACE pg_default + "test_at_change_pkey" PRIMARY KEY, ubtree (b1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -1091,7 +1091,7 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_b_k b | character varying(8) | not null | extended | | Indexes: "test_at_change_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default - "test_at_change_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default + "test_at_change_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no @@ -2100,7 +2100,7 @@ NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "pt_at_modify_b_tab b | smallint | not null | plain | | Indexes: "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default - "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default + "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default Partition By RANGE(a) Number of partitions: 3 (View pg_partition to check each partition range.) 
Has OIDs: no @@ -2396,7 +2396,7 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_test11_f11" f f12 | character varying(20) | f13 | boolean | Indexes: - "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=ustore) TABLESPACE pg_default ALTER TABLE test11 MODIFY COLUMN f11 int; \d test11 @@ -2439,7 +2439,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "pk_test11_f11 f13 | boolean | f11 | integer | not null Indexes: - "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=ustore) TABLESPACE pg_default insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR ERROR: null value in column "f11" violates not-null constraint @@ -2456,7 +2456,7 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_test11_f11" f f12 | character varying(20) | not null f13 | boolean | Indexes: - "pk_test11_f11" PRIMARY KEY, ubtree (f11, f12) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_test11_f11" PRIMARY KEY, ubtree (f11, f12) WITH (storage_type=ustore) TABLESPACE pg_default ALTER TABLE test11 MODIFY COLUMN f11 int; \d test11 @@ -2499,7 +2499,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "pk_test11_f11 f13 | boolean | f11 | integer | not null Indexes: - "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=ustore) TABLESPACE pg_default insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR ERROR: null value in column "f11" violates not-null constraint @@ -2535,7 +2535,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_p gender | character varying | not null | extended | | birthday | date | not null | plain | | Indexes: - "range_range_pkey" PRIMARY KEY, ubtree (id, 
birthday) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default + "range_range_pkey" PRIMARY KEY, ubtree (id, birthday) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default Partition By RANGE(id) Subpartition By RANGE(birthday) Number of partitions: 3 (View pg_partition to check each partition range.) Number of subpartitions: 6 (View pg_partition to check each subpartition range.) @@ -2589,7 +2589,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_p gender | character varying | not null | extended | | birthday | date | not null | plain | | Indexes: - "range_range_pkey" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE pg_default + "range_range_pkey" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE pg_default Partition By RANGE(id) Subpartition By RANGE(birthday) Number of partitions: 3 (View pg_partition to check each partition range.) Number of subpartitions: 6 (View pg_partition to check each subpartition range.) @@ -2643,7 +2643,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_p gender | character varying | not null | extended | | birthday | date | not null | plain | | Indexes: - "range_range_pkey" PRIMARY KEY, ubtree (gender) WITH (storage_type=USTORE) TABLESPACE pg_default + "range_range_pkey" PRIMARY KEY, ubtree (gender) WITH (storage_type=ustore) TABLESPACE pg_default Partition By RANGE(id) Subpartition By RANGE(birthday) Number of partitions: 3 (View pg_partition to check each partition range.) Number of subpartitions: 6 (View pg_partition to check each subpartition range.) 
@@ -2686,7 +2686,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "multi_keys_ra f3 | integer | | plain | | f1 | integer | not null | plain | | Indexes: - "multi_keys_range_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default + "multi_keys_range_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default Partition By RANGE(f1, f2) Number of partitions: 3 (View pg_partition to check each partition range.) Has OIDs: no @@ -2727,7 +2727,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "multi_keys_li f3 | integer | | plain | | f1 | integer | not null | plain | | Indexes: - "multi_keys_list_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default + "multi_keys_list_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default Partition By LIST(f1, f2) Number of partitions: 3 (View pg_partition to check each partition range.) Has OIDs: no diff --git a/src/test/regress/expected/function_get_table_def.out b/src/test/regress/expected/function_get_table_def.out index fcb5976502..7e73b2d3ea 100644 --- a/src/test/regress/expected/function_get_table_def.out +++ b/src/test/regress/expected/function_get_table_def.out @@ -775,7 +775,7 @@ select pg_get_tabledef('test_us'); us2 character varying(20) NOT NULL + ) + WITH (orientation=row, storage_type=ustore, compression=no); + - ALTER TABLE test_us ADD CONSTRAINT u1 PRIMARY KEY USING ubtree (us2) WITH (storage_type=USTORE); + ALTER TABLE test_us ADD CONSTRAINT u1 PRIMARY KEY USING ubtree (us2) WITH (storage_type=ustore); (1 row) diff --git a/src/test/regress/expected/generated_col.out b/src/test/regress/expected/generated_col.out index 39e34c462b..1fdb92db34 100644 --- a/src/test/regress/expected/generated_col.out +++ b/src/test/regress/expected/generated_col.out @@ -608,7 +608,7 @@ SELECT * FROM gtest25 ORDER BY a; c | integer | default 42 x | integer | generated 
always as ((c * 4)) stored Indexes: - "gtest25_pkey" PRIMARY KEY, ubtree (a) WITH (storage_type=USTORE) TABLESPACE pg_default + "gtest25_pkey" PRIMARY KEY, ubtree (a) WITH (storage_type=ustore) TABLESPACE pg_default DROP TABLE gtest25; -- ALTER TABLE ... ALTER COLUMN diff --git a/src/test/regress/expected/inherits01.out b/src/test/regress/expected/inherits01.out index cfe2f82192..b41b535cbf 100644 --- a/src/test/regress/expected/inherits01.out +++ b/src/test/regress/expected/inherits01.out @@ -1972,8 +1972,8 @@ ALTER TABLE kid_2020 ALTER COLUMN num SET DEFAULT 1; num | integer | default 2 salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2025,8 +2025,8 @@ ALTER TABLE kid_2020 ALTER id DROP not null; num | integer | default 2 salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2059,8 +2059,8 @@ ALTER TABLE father DROP COLUMN IF EXISTS num;-- father without num, child have n wai_id | integer | salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, 
ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2092,8 +2092,8 @@ ALTER TABLE kid_2023 no inherit father; wai_id | integer | salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2123,8 +2123,8 @@ ALTER TABLE kid_2022 RENAME CONSTRAINT father_salary_check to new_salary_check wai_id | integer | salary | real | Indexes: - "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2170,8 +2170,8 @@ LINE 2: ALTER TABLE father ALTER COLUMN id TYPE int; wai_id | integer | salary | real | Indexes: - "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) 
TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2372,8 +2372,8 @@ ALTER TABLE kid_2020 ALTER COLUMN num SET DEFAULT 1; num | integer | default 2 salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2425,8 +2425,8 @@ ALTER TABLE kid_2020 ALTER id DROP not null; num | integer | default 2 salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2459,8 +2459,8 @@ ALTER TABLE father DROP COLUMN IF EXISTS num;-- father without num, child withou wai_id | integer | salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check 
constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2492,8 +2492,8 @@ ALTER TABLE kid_2023 no inherit father; wai_id | integer | salary | real | Indexes: - "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "pk_father" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2522,8 +2522,8 @@ ALTER TABLE kid_2022 RENAME CONSTRAINT father_salary_check to new_salary_check wai_id | integer | salary | real | Indexes: - "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: @@ -2578,8 +2578,8 @@ LINE 2: ALTER TABLE father ALTER COLUMN id TYPE int; wai_id | integer | salary | real | Indexes: - "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE example1 - "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=USTORE) TABLESPACE pg_default + "new_index_name" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE example1 + "father_md_attr_key" UNIQUE CONSTRAINT, ubtree (md_attr) WITH (storage_type=ustore) TABLESPACE pg_default Check constraints: "father_salary_check" CHECK (salary > 0::double precision) Foreign-key constraints: diff --git 
a/src/test/regress/expected/optimizing_index_scan_ustore.out b/src/test/regress/expected/optimizing_index_scan_ustore.out index 2e36093db3..cd8a71bd82 100644 --- a/src/test/regress/expected/optimizing_index_scan_ustore.out +++ b/src/test/regress/expected/optimizing_index_scan_ustore.out @@ -32,7 +32,7 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_ustore_pkey" f c4 | character varying(20) | collate C | extended | | c5 | bigint | | plain | | Indexes: - "t_ustore_pkey" PRIMARY KEY, ubtree (c1) WITH (storage_type=USTORE) TABLESPACE pg_default + "t_ustore_pkey" PRIMARY KEY, ubtree (c1) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no diff --git a/src/test/regress/expected/prefixkey_index.out b/src/test/regress/expected/prefixkey_index.out index 58c155c6be..65c253a83b 100644 --- a/src/test/regress/expected/prefixkey_index.out +++ b/src/test/regress/expected/prefixkey_index.out @@ -2666,11 +2666,11 @@ CREATE UNIQUE INDEX prefix_index_fraw ON test_prefix_ustore (fraw(9)); fraw | raw | | extended | | fbytea | bytea | | extended | | Indexes: - "prefix_index_fraw" UNIQUE, ubtree (fraw(9)) WITH (storage_type=USTORE) TABLESPACE pg_default - "prefix_index_fblob" ubtree (fblob(5)) WITH (storage_type=USTORE) TABLESPACE pg_default - "prefix_index_fchar_fbytea" ubtree (fchar(5), fbytea(5)) WITH (storage_type=USTORE) TABLESPACE pg_default - "prefix_index_ftext" ubtree (ftext(5)) WITH (storage_type=USTORE) TABLESPACE pg_default - "prefix_index_fvchar" ubtree (fvchar(5)) WITH (storage_type=USTORE) TABLESPACE pg_default + "prefix_index_fraw" UNIQUE, ubtree (fraw(9)) WITH (storage_type=ustore) TABLESPACE pg_default + "prefix_index_fblob" ubtree (fblob(5)) WITH (storage_type=ustore) TABLESPACE pg_default + "prefix_index_fchar_fbytea" ubtree (fchar(5), fbytea(5)) WITH (storage_type=ustore) TABLESPACE pg_default + "prefix_index_ftext" ubtree (ftext(5)) WITH (storage_type=ustore) TABLESPACE 
pg_default + "prefix_index_fvchar" ubtree (fvchar(5)) WITH (storage_type=ustore) TABLESPACE pg_default Has OIDs: no Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=ustore @@ -2689,11 +2689,11 @@ select pg_get_tabledef('test_prefix_ustore'::regclass); fbytea bytea + ) + WITH (orientation=row, storage_type=ustore, compression=no); + - CREATE UNIQUE INDEX prefix_index_fraw ON test_prefix_ustore USING ubtree (fraw(9)) WITH (storage_type=USTORE) TABLESPACE pg_default; + - CREATE INDEX prefix_index_fblob ON test_prefix_ustore USING ubtree (fblob(5)) WITH (storage_type=USTORE) TABLESPACE pg_default; + - CREATE INDEX prefix_index_ftext ON test_prefix_ustore USING ubtree (ftext(5)) WITH (storage_type=USTORE) TABLESPACE pg_default; + - CREATE INDEX prefix_index_fvchar ON test_prefix_ustore USING ubtree (fvchar(5)) WITH (storage_type=USTORE) TABLESPACE pg_default; + - CREATE INDEX prefix_index_fchar_fbytea ON test_prefix_ustore USING ubtree (fchar(5), fbytea(5)) WITH (storage_type=USTORE) TABLESPACE pg_default; + CREATE UNIQUE INDEX prefix_index_fraw ON test_prefix_ustore USING ubtree (fraw(9)) WITH (storage_type=ustore) TABLESPACE pg_default; + + CREATE INDEX prefix_index_fblob ON test_prefix_ustore USING ubtree (fblob(5)) WITH (storage_type=ustore) TABLESPACE pg_default; + + CREATE INDEX prefix_index_ftext ON test_prefix_ustore USING ubtree (ftext(5)) WITH (storage_type=ustore) TABLESPACE pg_default; + + CREATE INDEX prefix_index_fvchar ON test_prefix_ustore USING ubtree (fvchar(5)) WITH (storage_type=ustore) TABLESPACE pg_default; + + CREATE INDEX prefix_index_fchar_fbytea ON test_prefix_ustore USING ubtree (fchar(5), fbytea(5)) WITH (storage_type=ustore) TABLESPACE pg_default; (1 row) set enable_seqscan=false; -- Gitee From 7e4897cab5121361f6bd425808d74214a22a862f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Sat, 10 Aug 2024 09:28:51 +0800 Subject: [PATCH 174/347] fix undostatfuns --- 
.../backend/utils/adt/pgundostatfuncs.cpp | 327 +++++++++--------- 1 file changed, 171 insertions(+), 156 deletions(-) diff --git a/src/common/backend/utils/adt/pgundostatfuncs.cpp b/src/common/backend/utils/adt/pgundostatfuncs.cpp index a451f57820..3b5e22aadd 100644 --- a/src/common/backend/utils/adt/pgundostatfuncs.cpp +++ b/src/common/backend/utils/adt/pgundostatfuncs.cpp @@ -467,197 +467,212 @@ static int OpenUndoBlock(int zoneId, BlockNumber blockno) return fd; } -static bool ParseUndoRecord(UndoRecPtr urp, Tuplestorestate *tupstore, TupleDesc tupDesc) +static bool ParseUndoRecord(UndoRecPtr urp, Tuplestorestate *tupstore, TupleDesc tupDesc, + TransactionId xid, UndoRecPtr startUrp) { - char buffer[BLCKSZ] = {'\0'}; - BlockNumber blockno = UNDO_PTR_GET_BLOCK_NUM(urp); - int zoneId = UNDO_PTR_GET_ZONE_ID(urp); - int startingByte = ((urp) & ((UINT64CONST(1) << 44) - 1)) % BLCKSZ; - int fd = -1; - int alreadyRead = 0; - off_t seekpos; - errno_t rc = EOK; - uint32 ret = 0; - UndoRecPtr blkprev = INVALID_UNDO_REC_PTR; - UndoHeader *urec = (UndoHeader *)malloc(sizeof(UndoHeader)); - if (!urec) { - fprintf(stderr, "malloc UndoHeader failed, out of memory\n"); - return false; - } - rc = memset_s(urec, sizeof(UndoHeader), (0), sizeof(UndoHeader)); - securec_check(rc, "\0", "\0"); do { - CHECK_FOR_INTERRUPTS(); - fd = OpenUndoBlock(zoneId, blockno); - if (fd < 0) { - free(urec); + char buffer[BLCKSZ] = {'\0'}; + BlockNumber blockno = UNDO_PTR_GET_BLOCK_NUM(urp); + int zoneId = UNDO_PTR_GET_ZONE_ID(urp); + int startingByte = ((urp) & ((UINT64CONST(1) << 44) - 1)) % BLCKSZ; + int fd = -1; + int alreadyRead = 0; + off_t seekpos; + errno_t rc = EOK; + uint32 ret = 0; + UndoHeader *urec = (UndoHeader *)malloc(sizeof(UndoHeader)); + if (!urec) { + fprintf(stderr, "malloc UndoHeader failed, out of memory\n"); return false; } - seekpos = (off_t)BLCKSZ * (blockno % ((BlockNumber)UNDOSEG_SIZE)); - lseek(fd, seekpos, SEEK_SET); - rc = memset_s(buffer, BLCKSZ, 0, BLCKSZ); + rc = 
memset_s(urec, sizeof(UndoHeader), (0), sizeof(UndoHeader)); securec_check(rc, "\0", "\0"); - ret = read(fd, (char *)buffer, BLCKSZ); - if (ret != BLCKSZ) { - close(fd); + do { + CHECK_FOR_INTERRUPTS(); + fd = OpenUndoBlock(zoneId, blockno); + if (fd < 0) { + free(urec); + return false; + } + seekpos = (off_t)BLCKSZ * (blockno % ((BlockNumber)UNDOSEG_SIZE)); + lseek(fd, seekpos, SEEK_SET); + rc = memset_s(buffer, BLCKSZ, 0, BLCKSZ); + securec_check(rc, "\0", "\0"); + ret = read(fd, (char *)buffer, BLCKSZ); + if (ret != BLCKSZ) { + close(fd); + free(urec); + fprintf(stderr, "Read undo meta page failed, expect size(8192), real size(%u).\n", ret); + return false; + } + if (ReadUndoRecord(urec, buffer, startingByte, &alreadyRead)) { + break; + } + startingByte = UNDO_LOG_BLOCK_HEADER_SIZE; + blockno++; + } while (true); + if (TransactionIdIsValid(xid) && urec->whdr_.xid != xid) { free(urec); - fprintf(stderr, "Read undo meta page failed, expect size(8192), real size(%u).\n", ret); - return false; - } - if (ReadUndoRecord(urec, buffer, startingByte, &alreadyRead)) { break; } - startingByte = UNDO_LOG_BLOCK_HEADER_SIZE; - blockno++; - } while (true); - blkprev = urec->wblk_.blkprev; - char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; - bool nulls[21] = {false}; - Datum values[21]; - - rc = memset_s(textBuffer, STAT_UNDO_LOG_SIZE, 0, STAT_UNDO_LOG_SIZE); - securec_check(rc, "\0", "\0"); - values[ARR_0] = ObjectIdGetDatum(urp); - values[ARR_1] = ObjectIdGetDatum(urec->whdr_.xid); - - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->whdr_.cid); - securec_check_ss(rc, "\0", "\0"); - values[ARR_2] = CStringGetTextDatum(textBuffer); - - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->whdr_.reloid); - securec_check_ss(rc, "\0", "\0"); - values[ARR_3] = CStringGetTextDatum(textBuffer); - - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, - 
urec->whdr_.relfilenode); - securec_check_ss(rc, "\0", "\0"); - values[ARR_4] = CStringGetTextDatum(textBuffer); - - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->whdr_.uinfo); - securec_check_ss(rc, "\0", "\0"); - values[ARR_5] = CStringGetTextDatum(textBuffer); - - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wblk_.blkprev); - securec_check_ss(rc, "\0", "\0"); - values[ARR_6] = CStringGetTextDatum(textBuffer); + char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; + bool nulls[21] = {false}; + Datum values[21]; - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wblk_.blkno); - securec_check_ss(rc, "\0", "\0"); - values[ARR_7] = CStringGetTextDatum(textBuffer); + rc = memset_s(textBuffer, STAT_UNDO_LOG_SIZE, 0, STAT_UNDO_LOG_SIZE); + securec_check(rc, "\0", "\0"); + values[ARR_0] = ObjectIdGetDatum(urp); + values[ARR_1] = ObjectIdGetDatum(urec->whdr_.xid); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wblk_.offset); - securec_check_ss(rc, "\0", "\0"); - values[ARR_8] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->whdr_.cid); + securec_check_ss(rc, "\0", "\0"); + values[ARR_2] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wtxn_.prevurp); - securec_check_ss(rc, "\0", "\0"); - values[ARR_9] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->whdr_.reloid); + securec_check_ss(rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); - rc = - snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_FORMAT, urec->wpay_.payloadlen); - securec_check_ss(rc, "\0", "\0"); - 
values[ARR_10] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->whdr_.relfilenode); + securec_check_ss(rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_FORMAT, urec->wtd_.oldxactid); - securec_check_ss(rc, "\0", "\0"); - values[ARR_11] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->whdr_.uinfo); + securec_check_ss(rc, "\0", "\0"); + values[ARR_5] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_FORMAT, urec->wpart_.partitionoid); - securec_check_ss(rc, "\0", "\0"); - values[ARR_12] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wblk_.blkprev); + securec_check_ss(rc, "\0", "\0"); + values[ARR_6] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_FORMAT, urec->wtspc_.tablespace); - securec_check_ss(rc, "\0", "\0"); - values[ARR_13] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wblk_.blkno); + securec_check_ss(rc, "\0", "\0"); + values[ARR_7] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, alreadyRead); - securec_check_ss(rc, "\0", "\0"); - values[ARR_14] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wblk_.offset); + securec_check_ss(rc, "\0", "\0"); + values[ARR_8] = CStringGetTextDatum(textBuffer); - char prevLen[2]; - UndoRecordSize byteToRead = sizeof(UndoRecordSize); - char *readptr = 
buffer + startingByte - byteToRead; - for (auto i = 0; i < byteToRead; i++) { - prevLen[i] = *readptr; - readptr++; - } - UndoRecordSize prevRecLen = *(UndoRecordSize *)(prevLen); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_UFORMAT, prevRecLen); - securec_check_ss(rc, "\0", "\0"); - values[ARR_15] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wtxn_.prevurp); + securec_check_ss(rc, "\0", "\0"); + values[ARR_9] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); - securec_check_ss(rc, "\0", "\0"); - values[ARR_16] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wpay_.payloadlen); + securec_check_ss(rc, "\0", "\0"); + values[ARR_10] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); - securec_check_ss(rc, "\0", "\0"); - values[ARR_17] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wtd_.oldxactid); + securec_check_ss(rc, "\0", "\0"); + values[ARR_11] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); - securec_check_ss(rc, "\0", "\0"); - values[ARR_18] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wpart_.partitionoid); + securec_check_ss(rc, "\0", "\0"); + values[ARR_12] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); - securec_check_ss(rc, "\0", "\0"); - values[ARR_19] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, 
sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_FORMAT, urec->wtspc_.tablespace); + securec_check_ss(rc, "\0", "\0"); + values[ARR_13] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); - securec_check_ss(rc, "\0", "\0"); - values[ARR_20] = CStringGetTextDatum(textBuffer); - - if (urec->whdr_.utype != UNDO_INSERT && urec->whdr_.utype != UNDO_MULTI_INSERT && - urec->rawdata_.len > 0 && urec->rawdata_.data != NULL) { - UHeapDiskTupleDataHeader diskTuple; - if (urec->whdr_.utype == UNDO_INPLACE_UPDATE) { - Assert(urec->rawdata_.len >= (int)SizeOfUHeapDiskTupleData); - errno_t rc = memcpy_s((char *)&diskTuple + OffsetTdId, SizeOfUHeapDiskTupleHeaderExceptXid, - urec->rawdata_.data + sizeof(uint8), SizeOfUHeapDiskTupleHeaderExceptXid); - securec_check(rc, "", ""); - diskTuple.xid = (ShortTransactionId)InvalidTransactionId; - } else { - Assert(urec->rawdata_.len >= (int)SizeOfUHeapDiskTupleHeaderExceptXid); - errno_t rc = memcpy_s(((char *)&diskTuple + OffsetTdId), SizeOfUHeapDiskTupleHeaderExceptXid, - urec->rawdata_.data, SizeOfUHeapDiskTupleHeaderExceptXid); - securec_check(rc, "", ""); - diskTuple.xid = (ShortTransactionId)InvalidTransactionId; - } rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_UFORMAT, diskTuple.td_id); + UNDO_REC_PTR_FORMAT, alreadyRead); + securec_check_ss(rc, "\0", "\0"); + values[ARR_14] = CStringGetTextDatum(textBuffer); + + char prevLen[2]; + UndoRecordSize byteToRead = sizeof(UndoRecordSize); + char *readptr = buffer + startingByte - byteToRead; + for (auto i = 0; i < byteToRead; i++) { + prevLen[i] = *readptr; + readptr++; + } + UndoRecordSize prevRecLen = *(UndoRecordSize *)(prevLen); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_UFORMAT, prevRecLen); + securec_check_ss(rc, "\0", "\0"); + values[ARR_15] = CStringGetTextDatum(textBuffer); + + rc = 
snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); securec_check_ss(rc, "\0", "\0"); values[ARR_16] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_UFORMAT, diskTuple.reserved); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); securec_check_ss(rc, "\0", "\0"); values[ARR_17] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_UFORMAT, diskTuple.flag); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); securec_check_ss(rc, "\0", "\0"); values[ARR_18] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_UFORMAT, diskTuple.flag2); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); securec_check_ss(rc, "\0", "\0"); values[ARR_19] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_UFORMAT, diskTuple.t_hoff); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_DFORMAT, -1); securec_check_ss(rc, "\0", "\0"); values[ARR_20] = CStringGetTextDatum(textBuffer); - } - tuplestore_putvalues(tupstore, tupDesc, values, nulls); - free(urec); - close(fd); - if (blkprev != INVALID_UNDO_REC_PTR) { - ParseUndoRecord(blkprev, tupstore, tupDesc); - } + if (urec->whdr_.utype != UNDO_INSERT && urec->whdr_.utype != UNDO_MULTI_INSERT && + urec->rawdata_.len > 0 && urec->rawdata_.data != NULL) { + UHeapDiskTupleDataHeader diskTuple; + if (urec->whdr_.utype == UNDO_INPLACE_UPDATE) { + Assert(urec->rawdata_.len >= (int)SizeOfUHeapDiskTupleData); + errno_t rc = memcpy_s((char *)&diskTuple + OffsetTdId, SizeOfUHeapDiskTupleHeaderExceptXid, + urec->rawdata_.data + sizeof(uint8), 
SizeOfUHeapDiskTupleHeaderExceptXid); + securec_check(rc, "", ""); + diskTuple.xid = (ShortTransactionId)InvalidTransactionId; + } else { + Assert(urec->rawdata_.len >= (int)SizeOfUHeapDiskTupleHeaderExceptXid); + errno_t rc = memcpy_s(((char *)&diskTuple + OffsetTdId), SizeOfUHeapDiskTupleHeaderExceptXid, + urec->rawdata_.data, SizeOfUHeapDiskTupleHeaderExceptXid); + securec_check(rc, "", ""); + diskTuple.xid = (ShortTransactionId)InvalidTransactionId; + } + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_UFORMAT, diskTuple.td_id); + securec_check_ss(rc, "\0", "\0"); + values[ARR_16] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_UFORMAT, diskTuple.reserved); + securec_check_ss(rc, "\0", "\0"); + values[ARR_17] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_UFORMAT, diskTuple.flag); + securec_check_ss(rc, "\0", "\0"); + values[ARR_18] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_UFORMAT, diskTuple.flag2); + securec_check_ss(rc, "\0", "\0"); + values[ARR_19] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, + UNDO_REC_PTR_UFORMAT, diskTuple.t_hoff); + securec_check_ss(rc, "\0", "\0"); + values[ARR_20] = CStringGetTextDatum(textBuffer); + } + + tuplestore_putvalues(tupstore, tupDesc, values, nulls); + free(urec); + close(fd); + if (TransactionIdIsValid(xid) && IS_VALID_UNDO_REC_PTR(startUrp) && urp > startUrp) { + urp = GetPrevUrp(urp); + } else { + break; + } + } while (true); + return true; } @@ -2002,7 +2017,7 @@ Datum gs_undo_dump_record(PG_FUNCTION_ARGS) rsinfo->setDesc = tupDesc; MemoryContextSwitchTo(oldcontext); - ParseUndoRecord(undoptr, tupstore, tupDesc); + ParseUndoRecord(undoptr, tupstore, tupDesc, InvalidTransactionId, 
INVALID_UNDO_REC_PTR); tuplestore_donestoring(tupstore); PG_RETURN_VOID(); @@ -2064,7 +2079,7 @@ Datum gs_undo_dump_xid(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } UndoRecPtr undoptr = GetPrevUrp(miniSlot.endUndoPtr); - ParseUndoRecord(undoptr, tupstore, tupDesc); + ParseUndoRecord(undoptr, tupstore, tupDesc, xid, miniSlot.startUndoPtr); tuplestore_donestoring(tupstore); PG_RETURN_VOID(); @@ -2187,7 +2202,7 @@ Datum gs_undo_record(PG_FUNCTION_ARGS) rsinfo->setDesc = tupDesc; MemoryContextSwitchTo(oldcontext); - ParseUndoRecord(undoptr, tupstore, tupDesc); + ParseUndoRecord(undoptr, tupstore, tupDesc, InvalidTransactionId, INVALID_UNDO_REC_PTR); tuplestore_donestoring(tupstore); PG_RETURN_VOID(); -- Gitee From 8c9e9d422e656af396871cfc2d723ed69404b79d Mon Sep 17 00:00:00 2001 From: q00421813 Date: Thu, 8 Aug 2024 11:29:34 +0800 Subject: [PATCH 175/347] =?UTF-8?q?1.UHeapVerify=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=E7=BD=AE=E7=A9=BA=E9=80=BB=E8=BE=91=202.undo=20lock=20swtichov?= =?UTF-8?q?er=20reinit?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/transam/xlog.cpp | 1 + .../storage/access/ustore/knl_uscan.cpp | 5 ++- .../access/ustore/undo/knl_uundoapi.cpp | 38 ++++++++++++++++--- src/gausskernel/storage/ipc/ipci.cpp | 1 + src/gausskernel/storage/lmgr/lwlock.cpp | 1 - src/include/access/ustore/undo/knl_uundoapi.h | 3 +- .../access/ustore/undo/knl_uundospace.h | 8 ++++ .../access/ustore/undo/knl_uundozone.h | 8 ++++ 8 files changed, 56 insertions(+), 9 deletions(-) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 201537f223..b03251dc5b 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -11969,6 +11969,7 @@ void ShutdownXLOG(int code, Datum arg) g_instance.ckpt_cxt_ctl->snapshotBlockLock = NULL; g_instance.bgwriter_cxt.rel_hashtbl_lock = NULL; 
g_instance.bgwriter_cxt.rel_one_fork_hashtbl_lock = NULL; + undo::ResetUndoZoneLock(); if (ENABLE_DSS && IsInitdb && g_instance.dms_cxt.SSReformerControl.primaryInstId == INVALID_INSTANCEID) { diff --git a/src/gausskernel/storage/access/ustore/knl_uscan.cpp b/src/gausskernel/storage/access/ustore/knl_uscan.cpp index 8acf69dc0c..7a02e18e07 100644 --- a/src/gausskernel/storage/access/ustore/knl_uscan.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uscan.cpp @@ -1637,8 +1637,8 @@ static bool VerifyUHeapGetTup(UHeapScanDesc scan, ScanDirection dir) if (!scan->rs_base.rs_inited) { if (scan->rs_base.rs_nblocks == 0) { Assert(!BufferIsValid(scan->rs_base.rs_cbuf)); - tuple = NULL; - return tuple; + scan->rs_cutup = NULL; + return false; } page = scan->rs_base.rs_startblock; scan->rs_base.rs_cblock = page; @@ -1672,6 +1672,7 @@ static bool VerifyUHeapGetTup(UHeapScanDesc scan, ScanDirection dir) } PG_END_TRY(); if (finished) { + scan->rs_cutup = NULL; return isValidPage; } lineOff = 0; diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp index e205f2b4cf..971c1cfcb8 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp @@ -786,7 +786,7 @@ void ReleaseSlotBuffer() } } -void initUndoZoneLock() +void InitUndoZoneLock() { if (g_instance.undo_cxt.uZones != NULL) { int persistZoneCount = PERSIST_ZONE_COUNT; @@ -797,13 +797,41 @@ void initUndoZoneLock() int zoneId = (int)(idx + persist * PERSIST_ZONE_COUNT); UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zoneId]; if (uzone == NULL) { - break; + continue; } - uzone->InitLock(); - uzone->GetUndoSpace()->LockInit(); - uzone->GetSlotSpace()->LockInit(); + if (!(uzone->GetLock())) { + uzone->InitLock(); + } + if (!(uzone->GetUndoSpace()->GetLock())) { + uzone->GetUndoSpace()->LockInit(); + } + if (!(uzone->GetSlotSpace()->GetLock())) { + 
uzone->GetSlotSpace()->LockInit(); + } + } + } + } +} + +void ResetUndoZoneLock() +{ + if (g_instance.undo_cxt.uZones != NULL) { + int persistZoneCount = PERSIST_ZONE_COUNT; + for (int persist = (int)UNDO_PERMANENT; persist <= (int)UNDO_TEMP; persist++) { + CHECK_FOR_INTERRUPTS(); + for (auto idx = 0; idx < persistZoneCount; idx++) { + CHECK_FOR_INTERRUPTS(); + int zoneId = (int)(idx + persist * PERSIST_ZONE_COUNT); + UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zoneId]; + if (uzone == NULL) { + continue; + } + uzone->SetLock(NULL); + uzone->GetUndoSpace()->SetLock(NULL); + uzone->GetSlotSpace()->SetLock(NULL); } } } } + } // namespace undo diff --git a/src/gausskernel/storage/ipc/ipci.cpp b/src/gausskernel/storage/ipc/ipci.cpp index 4f666f4a6d..d7228076a4 100644 --- a/src/gausskernel/storage/ipc/ipci.cpp +++ b/src/gausskernel/storage/ipc/ipci.cpp @@ -391,6 +391,7 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port) HaShmemInit(); AsyncRollbackHashShmemInit(); UndoWorkerShmemInit(); + undo::InitUndoZoneLock(); heartbeat_shmem_init(); MatviewShmemInit(); #ifndef ENABLE_MULTIPLE_NODES diff --git a/src/gausskernel/storage/lmgr/lwlock.cpp b/src/gausskernel/storage/lmgr/lwlock.cpp index a8733d3808..80cc97addd 100644 --- a/src/gausskernel/storage/lmgr/lwlock.cpp +++ b/src/gausskernel/storage/lmgr/lwlock.cpp @@ -689,7 +689,6 @@ static void InitializeLWLocks(int numLocks) for (id = NumFixedLWLocks; id < numLocks; id++, lock++) { LWLockInitialize(&lock->lock, LWTRANCHE_UNKNOWN); } - undo::initUndoZoneLock(); } const char *GetBuiltInTrancheName(int trancheId) diff --git a/src/include/access/ustore/undo/knl_uundoapi.h b/src/include/access/ustore/undo/knl_uundoapi.h index de7e8b438b..66a2d06975 100644 --- a/src/include/access/ustore/undo/knl_uundoapi.h +++ b/src/include/access/ustore/undo/knl_uundoapi.h @@ -65,7 +65,8 @@ void ReleaseSlotBuffer(); void InitUndoCountThreshold(); void RebuildUndoZoneBitmap(); UndoRecPtr GetPrevUrp(UndoRecPtr currUrp); 
-void initUndoZoneLock(); +void InitUndoZoneLock(); +void ResetUndoZoneLock(); } // namespace undo extern void GetUndoFileDirectory(char *path, int len, UndoPersistence upersistence); diff --git a/src/include/access/ustore/undo/knl_uundospace.h b/src/include/access/ustore/undo/knl_uundospace.h index 8caf90c519..b738fd322d 100644 --- a/src/include/access/ustore/undo/knl_uundospace.h +++ b/src/include/access/ustore/undo/knl_uundospace.h @@ -63,6 +63,10 @@ public: { return lsn_; } + inline LWLock* GetLock(void) + { + return lock_; + } uint32 Used(int zoneId); /* Setter, used for redo. */ @@ -82,6 +86,10 @@ public: { lsn_ = lsn; } + inline void SetLock(LWLock* lock) + { + lock_ = lock; + } /* Space lock/unlock. */ void LockSpace(void) diff --git a/src/include/access/ustore/undo/knl_uundozone.h b/src/include/access/ustore/undo/knl_uundozone.h index 82a9e8d4a1..67fbf55e1e 100644 --- a/src/include/access/ustore/undo/knl_uundozone.h +++ b/src/include/access/ustore/undo/knl_uundozone.h @@ -143,6 +143,10 @@ public: { return attachPid_; } + inline LWLock* GetLock(void) + { + return lock_; + } inline bool Attached(void) { return pg_atomic_read_u32(&attached_) == UNDO_ZONE_ATTACHED; @@ -220,6 +224,10 @@ public: { attached_ = attach; } + inline void SetLock(LWLock* lock) + { + lock_ = lock; + } inline bool Used(void) { return insertURecPtr_ != forceDiscardURecPtr_; -- Gitee From 4c9ff79cff5305593b1b6d7cea9bddef2a5fb6ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cyaoxin=E2=80=9D?= Date: Tue, 13 Aug 2024 16:29:50 +0800 Subject: [PATCH 176/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dundo=20space=E6=BB=A1?= =?UTF-8?q?=E5=90=8E=EF=BC=8Cerror=E6=97=A5=E5=BF=97=E6=8A=A5=E9=94=99undo?= =?UTF-8?q?=5Fspace=5Flimit=5Fsize=E4=B8=BA0=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp index 65204a3351..4b9f08e9fd 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp @@ -87,9 +87,9 @@ void UndoSpace::ExtendUndoLog(int zid, UndoLogOffset offset, uint32 dbId) while (tail < offset) { if ((!t_thrd.xlog_cxt.InRecovery) && (static_cast(g_instance.undo_cxt.undoTotalSize) + static_cast(g_instance.undo_cxt.undoMetaSize) >= u_sess->attr.attr_storage.undo_space_limit_size)) { - uint64 undoSize = (g_instance.undo_cxt.undoTotalSize + g_instance.undo_cxt.undoMetaSize) * BLCKSZ / + uint64 undoSize = ((uint64)g_instance.undo_cxt.undoTotalSize + g_instance.undo_cxt.undoMetaSize) * BLCKSZ / (1024 * 1024); - uint64 limitSize = u_sess->attr.attr_storage.undo_space_limit_size * BLCKSZ / (1024 * 1024); + uint64 limitSize = (uint64)(u_sess->attr.attr_storage.undo_space_limit_size) * BLCKSZ / (1024 * 1024); smgrclose(reln); ereport(ERROR, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( "undo space size %luM > limit size %luM. 
Please increase the undo_space_limit_size."), -- Gitee From 54853fd25f01f6c738311ed9be7bc1281c7e65f9 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Thu, 27 Jun 2024 15:39:49 +0800 Subject: [PATCH 177/347] =?UTF-8?q?=E7=A6=81=E6=AD=A2=E6=8F=92=E5=85=A5?= =?UTF-8?q?=E5=80=BC=E4=B8=BArecord=E4=B8=8D=E7=A6=81=E6=AD=A2=E6=8F=92?= =?UTF-8?q?=E5=85=A5=E5=80=BC=E4=B8=BArow?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_target.cpp | 4 + src/common/backend/utils/misc/guc/guc_sql.cpp | 3 +- src/include/miscadmin.h | 4 +- .../expected/plpgsql_cursor_rowtype.out | 157 +++++++++++++++++- .../regress/sql/plpgsql_cursor_rowtype.sql | 122 +++++++++++++- 5 files changed, 281 insertions(+), 9 deletions(-) diff --git a/src/common/backend/parser/parse_target.cpp b/src/common/backend/parser/parse_target.cpp index 22991b0acb..5c3f683647 100644 --- a/src/common/backend/parser/parse_target.cpp +++ b/src/common/backend/parser/parse_target.cpp @@ -449,6 +449,10 @@ Expr* transformAssignedExpr(ParseState* pstate, Expr* expr, ParseExprKind exprKi checkArrayTypeInsert(pstate, expr); } + if (IsA(expr, Param) && DISABLE_RECORD_TYPE_IN_DML && type_id == RECORDOID) { + ereport(ERROR, (errcode(ERRCODE_PLPGSQL_ERROR), + errmsg("The record type variable cannot be used as an insertion value."))); + } ELOG_FIELD_NAME_START(colname); /* diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index 4e29c8b518..dd761ea2e1 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -397,7 +397,8 @@ static const struct behavior_compat_entry behavior_compat_options[OPT_MAX] = { {"update_unusable_unique_index_on_iud", OPT_UPDATE_UNUSABLE_UNIQUE_INDEX_ON_IUD}, {"prefer_parse_cursor_parentheses_as_expr", OPT_PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR}, {"update_global_index_on_partition_change", 
OPT_UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE}, - {"float_as_numeric", OPT_FLOAT_AS_NUMERIC} + {"float_as_numeric", OPT_FLOAT_AS_NUMERIC}, + {"disable_record_type_in_dml", OPT_DISABLE_RECORD_TYPE_IN_DML} }; // increase SQL_IGNORE_STRATEGY_NUM if we need more strategy diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 72284dc63e..d78d70ac2f 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -223,7 +223,8 @@ extern bool contain_backend_version(uint32 version_number); #define OPT_PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR (1LL << 31) #define OPT_UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE (1LL << 32) #define OPT_FLOAT_AS_NUMERIC (1LL << 33) -#define OPT_MAX 34 +#define OPT_DISABLE_RECORD_TYPE_IN_DML (1LL << 34) +#define OPT_MAX 35 #define PLPSQL_OPT_FOR_LOOP 1 #define PLPSQL_OPT_OUTPARAM 2 @@ -271,6 +272,7 @@ extern bool contain_backend_version(uint32 version_number); #define PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR (u_sess->utils_cxt.behavior_compat_flags & OPT_PREFER_PARSE_CURSOR_PARENTHESES_AS_EXPR) #define UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE (u_sess->utils_cxt.behavior_compat_flags & OPT_UPDATE_GLOBAL_INDEX_ON_PARTITION_CHANGE) #define FLOAT_AS_NUMERIC (u_sess->utils_cxt.behavior_compat_flags & OPT_FLOAT_AS_NUMERIC) +#define DISABLE_RECORD_TYPE_IN_DML (u_sess->utils_cxt.behavior_compat_flags & OPT_DISABLE_RECORD_TYPE_IN_DML) /* define database compatibility Attribute */ typedef struct { diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 2d33e99bab..64e9d99129 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -64,7 +64,154 @@ END; / NOTICE: Result: (,,,,,) drop table test_2 cascade; -set behavior_compat_options='allow_procedure_compile_check'; +set behavior_compat_options='allow_procedure_compile_check,disable_record_type_in_dml'; +-- Prohibit virtual column insertion +create table 
t1(col1 varchar(10),col varchar(10)); +create table t2(col1 varchar(10),col varchar(10)); +insert into t1 values('one','two'); +declare + cursor cur1 is select * from t1; + source cur1%rowtype:=('ten','wtu'); +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values(source.col1, source.col); + end loop; +end; +/ +NOTICE: (one,two) +insert into t1 values('one','two'); +declare + cursor cur1 is select * from t1; + source cur1%rowtype:=('ten','wtu'); +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values(source); + end loop; +end; +/ +NOTICE: (one,two) +ERROR: The record type variable cannot be used as an insertion value. +CONTEXT: SQL statement "insert into t2 values(source)" +PL/pgSQL function inline_code_block line 7 at SQL statement +select * from t2; + col1 | col +------+----- + one | two +(1 row) + +drop table t1; +drop table t2; +create table t1 (a int); +create table t2 (a t1); +declare + source t2%rowtype; +begin + insert into t2 values(source.a); +end; +/ +declare + source t2%rowtype; +begin + update t2 set a = source; +end; +/ +ERROR: column "a" is of type t1 but expression is of type t2 +LINE 1: update t2 set a = source + ^ +HINT: You will need to rewrite or cast the expression. 
+QUERY: update t2 set a = source +CONTEXT: referenced column: a +PL/pgSQL function inline_code_block line 3 at SQL statement +declare + source t2%rowtype; +begin + update t2 set a = source.a; +end; +/ +drop table t2; +drop table t1; +-- Prohibit virtual column insertion +create table t1(col1 varchar(10), col2 int, col3 varchar(10), col4 varchar(10)); +insert into t1 values('one',5,'dsa','e'); +insert into t1 values('two',7,'daw','d'); +insert into t1 values('three',7,'dsaw','sw'); +insert into t1 values(NULL); +create table t2(col1 varchar(10), col2 int, col3 varchar(10), col4 varchar(10)); +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values('o', 5, source.col4, source.col1); + end loop; +end; +/ +NOTICE: (one,5,dsa,e) +NOTICE: (two,7,daw,d) +NOTICE: (three,7,dsaw,sw) +NOTICE: (,,,) +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values('o', 5, source.col4, source); + end loop; +end; +/ +NOTICE: (one,5,dsa,e) +ERROR: The record type variable cannot be used as an insertion value. +CONTEXT: SQL statement "insert into t2 values('o', 5, source.col4, source)" +PL/pgSQL function inline_code_block line 7 at SQL statement +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values('o', 5, source, source.col1); + end loop; +end; +/ +NOTICE: (one,5,dsa,e) +ERROR: The record type variable cannot be used as an insertion value. 
+CONTEXT: SQL statement "insert into t2 values('o', 5, source, source.col1)" +PL/pgSQL function inline_code_block line 7 at SQL statement +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values(source); + end loop; +end; +/ +NOTICE: (one,5,dsa,e) +ERROR: The record type variable cannot be used as an insertion value. +CONTEXT: SQL statement "insert into t2 values(source)" +PL/pgSQL function inline_code_block line 7 at SQL statement +select * from t2; + col1 | col2 | col3 | col4 +------+------+------+------- + o | 5 | e | one + o | 5 | d | two + o | 5 | sw | three + o | 5 | | +(4 rows) + +drop table t1; +drop table t2; create table emp (empno int, ename varchar(10), job varchar(10)); insert into emp values (1, 'zhangsan', 'job1'); insert into emp values (2, 'lisi', 'job2'); @@ -1060,9 +1207,9 @@ drop procedure pro_cs_trans_1; drop table cs_trans_1; -- test for rec in cursor loop show behavior_compat_options; - behavior_compat_options -------------------------------- - allow_procedure_compile_check + behavior_compat_options +---------------------------------------------------------- + allow_procedure_compile_check,disable_record_type_in_dml (1 row) create table test_table(col1 varchar2(10)); @@ -2429,7 +2576,7 @@ BEGIN END; / ERROR: "source.oid" is not a known variable ---?.* +LINE 10: source.oid := 5; ^ QUERY: DECLARE CURSOR c IS diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index e6b3d09208..84bd2b46b2 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -68,7 +68,125 @@ END; drop table test_2 cascade; -set behavior_compat_options='allow_procedure_compile_check'; +set behavior_compat_options='allow_procedure_compile_check,disable_record_type_in_dml'; + +-- Prohibit virtual column insertion +create table t1(col1 varchar(10),col varchar(10)); 
+create table t2(col1 varchar(10),col varchar(10)); +insert into t1 values('one','two'); +declare + cursor cur1 is select * from t1; + source cur1%rowtype:=('ten','wtu'); +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values(source.col1, source.col); + end loop; +end; +/ + +insert into t1 values('one','two'); +declare + cursor cur1 is select * from t1; + source cur1%rowtype:=('ten','wtu'); +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values(source); + end loop; +end; +/ +select * from t2; +drop table t1; +drop table t2; + +create table t1 (a int); +create table t2 (a t1); +declare + source t2%rowtype; +begin + insert into t2 values(source.a); +end; +/ + +declare + source t2%rowtype; +begin + update t2 set a = source; +end; +/ + +declare + source t2%rowtype; +begin + update t2 set a = source.a; +end; +/ + +drop table t2; +drop table t1; + +-- Prohibit virtual column insertion +create table t1(col1 varchar(10), col2 int, col3 varchar(10), col4 varchar(10)); +insert into t1 values('one',5,'dsa','e'); +insert into t1 values('two',7,'daw','d'); +insert into t1 values('three',7,'dsaw','sw'); +insert into t1 values(NULL); + +create table t2(col1 varchar(10), col2 int, col3 varchar(10), col4 varchar(10)); + +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values('o', 5, source.col4, source.col1); + end loop; +end; +/ + +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values('o', 5, source.col4, source); + end loop; +end; +/ + +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in cur1 + loop + raise notice '%',source; + insert into t2 values('o', 5, source, source.col1); + end loop; +end; +/ + +declare + cursor cur1 is select * from t1; + source cur1%rowtype; +begin + for source in 
cur1 + loop + raise notice '%',source; + insert into t2 values(source); + end loop; +end; +/ +select * from t2; +drop table t1; +drop table t2; create table emp (empno int, ename varchar(10), job varchar(10)); insert into emp values (1, 'zhangsan', 'job1'); @@ -1837,4 +1955,4 @@ delete from employees; call f1(2); set current_schema=public; -drop schema cursor_rowtype cascade; \ No newline at end of file +drop schema cursor_rowtype cascade; -- Gitee From 7b194a09bf580cf8f0f31ca44db00aa4374d8586 Mon Sep 17 00:00:00 2001 From: yelingzhi Date: Fri, 2 Aug 2024 03:22:34 +0000 Subject: [PATCH 178/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=BA=E5=B5=8C?= =?UTF-8?q?=E5=A5=97=E6=95=B0=E7=BB=84=E7=9A=84=E4=B8=80=E7=BB=B4=E6=95=B0?= =?UTF-8?q?=E7=BB=84=E8=B5=8B=E7=B1=BB=E5=9E=8B=E4=B8=8D=E5=8C=B9=E9=85=8D?= =?UTF-8?q?=E7=9A=84=E5=80=BC=E6=97=B6=E6=8C=82=E5=BA=93=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_exec.cpp | 62 ++++++++++++++----- .../plpgsql_nested_array_and_record.out | 34 +++++++++- .../sql/plpgsql_nested_array_and_record.sql | 24 +++++++ 3 files changed, 102 insertions(+), 18 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index a11c75befa..cefc5430e0 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -10008,30 +10008,58 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v * Evaluate the subscripts, switch into left-to-right order. * Like ExecEvalArrayRef(), complain if any subscript is null. 
*/ - for (i = 0; i < nsubscripts; i++) { - bool subisnull = false; + bool isNestedArray = ((PLpgSQL_var*)target)->nest_table != NULL; - subscriptvals[i] = exec_eval_integer(estate, subscripts[nsubscripts - 1 - i], &subisnull); - if (subisnull) { + if (isNestedArray) { + Oid subscriptType = ((PLpgSQL_var*)target)->datatype->tableOfIndexType; + HTAB* elemTableOfIndex = ((PLpgSQL_var*)target)->tableOfIndex; + PLpgSQL_var* innerVar = evalSubsciptsNested(estate, (PLpgSQL_var*)target, subscripts, nsubscripts, + 0, subscriptvals, subscriptType, elemTableOfIndex); + target = (PLpgSQL_datum*)innerVar; + /* should assign inner var as an array, copy value's index */ + if (innerVar->ispkg) { + MemoryContext temp = MemoryContextSwitchTo(innerVar->pkg->pkg_cxt); + innerVar->tableOfIndex = copyTableOfIndex(tableOfIndex); + MemoryContextSwitchTo(temp); + } else { + innerVar->tableOfIndex = copyTableOfIndex(tableOfIndex); + } + if (tableOfIndexInfo != NULL && innerVar->nest_layers != tableOfIndexInfo->tableOfLayers) { ereport(ERROR, - (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmodule(MOD_PLSQL), - errmsg("array subscript in assignment must not be null"))); + errmsg("Nest layer of assigned value is miss match to tableof var(%s)'s expection", + innerVar->refname))); } - /* - * Clean up in case the subscript expression wasn't - * simple. We can't do exec_eval_cleanup, but we can do - * this much (which is safe because the integer subscript - * value is surely pass-by-value), and we must do it in - * case the next subscript expression isn't simple either. 
- */ - if (estate->eval_tuptable != NULL) { - SPI_freetuptable(estate->eval_tuptable); + exec_assign_value(estate, target, PointerGetDatum(value), + valtype, isNull, innerVar->tableOfIndex); + break; + } else { + for (i = 0; i < nsubscripts; i++) { + bool subisnull = false; + + subscriptvals[i] = exec_eval_integer(estate, subscripts[nsubscripts - 1 - i], &subisnull); + if (subisnull) { + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmodule(MOD_PLSQL), + errmsg("array subscript in assignment must not be null"))); + } + + /* + * Clean up in case the subscript expression wasn't + * simple. We can't do exec_eval_cleanup, but we can do + * this much (which is safe because the integer subscript + * value is surely pass-by-value), and we must do it in + * case the next subscript expression isn't simple either. + */ + if (estate->eval_tuptable != NULL) { + SPI_freetuptable(estate->eval_tuptable); + } + estate->eval_tuptable = NULL; } - estate->eval_tuptable = NULL; } - /* Now we can restore caller's SPI_execute result if any. 
*/ AssertEreport(estate->eval_tuptable == NULL, MOD_PLSQL, "eval tuptable should not be null"); estate->eval_tuptable = save_eval_tuptable; diff --git a/src/test/regress/expected/plpgsql_nested_array_and_record.out b/src/test/regress/expected/plpgsql_nested_array_and_record.out index 421535dafa..53cf9d73be 100644 --- a/src/test/regress/expected/plpgsql_nested_array_and_record.out +++ b/src/test/regress/expected/plpgsql_nested_array_and_record.out @@ -26,6 +26,36 @@ NOTICE: RESULT: 2 NOTICE: RESULT: 3 NOTICE: RESULT: 4 NOTICE: RESULT: 5 +CREATE OR REPLACE PROCEDURE test_nested_array as +TYPE typ_PLArray_case0001 IS varray(3) OF integer; +TYPE typ_PLArray_case0002 IS varray(3) OF typ_PLArray_case0001; +nstarr typ_PLArray_case0002; +BEGIN + nstarr(1):=1; + RAISE NOTICE '二维数组(1):%', nstarr(1); +END; +/ +CALL test_nested_array(); +ERROR: array value must start with "{" or dimension information +CONTEXT: PL/pgSQL function test_nested_array() line 5 at assignment +CREATE OR REPLACE PROCEDURE test_nested_array as +TYPE typ_PLArray_case0001 IS varray(3) OF integer; +TYPE typ_PLArray_case0002 IS varray(3) OF typ_PLArray_case0001; +nstarr typ_PLArray_case0002; +arr typ_PLArray_case0001; +BEGIN + arr(1):=1; + nstarr(1):=arr; + RAISE NOTICE '二维数组(1):%', nstarr(1); +END; +/ +CALL test_nested_array(); +NOTICE: 二维数组(1):{1} + test_nested_array +------------------- + +(1 row) + -- record of arrays DECLARE TYPE arr1 IS VARRAY(5) OF INTEGER; @@ -87,4 +117,6 @@ NOTICE: ID: 1, NAME: RECORD (1 row) DROP SCHEMA plpgsql_nested_array_and_record CASCADE; -NOTICE: drop cascades to function test_nested() +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function test_nested_array() +drop cascades to function test_nested() diff --git a/src/test/regress/sql/plpgsql_nested_array_and_record.sql b/src/test/regress/sql/plpgsql_nested_array_and_record.sql index da1bf741e7..2fda94dd15 100644 --- a/src/test/regress/sql/plpgsql_nested_array_and_record.sql +++ 
b/src/test/regress/sql/plpgsql_nested_array_and_record.sql @@ -18,6 +18,30 @@ BEGIN END; / +CREATE OR REPLACE PROCEDURE test_nested_array as +TYPE typ_PLArray_case0001 IS varray(3) OF integer; +TYPE typ_PLArray_case0002 IS varray(3) OF typ_PLArray_case0001; +nstarr typ_PLArray_case0002; +BEGIN + nstarr(1):=1; + RAISE NOTICE '二维数组(1):%', nstarr(1); +END; +/ +CALL test_nested_array(); + +CREATE OR REPLACE PROCEDURE test_nested_array as +TYPE typ_PLArray_case0001 IS varray(3) OF integer; +TYPE typ_PLArray_case0002 IS varray(3) OF typ_PLArray_case0001; +nstarr typ_PLArray_case0002; +arr typ_PLArray_case0001; +BEGIN + arr(1):=1; + nstarr(1):=arr; + RAISE NOTICE '二维数组(1):%', nstarr(1); +END; +/ +CALL test_nested_array(); + -- record of arrays DECLARE TYPE arr1 IS VARRAY(5) OF INTEGER; -- Gitee From deb8e6def1ad99902534777a0ed0023ed3ede9d2 Mon Sep 17 00:00:00 2001 From: DarkAthena Date: Tue, 13 Aug 2024 09:20:50 +0000 Subject: [PATCH 179/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Drecord=5Fin=E5=9C=A8A?= =?UTF-8?q?=E6=A8=A1=E5=BC=8F=E4=B8=8B=E8=A7=A3=E6=9E=90=E7=A9=BA=E5=AD=97?= =?UTF-8?q?=E7=AC=A6=E4=B8=B2=E6=B2=A1=E6=9C=89=E8=BD=AC=E6=8D=A2=E6=88=90?= =?UTF-8?q?null=E7=9A=84=E9=97=AE=E9=A2=98=20=EF=BC=88cherry=20picked=20co?= =?UTF-8?q?mmit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/rowtypes.cpp | 9 ++++++-- .../regress/expected/create_compositetype.out | 23 +++++++++++++++++++ src/test/regress/sql/create_compositetype.sql | 13 +++++++++++ 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/src/common/backend/utils/adt/rowtypes.cpp b/src/common/backend/utils/adt/rowtypes.cpp index e784da8ac1..9aec01b4b1 100644 --- a/src/common/backend/utils/adt/rowtypes.cpp +++ b/src/common/backend/utils/adt/rowtypes.cpp @@ -194,8 +194,13 @@ Datum record_in(PG_FUNCTION_ARGS) } } - column_data = buf.data; - nulls[i] = false; + if ((0 == buf.len) && u_sess->attr.attr_sql.sql_compatibility == 
A_FORMAT && !ACCEPT_EMPTY_STR) { + column_data = NULL; + nulls[i] = true; + } else { + column_data = buf.data; + nulls[i] = false; + } } /* diff --git a/src/test/regress/expected/create_compositetype.out b/src/test/regress/expected/create_compositetype.out index c3e5ffd220..9327f83e2b 100644 --- a/src/test/regress/expected/create_compositetype.out +++ b/src/test/regress/expected/create_compositetype.out @@ -206,6 +206,29 @@ select * from t_type1; 1 | ("a ",abc,×ÜÎñºì¼õ·Ê°¡,jeoifjÔÚ#$@!#$½¨ê±½ð¶î·Ça121) (1 row) +drop table t_type1; +drop type comp1; +create type comp1 as(c1 char(10), c2 varchar(100), c3 text, c4 clob); +create table t_type1(a serial, b comp1); +NOTICE: CREATE TABLE will create implicit sequence "t_type1_a_seq" for serial column "t_type1.a" +set behavior_compat_options=''; +insert into t_type1(b) values(('','','','')); +select * from t_type1; + a | b +---+------- + 1 | (,,,) +(1 row) + +delete from t_type1; +set behavior_compat_options='accept_empty_str'; +insert into t_type1(b) values(('','','','')); +select * from t_type1; + a | b +---+------------------------- + 2 | (" ","","","") +(1 row) + +reset behavior_compat_options; drop table t_type1; drop type comp1; --date/time type diff --git a/src/test/regress/sql/create_compositetype.sql b/src/test/regress/sql/create_compositetype.sql index 90d3a1fcd7..e42ceac077 100644 --- a/src/test/regress/sql/create_compositetype.sql +++ b/src/test/regress/sql/create_compositetype.sql @@ -114,6 +114,19 @@ select * from t_type1; drop table t_type1; drop type comp1; +create type comp1 as(c1 char(10), c2 varchar(100), c3 text, c4 clob); +create table t_type1(a serial, b comp1); +set behavior_compat_options=''; +insert into t_type1(b) values(('','','','')); +select * from t_type1; +delete from t_type1; +set behavior_compat_options='accept_empty_str'; +insert into t_type1(b) values(('','','','')); +select * from t_type1; +reset behavior_compat_options; +drop table t_type1; +drop type comp1; + --date/time type create 
type comp1 as(c1 date, c2 timestamp with time zone, c3 INTERVAL DAY(3) TO SECOND (4)); create table t_type1(a serial, b comp1) ; -- Gitee From 2ca24dcbc9f8f32f839cf71a56e2c4b2b3f7ecc2 Mon Sep 17 00:00:00 2001 From: sundechao Date: Thu, 15 Aug 2024 15:19:50 +0800 Subject: [PATCH 180/347] =?UTF-8?q?=E5=88=9B=E5=BB=BA=E5=88=97=E7=BA=A6?= =?UTF-8?q?=E6=9D=9F=E5=88=86=E5=8C=BA=E8=A1=A8=EF=BC=8C=E6=A3=80=E9=AA=8C?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/nbtree/nbtinsert.cpp | 20 +++++++-------- .../storage/access/ubtree/ubtinsert.cpp | 25 +++++++------------ 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp index e3437966c3..4227aaeec7 100644 --- a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp @@ -504,18 +504,16 @@ TransactionId _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, } /* - * If we are doing a recheck, we expect to find the tuple we - * are rechecking. It's not a duplicate, but we have to keep - * scanning. For global partition index, part oid in index tuple - * is supposed to be same as heapRel oid, add check in case - * abnormal condition. + * If we are doing an index recheck (UNIQUE_CHECK_EXISTING mode), we expect + * to find tuple already in ubtree. Once the index tuple matched, it will be + * marked found. Traverse all tuples, if no matching tuple is found, report + * an error. For GPI, part oid should be the same as heapRel oid. 
*/ - if (checkUnique == UNIQUE_CHECK_EXISTING && ItemPointerCompare(&htid, &itup->t_tid) == 0) { - if (RelationIsGlobalIndex(rel) && curPartOid != heapRel->rd_id) { - ereport(ERROR, - (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("failed to re-find tuple within GPI \"%s\"", RelationGetRelationName(rel)))); - } + if (checkUnique == UNIQUE_CHECK_EXISTING && !RelationIsGlobalIndex(rel) + && ItemPointerCompare(&htid, &itup->t_tid) == 0) { + found = true; + } else if (checkUnique == UNIQUE_CHECK_EXISTING && RelationIsGlobalIndex(rel) + && curPartOid == heapRel->rd_id && ItemPointerCompare(&htid, &itup->t_tid) == 0) { found = true; } else if (CheckItemIsAlive(&htid, tarRel, &SnapshotDirty, &all_dead, cudescScan)) { /* diff --git a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp index 95ff1ded2c..400a7a4577 100644 --- a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp @@ -787,24 +787,17 @@ static TransactionId UBTreeCheckUnique(Relation rel, IndexTuple itup, Relation h } /* - * If we are doing a recheck, we expect to find the tuple we - * are rechecking. It's not a duplicate, but we have to keep - * scanning. For global partition index, part oid in index tuple - * is supposed to be same as heapRel oid, add check in case - * abnormal condition. + * If we are doing an index recheck (UNIQUE_CHECK_EXISTING mode), we expect + * to find tuple already in ubtree. Once the index tuple matched, it will be + * marked found. Traverse all tuples, if no matching tuple is found, report + * an error. For GPI, part oid should be the same as heapRel oid. 
*/ - if (checkUnique == UNIQUE_CHECK_EXISTING && ItemPointerCompare(&htid, &itup->t_tid) == 0) { - if (RelationIsGlobalIndex(rel) && curPartOid != heapRel->rd_id) { - ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("failed to re-find tuple within GPI \"%s\"", - RelationGetRelationName(rel)))); - } + if (checkUnique == UNIQUE_CHECK_EXISTING && ItemPointerCompare(&htid, &itup->t_tid) == 0 + && !RelationIsGlobalIndex(rel)) { + found = true; + } else if (checkUnique == UNIQUE_CHECK_EXISTING && ItemPointerCompare(&htid, &itup->t_tid) == 0 + && curPartOid == heapRel->rd_id && RelationIsGlobalIndex(rel)) { found = true; - /* - * We check the whole HOT-chain to see if there is any tuple - * that satisfies SnapshotDirty. This is necessary because we - * have just a single index entry for the entire chain. - */ } else { TransactionId xmin, xmax; bool isdead = false; -- Gitee From 428ee7d27e25831c085648b1169ce6bcc579a455 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=BD=95=E6=96=87=E5=81=A5?= Date: Tue, 13 Aug 2024 13:53:41 +0800 Subject: [PATCH 181/347] =?UTF-8?q?fix=20bug=20#I9P3QO=20#5=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dgs=5Fxlog=5Fkeepers=E5=87=BD=E6=95=B0=E5=9C=A8?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=89=8D=E5=90=8E=E5=9C=A8pg=5Fproc=E6=98=BE?= =?UTF-8?q?=E7=A4=BA=E4=B8=8D=E5=90=8C=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/builtin_funcs.ini | 2 +- src/common/backend/utils/init/globals.cpp | 2 +- .../rollback-post_catalog_maindb_92_950.sql | 7 +++++++ .../rollback-post_catalog_otherdb_92_950.sql | 7 +++++++ .../upgrade-post_catalog_maindb_92_950.sql | 7 +++++++ .../upgrade-post_catalog_otherdb_92_950.sql | 7 +++++++ 6 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_950.sql create mode 100644 
src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_950.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_950.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_950.sql diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index f8c0ec6aab..cc944c2dc6 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -3693,7 +3693,7 @@ ), AddFuncGroup( "gs_xlog_keepers", 1, - AddBuiltinFunc(_0(9040), _1("gs_xlog_keepers"), _2(0), _3(true), _4(false), _5(gs_xlog_keepers), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(3, 25, 25, 25), _22(3, 'o', 'o', 'o'), _23(3, "keeptype", "keepsegment", "describe"), _24(NULL), _25("gs_xlog_keepers"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f')) + AddBuiltinFunc(_0(9040), _1("gs_xlog_keepers"), _2(0), _3(true), _4(true), _5(gs_xlog_keepers), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(3, 25, 25, 25), _22(3, 'o', 'o', 'o'), _23(3, "keeptype", "keepsegment", "describe"), _24(NULL), _25("gs_xlog_keepers"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f')) ), AddFuncGroup( "gs_get_next_xid_csn", 1, diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 71b73d9e47..ccb12587ff 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,7 +76,7 @@ bool will_shutdown = false; * 
********************************************/ -const uint32 GRAND_VERSION_NUM = 92949; +const uint32 GRAND_VERSION_NUM = 92950; /******************************************** * 2.VERSION NUM FOR EACH FEATURE diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_950.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_950.sql new file mode 100644 index 0000000000..587afbd3e5 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_950.sql @@ -0,0 +1,7 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers(out keeptype pg_catalog.text, out keepsegment pg_catalog.text, out describe pg_catalog.text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,9040; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers +(out keeptype pg_catalog.text, +out keepsegment pg_catalog.text, +out describe pg_catalog.text) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_xlog_keepers'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_950.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_950.sql new file mode 100644 index 0000000000..587afbd3e5 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_950.sql @@ -0,0 +1,7 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers(out keeptype pg_catalog.text, out keepsegment pg_catalog.text, out describe pg_catalog.text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,9040; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers +(out keeptype pg_catalog.text, +out keepsegment pg_catalog.text, +out describe pg_catalog.text) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_xlog_keepers'; \ No newline at end of file diff --git 
a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_950.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_950.sql new file mode 100644 index 0000000000..2bddfb1418 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_950.sql @@ -0,0 +1,7 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,9040; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers +(out keeptype pg_catalog.text, +out keepsegment pg_catalog.text, +out describe pg_catalog.text) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT ROWS 1000 NOT SHIPPABLE as 'gs_xlog_keepers'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_950.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_950.sql new file mode 100644 index 0000000000..2bddfb1418 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_950.sql @@ -0,0 +1,7 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,9040; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers +(out keeptype pg_catalog.text, +out keepsegment pg_catalog.text, +out describe pg_catalog.text) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT ROWS 1000 NOT SHIPPABLE as 'gs_xlog_keepers'; \ No newline at end of file -- Gitee From d3e4ddbc48e80bfc24fc55c426327d7b1e7a2f97 Mon Sep 17 00:00:00 2001 From: james Date: Fri, 16 Aug 2024 10:01:22 +0800 Subject: [PATCH 182/347] add fix --- src/common/backend/catalog/builtin_funcs.ini | 2 +- src/common/backend/utils/init/globals.cpp | 3 ++- .../rollback-post_catalog_maindb_92_951.sql | 11 +++++++++++ .../rollback-post_catalog_otherdb_92_951.sql | 11 +++++++++++ 
.../upgrade-post_catalog_maindb_92_951.sql | 11 +++++++++++ .../upgrade-post_catalog_otherdb_92_951.sql | 11 +++++++++++ 6 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_951.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_951.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_951.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_951.sql diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 4f5222007e..3b795da7f1 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -13044,7 +13044,7 @@ AddFuncGroup( ), AddFuncGroup( "gs_get_recv_locations", 1, - AddBuiltinFunc(_0(2872), _1("gs_get_recv_locations"), _2(0), _3(false), _4(true), _5(gs_get_recv_locations), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(4, 25, 25, 25, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "received_lsn", "write_lsn", "flush_lsn", "replay_lsn"), _24(NULL), _25("gs_get_recv_locations"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("statistics: information about WAL locations"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(2872), _1("gs_get_recv_locations"), _2(0), _3(false), _4(true), _5(gs_get_recv_locations), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(4, 25, 25, 25, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "received_lsn", 
"write_lsn", "flush_lsn", "replay_lsn"), _24(NULL), _25("gs_get_recv_locations"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33("statistics: information about currently wal locations"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "gs_get_hba_conf", 1, diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index ccb12587ff..08248b6ef7 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,12 +76,13 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92950; +const uint32 GRAND_VERSION_NUM = 92951; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. ********************************************/ +const uint32 FLUSH_LSN_FUN_VERSION_NUM = 92951; const uint32 PUBLICATION_DDL_AT_VERSION_NUM = 92949; const uint32 MINMAXEXPR_CMPTYPE_VERSION_NUM = 92948; const uint32 PARTITION_NAME_VERSION_NUM = 92947; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_951.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_951.sql new file mode 100644 index 0000000000..bccda40276 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_951.sql @@ -0,0 +1,11 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_recv_locations() CASCADE; + +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2872; + +CREATE FUNCTION pg_catalog.gs_get_recv_locations( + out received_lsn text, + out write_lsn text, + out flush_lsn text, + out replay_lsn text) +RETURNS record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_get_recv_locations'; +comment on function pg_catalog.gs_get_recv_locations() is 'statistics: information about currently wal locations'; \ No newline at end of file 
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_951.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_951.sql new file mode 100644 index 0000000000..bccda40276 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_951.sql @@ -0,0 +1,11 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_recv_locations() CASCADE; + +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2872; + +CREATE FUNCTION pg_catalog.gs_get_recv_locations( + out received_lsn text, + out write_lsn text, + out flush_lsn text, + out replay_lsn text) +RETURNS record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_get_recv_locations'; +comment on function pg_catalog.gs_get_recv_locations() is 'statistics: information about currently wal locations'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_951.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_951.sql new file mode 100644 index 0000000000..b748e70a34 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_951.sql @@ -0,0 +1,11 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_recv_locations() CASCADE; + +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2872; + +CREATE FUNCTION pg_catalog.gs_get_recv_locations( + out received_lsn text, + out write_lsn text, + out flush_lsn text, + out replay_lsn text) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE NOT SHIPPABLE ROWS 10 as 'gs_get_recv_locations'; +comment on function pg_catalog.gs_get_recv_locations() is 'statistics: information about currently wal locations'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_951.sql 
b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_951.sql new file mode 100644 index 0000000000..b748e70a34 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_951.sql @@ -0,0 +1,11 @@ +DROP FUNCTION IF EXISTS pg_catalog.gs_get_recv_locations() CASCADE; + +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2872; + +CREATE FUNCTION pg_catalog.gs_get_recv_locations( + out received_lsn text, + out write_lsn text, + out flush_lsn text, + out replay_lsn text) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE NOT SHIPPABLE ROWS 10 as 'gs_get_recv_locations'; +comment on function pg_catalog.gs_get_recv_locations() is 'statistics: information about currently wal locations'; \ No newline at end of file -- Gitee From 79fee294d70b5406c8dcb656dd703b7a7f3ca089 Mon Sep 17 00:00:00 2001 From: hwhbj Date: Fri, 16 Aug 2024 10:49:28 +0800 Subject: [PATCH 183/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Denable=5Fsecurity=5Fp?= =?UTF-8?q?olicy=3Doff=E6=97=B6=E5=88=A0=E9=99=A4=E6=A0=87=E7=AD=BE?= =?UTF-8?q?=EF=BC=8C=E7=B3=BB=E7=BB=9F=E8=A1=A8=E4=B8=AD=E6=95=B0=E6=8D=AE?= =?UTF-8?q?=E6=9C=AA=E6=8C=89=E9=A2=84=E6=9C=9F=E6=9B=B4=E6=96=B0=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/security/gs_policy/policy_common.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/gausskernel/security/gs_policy/policy_common.cpp b/src/gausskernel/security/gs_policy/policy_common.cpp index beb0888101..ce7966a645 100644 --- a/src/gausskernel/security/gs_policy/policy_common.cpp +++ b/src/gausskernel/security/gs_policy/policy_common.cpp @@ -80,6 +80,13 @@ bool GsPolicyLabel::operator < (const GsPolicyLabel& arg) const if (res > 0) { return false; } + /* if data value is equal, compare by object oid */ + if (m_data_value_fqdn.m_value_object < arg.m_data_value_fqdn.m_value_object) { + return true; + } + if 
(m_data_value_fqdn.m_value_object > arg.m_data_value_fqdn.m_value_object) { + return false; + } /* if data value is equal, compare by data type */ return (strcasecmp(m_data_type.c_str(), arg.m_data_type.c_str()) < 0); } -- Gitee From 26fb4a237b9ee7c5edda3c07a79ecb82649ab03a Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Thu, 15 Aug 2024 14:50:44 +0800 Subject: [PATCH 184/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Dnormal=20reform=E6=97=B6=E5=9C=A8checkpoint=E5=89=8D?= =?UTF-8?q?=20=E8=AF=AF=E5=88=B7=E5=8E=9F=E4=B8=BB=E7=9A=84=E6=8E=A7?= =?UTF-8?q?=E5=88=B6=E6=96=87=E4=BB=B6=E5=88=B0=E8=87=AA=E5=B7=B1=E7=9A=84?= =?UTF-8?q?=E6=8E=A7=E5=88=B6=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/transam/xlog.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index b03251dc5b..ed746035e7 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -10162,8 +10162,9 @@ void StartupXLOG(void) * 1. standby node failover promoting. * 2. standby node switchover promoting. * 3. standby node start ondemand realtime build. + * 4. last ondemand-recovery redo phase failed, so read control file from origin primary during normal reform. 
*/ - if (!SS_STANDBY_FAILOVER && !SS_STANDBY_PROMOTING && SS_ONDEMAND_REALTIME_BUILD_DISABLED) { + if (!(ENABLE_DMS && g_instance.dms_cxt.SSRecoveryInfo.recovery_inst_id != SS_MY_INST_ID)) { UpdateControlFile(); } -- Gitee From be34dc2c2537e1375a48a676495e60f39b77e963 Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Tue, 13 Aug 2024 16:20:52 +0800 Subject: [PATCH 185/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E5=AE=9E=E6=97=B6=E6=9E=84=E5=BB=BA=E5=9C=A8ss=5Fonde?= =?UTF-8?q?mand=5Frecovery=5Fmem=5Fsize=E8=BE=83=E5=B0=8F=E7=9A=84?= =?UTF-8?q?=E6=83=85=E5=86=B5=E4=B8=8B=EF=BC=8CStartup=E7=BA=BF=E7=A8=8B?= =?UTF-8?q?=E5=8D=A1=E5=9C=A8DispatchXactRecord=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/transam/ondemand_extreme_rto/page_redo.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index 2086e5de24..17d0457fd6 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -1784,6 +1784,10 @@ bool TrxnManagerDistributeItemsBeforeEnd(RedoItem *item) TrxnManagerProcHashMapPrune(); } else { if (XLByteLT(item->record.EndRecPtr, g_redoWorker->nextPrunePtr)) { + if (XactHasSegpageRelFiles(&item->record)) { + uint32 expected = 1; + pg_atomic_compare_exchange_u32((volatile uint32 *)&(g_dispatcher->segpageXactDoneFlag), &expected, 0); + } DereferenceRedoItem(item); return exitFlag; } -- Gitee From cec8a80320970b82c8630b69a4fa2d2d92be3b7e Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Mon, 12 Aug 2024 16:59:28 +0800 Subject: [PATCH 186/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=8C=89=E9=9C=80=E5=9B=9E=E6=94=BEredo=E9=98=B6?= 
=?UTF-8?q?=E6=AE=B5=E6=89=A7=E8=A1=8Ccreate=20procedure=E6=8A=A5=E9=94=99?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/process/tcop/utility.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index 5000fc0196..d413deede6 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -664,6 +664,7 @@ void PreventCommandDuringSSOndemandRedo(Node* parseTree) case T_CreateFunctionStmt: /* no need to adapt */ case T_AlterFunctionStmt: /* no need to adapt */ case T_CompileStmt: + case T_DoStmt: case T_RenameStmt: case T_TransactionStmt: case T_ViewStmt: /* no need to adapt */ -- Gitee From aa5ac475016771c4fd3b2625030b408405c85ad0 Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Sat, 17 Aug 2024 15:38:57 +0800 Subject: [PATCH 187/347] =?UTF-8?q?1=E3=80=81=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E4=B8=8B=E5=85=B3=E9=97=AD=E5=8F=8C=E5=86=99=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=9B2=E3=80=81=E8=B0=83=E6=95=B4=E4=B8=80?= =?UTF-8?q?=E4=BA=9B=E6=8C=89=E9=9C=80=E5=9B=9E=E6=94=BE=E4=B8=8B=E7=9A=84?= =?UTF-8?q?=E6=97=A5=E5=BF=97=E6=89=93=E5=8D=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/initdb/initdb.cpp | 4 +++ .../utils/misc/postgresql_single.conf.sample | 1 + .../ddes/adapter/ss_reform_common.cpp | 6 ++-- .../ondemand_extreme_rto/page_redo.cpp | 28 ++++++++++++++++++- 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/src/bin/initdb/initdb.cpp b/src/bin/initdb/initdb.cpp index 02361f49b8..35aa599634 100644 --- a/src/bin/initdb/initdb.cpp +++ b/src/bin/initdb/initdb.cpp @@ -1603,6 +1603,10 @@ static void setup_config(void) securec_check_c(nRet, "\0", "\0"); conflines = replace_token(conflines, "#enable_segment = off", repltok); + nRet = 
strcpy_s(repltok, sizeof(repltok), "enable_double_write = off"); + securec_check_c(nRet, "\0", "\0"); + conflines = replace_token(conflines, "enable_double_write = on", repltok); + conflines = ss_addnodeparmater(conflines); } diff --git a/src/common/backend/utils/misc/postgresql_single.conf.sample b/src/common/backend/utils/misc/postgresql_single.conf.sample index d37d95ad37..8b0000d9d5 100644 --- a/src/common/backend/utils/misc/postgresql_single.conf.sample +++ b/src/common/backend/utils/misc/postgresql_single.conf.sample @@ -234,6 +234,7 @@ wal_level = hot_standby # minimal, archive, hot_standby or logical enable_incremental_checkpoint = on # enable incremental checkpoint incremental_checkpoint_timeout = 60s # range 1s-1h #pagewriter_sleep = 100ms # dirty page writer sleep time, 0ms - 1h +enable_double_write = on # enable double write # - Archiving - diff --git a/src/gausskernel/ddes/adapter/ss_reform_common.cpp b/src/gausskernel/ddes/adapter/ss_reform_common.cpp index 633588106b..aec72e0351 100644 --- a/src/gausskernel/ddes/adapter/ss_reform_common.cpp +++ b/src/gausskernel/ddes/adapter/ss_reform_common.cpp @@ -105,8 +105,10 @@ int SSXLogFileOpenAnyTLI(XLogSegNo segno, int emode, uint32 sources, char* xlog_ securec_check_ss(errorno, "", ""); errno = ENOENT; - ereport(emode, (errcode_for_file_access(), errmsg("[SS] could not open file \"%s\" (log segment %s): %m", path, - XLogFileNameP(t_thrd.xlog_cxt.ThisTimeLineID, segno)))); + if (!SS_ONDEMAND_REALTIME_BUILD_NORMAL) { + ereport(emode, (errcode_for_file_access(), errmsg("[SS] could not open file \"%s\" (log segment %s): %m", path, + XLogFileNameP(t_thrd.xlog_cxt.ThisTimeLineID, segno)))); + } return -1; } diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index 2086e5de24..ac37e41d6f 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ 
b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -4031,6 +4031,30 @@ static XLogRecPtr RequestPrimaryCkptAndUpdateCkptRedoPtr() return ckptRedoPtr; } +const char *PauseStatus2Str(ondemand_recovery_pause_status_t pauseState) +{ + switch (pauseState) { + case NOT_PAUSE: + return "not_pause"; + break; + case PAUSE_FOR_SYNC_REDO: + return "sync_redo"; + break; + case PAUSE_FOR_PRUNE_HASHMAP: + return "prune_hashmap"; + break; + case PAUSE_FOR_PRUNE_SEG_QUEUE: + return "prune_seg_queue"; + break; + case PAUSE_FOR_PRUNE_TRXN_QUEUE: + return "prune_trxn_queue"; + break; + default: + return "unkown"; + break; + } +} + static void OndemandPauseRedoAndRequestPrimaryDoCkpt(OndemandCheckPauseCB activatePauseFunc, OndemandCheckPauseCB continuePauseFunc, OndemandProcPauseStatusCB refreshPauseStatusFunc, OndemandProcPauseStatusCB logPauseStatusFunc, ondemand_recovery_pause_status_t pauseState, @@ -4045,7 +4069,7 @@ static void OndemandPauseRedoAndRequestPrimaryDoCkpt(OndemandCheckPauseCB activa int level = SS_ONDEMAND_REALTIME_BUILD_NORMAL ? 
LOG : WARNING; g_instance.dms_cxt.SSRecoveryInfo.ondemand_recovery_pause_status = pauseState; ereport(level, (errcode(ERRCODE_LOG), - errmsg("[On-demand] ondemand recovery meet pause status, type %d", pauseState))); + errmsg("[On-demand] ondemand recovery meet pause status, type %s", PauseStatus2Str(pauseState)))); do { // other redo workers will proc pause state directly if primary node crash if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { @@ -4063,6 +4087,8 @@ static void OndemandPauseRedoAndRequestPrimaryDoCkpt(OndemandCheckPauseCB activa RedoInterruptCallBack(); pg_usleep(100000L); /* 100 ms */ } while (continuePauseFunc()); + ereport(LOG, (errcode(ERRCODE_LOG), + errmsg("[On-demand] ondemand recovery cancel pause status"))); } g_instance.dms_cxt.SSRecoveryInfo.ondemand_recovery_pause_status = NOT_PAUSE; } -- Gitee From 1daa5d3f1a47f40ff3673b079ed711d275e1ca95 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Thu, 8 Aug 2024 15:30:49 +0800 Subject: [PATCH 188/347] =?UTF-8?q?=E6=A0=B9=E6=8D=AEfree=E6=A0=87?= =?UTF-8?q?=E8=AE=B0=E5=8E=BB=E9=87=8A=E6=94=BEtup?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_comp.cpp | 5 +- src/common/pl/plpgsql/src/pl_exec.cpp | 16 ++++-- .../expected/plpgsql_cursor_rowtype.out | 51 +++++++++++++++++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 47 +++++++++++++++++ 4 files changed, 112 insertions(+), 7 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp index b6f2f8499b..353d021241 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -3898,8 +3898,9 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ securec_check(rc, "\0", "\0"); rec->tup = (HeapTuple)tableam_tops_form_tuple(rec->tupdesc, NULL, nulls); - rec->freetupdesc = (rec->tupdesc != NULL) ? true : false; - rec->freetup = (rec->tup != NULL) ? 
true : false; + /* compile_tmp_cx will automatically free, there is no need to set free mark. */ + rec->freetupdesc = false; + rec->freetup = false; pfree_ext(nulls); if (target_cxt) { diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index c4acf98074..b088bd009b 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1186,7 +1186,7 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d rc = memset_s(newnulls, new_natts * sizeof(bool), true, new_natts * sizeof(bool)); securec_check(rc, "\0", "\0"); - if (!HeapTupleIsValid(rec->tup)) { + if (!HeapTupleIsValid(rec->tup) || rec->freetup == false) { rec->tupdesc = new_tupdesc; rec->tup = (HeapTuple)tableam_tops_form_tuple(new_tupdesc, NULL, newnulls); rec->freetupdesc = (rec->tupdesc != NULL) ? true : false; @@ -1247,10 +1247,17 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d } newtup = (HeapTuple)tableam_tops_modify_tuple(new_tup, new_tupdesc, newvalues, newnulls, replaces); - heap_freetuple_ext(rec->tup); - FreeTupleDesc(rec->tupdesc); + if (rec->freetup) { + heap_freetuple_ext(rec->tup); + } + if (rec->freetupdesc) { + FreeTupleDesc(rec->tupdesc); + } + rec->tup = newtup; rec->tupdesc = new_tupdesc; + rec->freetupdesc = (rec->tupdesc != NULL) ? true : false; + rec->freetup = (rec->tup != NULL) ? 
true : false; pfree_ext(replaces); pfree_ext(newvalues); pfree_ext(newnulls); @@ -8746,7 +8753,6 @@ static int exec_stmt_open(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt) for (int i = 0; i < estate->ndatums; i++) { if (estate->datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { PLpgSQL_rec *rec = (PLpgSQL_rec*)estate->datums[i]; - PLpgSQL_var *cursor = (PLpgSQL_var*)estate->datums[rec->cursorDno]; if (rec->cursorDno == curvar->dno) { MemoryContext temp = NULL; MemoryContext target_cxt = NULL; @@ -9516,7 +9522,7 @@ static void exec_move_row_from_fields(PLpgSQL_execstate *estate, PLpgSQL_datum * } rec->tup = newtup; - rec->freetup = true; + rec->freetup = (rec->tup != NULL) ? true : false; pfree_ext(replaces); pfree_ext(newvalues); pfree_ext(newnulls); diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 2d33e99bab..ac498a32de 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -142,6 +142,57 @@ INFO: 2 (1 row) +-- test change cxt +drop table if exists t_CurRowtype_Use_Case0007_1; +NOTICE: table "t_currowtype_use_case0007_1" does not exist, skipping +create table t_CurRowtype_Use_Case0007_1(col1 varchar(30),col2 varchar(30)); +insert into t_CurRowtype_Use_Case0007_1 values ('col1_a', 'col2_aa'); +insert into t_CurRowtype_Use_Case0007_1 values ('col1_b', 'col2_bb'); +drop table if exists t_CurRowtype_Use_Case0007_2; +NOTICE: table "t_currowtype_use_case0007_2" does not exist, skipping +create table t_CurRowtype_Use_Case0007_2(col1 varchar(30),col2 varchar(30)); +create or replace package pac_CurRowtype_Use_Case0007_5 is +cursor cur1 is select col1,col2 from t_CurRowtype_Use_Case0007_1; +var1 cur1%rowtype; +procedure p_CurRowtype_Use_Case0007_5(a cur1%rowtype); +end pac_CurRowtype_Use_Case0007_5; +/ +create or replace package body pac_CurRowtype_Use_Case0007_5 is +procedure p_CurRowtype_Use_Case0007_5(a cur1%rowtype) is 
+begin + var1.col1:='pack5_proc_col1'; + var1.col2:='pack5_proc_col2'; + insert into t_CurRowtype_Use_Case0007_2 values(var1.col1,var1.col2); +end; +end pac_CurRowtype_Use_Case0007_5; +/ +create or replace package pac_CurRowtype_Use_Case0007_6 is +cursor cur1 is select col1,col2 from t_CurRowtype_Use_Case0007_1; +var1 cur1%rowtype; +procedure p_CurRowtype_Use_Case0007_6; +end pac_CurRowtype_Use_Case0007_6; +/ +create or replace package body pac_CurRowtype_Use_Case0007_6 is +procedure p_CurRowtype_Use_Case0007_6 is +begin + open cur1; + fetch cur1 into var1; + pac_CurRowtype_Use_Case0007_5.p_CurRowtype_Use_Case0007_5(var1); +end; +end pac_CurRowtype_Use_Case0007_6; +/ +call pac_CurRowtype_Use_Case0007_6.p_CurRowtype_Use_Case0007_6(); + p_currowtype_use_case0007_6 +----------------------------- + +(1 row) + +drop table if exists t_CurRowtype_Use_Case0007_1; +drop table if exists t_CurRowtype_Use_Case0007_2; +drop package pac_CurRowtype_Use_Case0007_5; +--?.* +drop package pac_CurRowtype_Use_Case0007_6; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p_currowtype_use_case0007_6() -- test alias error create or replace procedure pro_cursor_args is diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index e6b3d09208..12558bbe79 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -135,6 +135,53 @@ end; call pro_cursor_no_args_1(); +-- test change cxt +drop table if exists t_CurRowtype_Use_Case0007_1; +create table t_CurRowtype_Use_Case0007_1(col1 varchar(30),col2 varchar(30)); +insert into t_CurRowtype_Use_Case0007_1 values ('col1_a', 'col2_aa'); +insert into t_CurRowtype_Use_Case0007_1 values ('col1_b', 'col2_bb'); + +drop table if exists t_CurRowtype_Use_Case0007_2; +create table t_CurRowtype_Use_Case0007_2(col1 varchar(30),col2 varchar(30)); + +create or replace package pac_CurRowtype_Use_Case0007_5 is +cursor cur1 is select col1,col2 
from t_CurRowtype_Use_Case0007_1; +var1 cur1%rowtype; +procedure p_CurRowtype_Use_Case0007_5(a cur1%rowtype); +end pac_CurRowtype_Use_Case0007_5; +/ +create or replace package body pac_CurRowtype_Use_Case0007_5 is +procedure p_CurRowtype_Use_Case0007_5(a cur1%rowtype) is +begin + var1.col1:='pack5_proc_col1'; + var1.col2:='pack5_proc_col2'; + insert into t_CurRowtype_Use_Case0007_2 values(var1.col1,var1.col2); +end; +end pac_CurRowtype_Use_Case0007_5; +/ + +create or replace package pac_CurRowtype_Use_Case0007_6 is +cursor cur1 is select col1,col2 from t_CurRowtype_Use_Case0007_1; +var1 cur1%rowtype; +procedure p_CurRowtype_Use_Case0007_6; +end pac_CurRowtype_Use_Case0007_6; +/ +create or replace package body pac_CurRowtype_Use_Case0007_6 is +procedure p_CurRowtype_Use_Case0007_6 is +begin + open cur1; + fetch cur1 into var1; + pac_CurRowtype_Use_Case0007_5.p_CurRowtype_Use_Case0007_5(var1); +end; +end pac_CurRowtype_Use_Case0007_6; +/ + +call pac_CurRowtype_Use_Case0007_6.p_CurRowtype_Use_Case0007_6(); +drop table if exists t_CurRowtype_Use_Case0007_1; +drop table if exists t_CurRowtype_Use_Case0007_2; +drop package pac_CurRowtype_Use_Case0007_5; +drop package pac_CurRowtype_Use_Case0007_6; + -- test alias error create or replace procedure pro_cursor_args is -- Gitee From bb6c8021da0608ff793c3d01925408e4fb6e59da Mon Sep 17 00:00:00 2001 From: Rock Date: Wed, 14 Aug 2024 17:06:29 +0800 Subject: [PATCH 189/347] =?UTF-8?q?=E3=80=90=E4=B8=AD=E7=A7=BB=E4=BF=A1?= =?UTF-8?q?=E6=81=AF=E6=8A=80=E6=9C=AF=E3=80=91bugfix:gs=5Fdump=E6=8C=87?= =?UTF-8?q?=E5=AE=9Aschema=E5=AF=BC=E5=87=BA=E6=97=B6=E6=B2=A1=E6=9C=89?= =?UTF-8?q?=E5=AF=BC=E5=87=BA=E5=90=8C=E4=B9=89=E8=AF=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/pg_dump.cpp | 9 +++++-- src/test/regress/input/gs_dump_synonym.source | 16 +++++++++++++ .../regress/output/gs_dump_synonym.source | 24 +++++++++++++++++++ src/test/regress/parallel_schedule0 | 2 +- 
src/test/regress/parallel_schedule0A | 2 +- 5 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 src/test/regress/input/gs_dump_synonym.source create mode 100644 src/test/regress/output/gs_dump_synonym.source diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index c82db46370..4cf78c39dd 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -1535,7 +1535,6 @@ void getopt_dump(int argc, char** argv, struct option options[], int* result) break; case 'n': /* include schema(s) */ simple_string_list_append(&schema_include_patterns, optarg); - include_everything = false; break; case 'N': /* exclude schema(s) */ @@ -22539,6 +22538,7 @@ static void dumpSynonym(Archive* fout) int i_tableoid = 0; int i_synname = 0; int i_nspname = 0; + int i_nspoid = 0; int i_rolname = 0; int i_synobjschema = 0; int i_synobjname = 0; @@ -22569,7 +22569,7 @@ static void dumpSynonym(Archive* fout) * Only the super user can access pg_authid. Therefore, user verification is ignored. 
*/ appendPQExpBuffer(query, - "SELECT s.oid, s.tableoid, s.synname, n.nspname, a.rolname, s.synobjschema, s.synobjname " + "SELECT s.oid, s.tableoid, s.synname, n.nspname, n.oid as nspoid, a.rolname, s.synobjschema, s.synobjname " "FROM pg_synonym s, pg_namespace n, pg_authid a " "WHERE n.oid = s.synnamespace AND s.synowner = a.oid;"); @@ -22586,11 +22586,16 @@ static void dumpSynonym(Archive* fout) i_tableoid = PQfnumber(res, "tableoid"); i_synname = PQfnumber(res, "synname"); i_nspname = PQfnumber(res, "nspname"); + i_nspoid = PQfnumber(res, "nspoid"); i_rolname = PQfnumber(res, "rolname"); i_synobjschema = PQfnumber(res, "synobjschema"); i_synobjname = PQfnumber(res, "synobjname"); for (i = 0; i < ntups; i++) { + Oid schemaOid = atooid(PQgetvalue(res, i, i_nspoid)); + if (!simple_oid_list_member(&schema_include_oids, schemaOid)) { + continue; + } char* synname = NULL; char* nspname = NULL; char* rolname = NULL; diff --git a/src/test/regress/input/gs_dump_synonym.source b/src/test/regress/input/gs_dump_synonym.source new file mode 100644 index 0000000000..7ef1120d46 --- /dev/null +++ b/src/test/regress/input/gs_dump_synonym.source @@ -0,0 +1,16 @@ +drop database if exists dump_synonym; +create database dump_synonym; +create database restore_synonym_db; +\c dump_synonym +create schema t1; +create schema t2; +create table t1.test1(col1 int); +create or replace synonym t1.syn1 for t1.test1; +create or replace synonym t2.syn2 for t1.test1; +\! @abs_bindir@/gs_dump -p @portstring@ -n t1 -f @abs_bindir@/dump_synonym.dmp -s dump_synonym > @abs_bindir@/gs_dump.log 2>&1 ; echo $? +\! @abs_bindir@/gsql -d restore_synonym_db -p @portstring@ -f @abs_bindir@/dump_synonym.dmp > @abs_bindir@/gs_dump.log 2>&1 ; echo $? 
+\c restore_synonym_db +select synname from pg_synonym; +drop database dump_synonym; +\c regression +drop database restore_synonym_db; diff --git a/src/test/regress/output/gs_dump_synonym.source b/src/test/regress/output/gs_dump_synonym.source new file mode 100644 index 0000000000..c631a0ed7e --- /dev/null +++ b/src/test/regress/output/gs_dump_synonym.source @@ -0,0 +1,24 @@ +drop database if exists dump_synonym; +NOTICE: database "dump_synonym" does not exist, skipping +create database dump_synonym; +create database restore_synonym_db; +\c dump_synonym +create schema t1; +create schema t2; +create table t1.test1(col1 int); +create or replace synonym t1.syn1 for t1.test1; +create or replace synonym t2.syn2 for t1.test1; +\! @abs_bindir@/gs_dump -p @portstring@ -n t1 -f @abs_bindir@/dump_synonym.dmp -s dump_synonym > @abs_bindir@/gs_dump.log 2>&1 ; echo $? +0 +\! @abs_bindir@/gsql -d restore_synonym_db -p @portstring@ -f @abs_bindir@/dump_synonym.dmp > @abs_bindir@/gs_dump.log 2>&1 ; echo $? 
+0 +\c restore_synonym_db +select synname from pg_synonym; + synname +--------- + syn1 +(1 row) + +drop database dump_synonym; +\c regression +drop database restore_synonym_db; diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index d396ebe1a1..921478c21b 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -73,7 +73,7 @@ test: transaction_with_snapshot test: select_into_user_defined_variables test: select_into_file -test: gs_dump_package trigger_dump gs_dumpall +test: gs_dump_package trigger_dump gs_dumpall gs_dump_synonym test: out_param_func #test: sqlcode_cursor test: gs_dump_tableconstraint diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index b64f370871..213015b877 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -63,7 +63,7 @@ test: set_transaction_test test: select_into_user_defined_variables test: select_into_file -test: gs_dump_package trigger_dump +test: gs_dump_package trigger_dump gs_dump_synonym test: out_param_func out_param_func_overload #test: sqlcode_cursor test: gs_dump_tableconstraint -- Gitee From f792af04770717f863be194e2b9914a829a5baa2 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Tue, 13 Aug 2024 20:39:10 +0800 Subject: [PATCH 190/347] =?UTF-8?q?cherry=20pick=202bb007b=20from=20https:?= =?UTF-8?q?//gitee.com/chen-zhikai-999/openGauss-server/pulls/6012=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=8F=8C=E9=9B=86=E7=BE=A4=E6=8F=A1=E6=89=8B?= =?UTF-8?q?=E5=A4=B1=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/interfaces/libpq/fe-connect.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/common/interfaces/libpq/fe-connect.cpp b/src/common/interfaces/libpq/fe-connect.cpp index bf9793955a..3b4d907e7b 100644 --- a/src/common/interfaces/libpq/fe-connect.cpp +++ 
b/src/common/interfaces/libpq/fe-connect.cpp @@ -1794,8 +1794,10 @@ static int connectDBComplete(PGconn* conn) #ifdef ENABLE_LITE_MODE destroyPQExpBuffer(errMsgBuf); #endif - char* dbName = conn->dbName; - if (conn->status == CONNECTION_OK && dbName != NULL && strcmp(dbName, "replication") != 0) { + char* replication = conn->replication; + if (conn->status == CONNECTION_OK && (replication == NULL || strcasecmp(replication, "false") == 0 || + strcasecmp(replication, "off") == 0 || strcasecmp(replication, "no") == 0 || + strcasecmp(replication, "0") == 0)) { PQgetDBCompatibility(conn); } return 1; /* success! */ -- Gitee From 3e55a1f4df54afe6e6ca740d30b73f18dbd6fd0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Fri, 16 Aug 2024 16:55:01 +0800 Subject: [PATCH 191/347] =?UTF-8?q?upage=20ubtree=E6=A0=A1=E9=AA=8C?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=89=93=E5=8D=B0rnode=20block=20offset?= =?UTF-8?q?=E4=BF=A1=E6=81=AF=20ubtree=E4=BC=98=E5=8C=96=E6=A0=A1=E9=AA=8C?= =?UTF-8?q?=E7=BA=A7=E5=88=AB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/verify.cpp | 2 +- .../storage/access/ubtree/ubtdump.cpp | 199 +++++++++------- .../storage/access/ubtree/ubtrecycle.cpp | 40 ++-- .../storage/access/ustore/knl_pruneuheap.cpp | 3 +- .../storage/access/ustore/knl_uheap.cpp | 24 +- .../storage/access/ustore/knl_undoaction.cpp | 5 +- .../storage/access/ustore/knl_upage.cpp | 215 +++++++++++------- .../storage/access/ustore/knl_uredo.cpp | 26 +-- .../storage/access/ustore/knl_uundorecord.cpp | 39 ++-- .../access/ustore/undo/knl_uundotxn.cpp | 2 +- src/include/access/ustore/knl_upage.h | 9 +- 11 files changed, 324 insertions(+), 240 deletions(-) diff --git a/src/gausskernel/optimizer/commands/verify.cpp b/src/gausskernel/optimizer/commands/verify.cpp index f56f5c5963..44b068a546 100644 --- a/src/gausskernel/optimizer/commands/verify.cpp +++ 
b/src/gausskernel/optimizer/commands/verify.cpp @@ -1898,7 +1898,7 @@ static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, ForkNum UBTRecycleQueueVerifyPageOffline(rel, page, blkno); } } else { - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel); + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel, NULL, blkno); } } diff --git a/src/gausskernel/storage/access/ubtree/ubtdump.cpp b/src/gausskernel/storage/access/ubtree/ubtdump.cpp index 9b3bd338bd..896d33fa97 100644 --- a/src/gausskernel/storage/access/ubtree/ubtdump.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtdump.cpp @@ -34,7 +34,7 @@ static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, Off static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno); static void UBTreeVerifyHeader(PageHeaderData* page, Relation rel, BlockNumber blkno, uint16 pageSize, uint16 headerSize); static void UBTreeVerifyRowptr(PageHeaderData* header, Page page, BlockNumber blkno, OffsetNumber offset, - ItemIdSort indexSortPtr, const char *indexName, Oid relOid); + ItemIdSort indexSortPtr, const char *indexName, Relation rel); void UBTreeVerifyIndex(Relation rel, TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols) { @@ -404,33 +404,33 @@ char* UBTGetVerifiedResultStr(uint32 type) } } -bool UBTreeVerifyTupleTransactionStatus(Relation rel, BlockNumber blkno, TransactionIdStatus xminStatus, TransactionIdStatus xmaxStatus, +static bool UBTreeVerifyTupleTransactionStatus(Relation rel, BlockNumber blkno, OffsetNumber offnum, + TransactionIdStatus xminStatus, TransactionIdStatus xmaxStatus, TransactionId xmin, TransactionId xmax, CommitSeqNo xminCSN, CommitSeqNo xmaxCSN) { - if (u_sess->attr.attr_storage.ustore_verify_level < USTORE_VERIFY_FAST) { - return false; - } - bool tranStatusError = false; switch (xminStatus) { case XID_COMMITTED: - tranStatusError = (xmaxStatus == XID_COMMITTED && xminCSN > xmaxCSN && xmaxCSN != COMMITSEQNO_FROZEN) 
? true : false; + tranStatusError = (xmaxStatus == XID_COMMITTED && xminCSN > xmaxCSN && xmaxCSN != COMMITSEQNO_FROZEN); break; case XID_INPROGRESS: - tranStatusError = (xmaxStatus == XID_COMMITTED && TransactionIdIsValid(xmax)) ? true : false; + tranStatusError = (xmaxStatus == XID_COMMITTED && TransactionIdIsValid(xmax)); break; case XID_ABORTED: - tranStatusError = (xminStatus == XID_ABORTED && xmaxStatus != XID_ABORTED) ? true : false; + tranStatusError = (xminStatus == XID_ABORTED && xmaxStatus != XID_ABORTED); break; default: break; } + if (tranStatusError) { + RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] xmin or xmax status invalid, relName=%s, blkno=%u, xmin=%lu, xmax=%lu, xminStatus=%d, " - "xmaxStatus=%d, xminCSN=%lu, xmaxCSN=%lu.", NameStr(rel->rd_rel->relname), blkno, xmin, xmax, xminStatus, - xmaxStatus, xminCSN, xmaxCSN))); + "[Verify UBTree] xmin or xmax status invalid, xmin=%lu, xmax=%lu, xminStatus=%d, " + "xmaxStatus=%d, xminCSN=%lu, xmaxCSN=%lu, rnode[%u,%u,%u], block %u, offnum %u.", + xmin, xmax, xminStatus, xmaxStatus, xminCSN, xmaxCSN, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); return false; } return true; @@ -440,21 +440,21 @@ static int ItemCompare(const void *item1, const void *item2) { return ((ItemIdSort)item1)->start - ((ItemIdSort)item2)->start; } - + void UBTreeVerifyHikey(Relation rel, Page page, BlockNumber blkno) { CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - Oid relOid = (rel ? rel->rd_id : InvalidOid); UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); if (P_RIGHTMOST(opaque)) return; + RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (P_ISLEAF(opaque) ? 
(opaque->btpo.level != 0) : (opaque->btpo.level == 0)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted rel %s, level %u, flag %u, tid[%d:%d]", (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), - opaque->btpo.level, opaque->btpo_flags, relOid, blkno))); + errmsg("UBTREEVERIFY corrupted. level %u, flag %u, rnode[%u,%u,%u], block %u.", + opaque->btpo.level, opaque->btpo_flags, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); return; } @@ -475,34 +475,39 @@ void UBTreeVerifyHikey(Relation rel, Page page, BlockNumber blkno) index_deform_tuple(lastTuple, RelationGetDescr(rel), values, isnull); char *keyDesc = BuildIndexValueDescription(rel, values, isnull); ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted key %s with HIKEY compare in rel %s, tid[%d:%d]", - (keyDesc ? keyDesc : "(UNKNOWN)"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), relOid, blkno))); + errmsg("UBTREEVERIFY corrupted key %s with HIKEY compare in rel %s, rnode[%u,%u,%u], block %u.", + (keyDesc ? keyDesc : "(UNKNOWN)"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); } - + void UBTreeVerifyPageXid(Relation rel, BlockNumber blkno, TransactionId xidBase, TransactionId pruneXid) { CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); - Oid relOid = (rel ? rel->rd_id : InvalidOid); - + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (TransactionIdFollows(xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid) || TransactionIdPrecedes(xidBase + MaxShortTransactionId, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] ubtree's page xid_base invalid: indexName=%s, oid=%u, blkno=%u, xid_base=%lu, nextxid=%lu.", - indexName, relOid, blkno, xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + "[Verify UBTree] ubtree's page xid_base invalid: indexName=%s, xid_base=%lu, nextxid=%lu, " + "rnode[%u,%u,%u], block %u.", + indexName, xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); return; } if (TransactionIdFollows(pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] ubtree's page prune_xid invalid: indexName=%s, oid=%u, blkno=%u, xid_base=%lu, nextxid=%lu.", - indexName, relOid, blkno, pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + "[Verify UBTree] ubtree's page prune_xid invalid: indexName=%s, xid_base=%lu, nextxid=%lu, " + "rnode[%u,%u,%u], block %u.", + indexName, pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); return; } } -void UBTreeVerifyTupleTransactionInfo(Relation rel, Page page, OffsetNumber offnum, bool fromInsert, TransactionId xidBase) +static void UBTreeVerifyTupleTransactionInfo(Relation rel, BlockNumber blkno, Page page, + OffsetNumber offnum, bool fromInsert, TransactionId xidBase) { CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) @@ -511,26 +516,31 @@ void UBTreeVerifyTupleTransactionInfo(Relation rel, Page page, OffsetNumber offn IndexTuple tuple = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(tuple); - TransactionId xid = 
fromInsert? ShortTransactionIdToNormal(xidBase, uxid->xmin) : ShortTransactionIdToNormal(xidBase, uxid->xmax); + TransactionId xid = fromInsert ? + ShortTransactionIdToNormal(xidBase, uxid->xmin) : ShortTransactionIdToNormal(xidBase, uxid->xmax); + RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (TransactionIdIsNormal(xid) && !TransactionIdIsCurrentTransactionId(xid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmodule(MOD_USTORE), errmsg("[Verify UBTree] tuple xid %s invalid: indexName=%s, oid=%u, xid=%lu.", - (fromInsert ? "xmin" : "xmax"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), - (rel ? rel->rd_id : InvalidOid), xid))); + errmodule(MOD_USTORE), errmsg("[Verify UBTree] tuple xid %s invalid: indexName=%s, xid=%lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", + (fromInsert ? "xmin" : "xmax"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), xid, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); } } -void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber blkno, OffsetNumber startoffset, bool fromInsert, - TransactionId xidBase) +static void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber blkno, + OffsetNumber startoffset, bool fromInsert, TransactionId xidBase) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) TransactionId maxXmax = InvalidTransactionId; TransactionId minCommittedXmax = MaxTransactionId; TransactionId pruneXid = ShortTransactionIdToNormal(xidBase, ((PageHeader)page)->pd_prune_xid); OffsetNumber maxoff = PageGetMaxOffsetNumber(page); TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; + for (OffsetNumber offnum = startoffset; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { ItemId itemid = PageGetItemId(page, offnum); IndexTuple tuple = (IndexTuple)PageGetItem(page, itemid); @@ -540,8 +550,10 @@ void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber b if (TransactionIdFollows(Max(xmin, xmax), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] index tuple xid(xmin/xmax) is bigger than nextXid: relName=%s, blkno=%u, xmin=%lu, xmax=%lu, nextxid=%lu, xid_base=%lu.", - NameStr(rel->rd_rel->relname), blkno, xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextXid, xidBase))); + "[Verify UBTree] index tuple xid(xmin/xmax) is bigger than nextXid: " + "xmin=%lu, xmax=%lu, nextxid=%lu, xid_base=%lu, rnode[%u,%u,%u], block %u, offnum %u.", + xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextXid, xidBase, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); return; } @@ -549,15 +561,17 @@ void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber b if (TransactionIdIsNormal(xmin) && !IndexItemIdIsFrozen(itemid) && TransactionIdPrecedes(xmin + base, oldestXmin)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] index tuple xmin invalid: relName=%s, blkno=%u, xmin=%lu, oldest_xmin=%lu, xid_base=%lu.", - NameStr(rel->rd_rel->relname), blkno, xmin, oldestXmin, xidBase))); + "[Verify UBTree] index tuple xmin invalid: xmin=%lu, oldest_xmin=%lu, xid_base=%lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", + xmin, oldestXmin, xidBase, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); return; } if (TransactionIdIsNormal(xmax) && !ItemIdIsDead(itemid) && TransactionIdPrecedes(xmax + base, oldestXmin)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] 
index tuple xmin invalid: relName=%s, blkno=%u, xmax=%lu, oldest_xmin=%lu, xid_base=%lu.", - NameStr(rel->rd_rel->relname), blkno, xmax, oldestXmin, xidBase))); + "[Verify UBTree] index tuple xmin invalid: xmax=%lu, oldest_xmin=%lu, xid_base=%lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", + xmax, oldestXmin, xidBase, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); return; } if (!u_sess->attr.attr_storage.ustore_verify) { @@ -576,10 +590,12 @@ void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber b if (xmaxStatus == XID_COMMITTED && TransactionIdPrecedes(xmax, minCommittedXmax)) { minCommittedXmax = xmax; } + if (TransactionIdFollows(xmax, maxXmax)) { maxXmax = xmax; } - if (!UBTreeVerifyTupleTransactionStatus(rel, blkno, xminStatus, xmaxStatus, xmin, xmax, xminCSN, xmaxCSN)) { + if (!UBTreeVerifyTupleTransactionStatus(rel, blkno, offnum, xminStatus, xmaxStatus, + xmin, xmax, xminCSN, xmaxCSN)) { return; } } @@ -588,8 +604,9 @@ void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber b UBTPageOpaqueInternal ubtOpaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); if (TransactionIdFollows(uopaque->xact, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] xact xid is bigger than nextXid: relName=%s, blkno=%u, xact=%lu, nextxid=%lu.", - NameStr(rel->rd_rel->relname), blkno, uopaque->xact, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + "[Verify UBTree] xact xid is bigger than nextXid: xact=%lu, nextxid=%lu, rnode[%u,%u,%u], block %u.", + uopaque->xact, t_thrd.xact_cxt.ShmemVariableCache->nextXid, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); return; } if (!u_sess->attr.attr_storage.ustore_verify) { @@ -598,19 +615,21 @@ void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber b if (minCommittedXmax != MaxTransactionId && TransactionIdIsValid(pruneXid) && 
TransactionIdFollows(minCommittedXmax, pruneXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] min_committed_xmax is bigger than prune_xid: relName=%s, blkno=%u, prune_xid=%lu, minCommittedXmax=%lu.", - NameStr(rel->rd_rel->relname), blkno, pruneXid, minCommittedXmax))); + "[Verify UBTree] min_committed_xmax is bigger than prune_xid: prune_xid=%lu, minCommittedXmax=%lu, " + "rnode[%u,%u,%u], block %u.", + pruneXid, minCommittedXmax, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); return; } if (TransactionIdIsValid(maxXmax) && TransactionIdIsValid(ubtOpaque->last_delete_xid) && TransactionIdFollows(maxXmax, ubtOpaque->last_delete_xid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] max_xmax is bigger than last_delete_xid: relName=%s, blkno=%u, last_delete_xid on page=%lu, actual value=%lu.", - NameStr(rel->rd_rel->relname), blkno, ubtOpaque->last_delete_xid, maxXmax))); + "[Verify UBTree] max_xmax is bigger than last_delete_xid: last_delete_xid on page=%lu, actual value=%lu, " + "rnode[%u,%u,%u], block %u.", + ubtOpaque->last_delete_xid, maxXmax, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); } } - + void UBTreeVerifyRowptrDML(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum) { if (u_sess->attr.attr_storage.ustore_verify) { @@ -622,7 +641,6 @@ void UBTreeVerifyRowptrDML(Relation rel, Page page, BlockNumber blkno, OffsetNum CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); - Oid relOid = (rel ? 
rel->rd_id : InvalidOid); UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); OffsetNumber firstPos = P_FIRSTDATAKEY(opaque); OffsetNumber lastPos = PageGetMaxOffsetNumber(page); @@ -630,12 +648,12 @@ void UBTreeVerifyRowptrDML(Relation rel, Page page, BlockNumber blkno, OffsetNum return; } ItemIdSort indexSortPtr = (ItemIdSort)palloc0(sizeof(ItemIdSortData)); - UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, offnum, indexSortPtr, indexName, relOid); + UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, offnum, indexSortPtr, indexName, rel); pfree(indexSortPtr); UBTreeVerifyTupleKey(rel, page, blkno, offnum, firstPos, lastPos); } - + void UBTreeVerifyItems(Relation rel, BlockNumber blkno, TupleDesc desc, BTScanInsert cmpKeys, int keysz, IndexTuple currKey, IndexTuple nextKey, UBTPageOpaqueInternal opaque) { @@ -648,6 +666,8 @@ void UBTreeVerifyItems(Relation rel, BlockNumber blkno, TupleDesc desc, BTScanIn char *nextkeyDesc = NULL; Datum values[INDEX_MAX_KEYS]; bool isnull[INDEX_MAX_KEYS]; + RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; + if (P_ISLEAF(opaque)) { index_deform_tuple(currKey, RelationGetDescr(rel), values, isnull); currkeyDesc = BuildIndexValueDescription(rel, values, isnull); @@ -655,14 +675,15 @@ void UBTreeVerifyItems(Relation rel, BlockNumber blkno, TupleDesc desc, BTScanIn nextkeyDesc = BuildIndexValueDescription(rel, values, isnull); } ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, relName=%s, blkno=%u.", (nextkeyDesc ? nextkeyDesc : "(unknown)"), - (currkeyDesc ? currkeyDesc : "(unknown)"), NameStr(rel->rd_rel->relname), blkno))); + "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, rnode[%u,%u,%u], block %u.", + (nextkeyDesc ? nextkeyDesc : "(unknown)"), (currkeyDesc ? 
currkeyDesc : "(unknown)"), + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); } - + static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, OffsetNumber firstPos, OffsetNumber lastPos) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); TupleDesc desc = RelationGetDescr(rel); @@ -681,13 +702,12 @@ static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, Off } pfree(cmpKeys); } - + static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); - Oid relOid = (rel ? rel->rd_id : InvalidOid); TupleDesc desc = RelationGetDescr(rel); int keysz = IndexRelationGetNumberOfKeyAttributes(rel); ItemIdSortData itemidBase[MaxIndexTuplesPerPage]; @@ -701,10 +721,11 @@ static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno) } BTScanInsert cmpKeys = UBTreeMakeScanKey(rel, NULL); - UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, firstPos, sortPtr, indexName, relOid); + UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, firstPos, sortPtr, indexName, rel); IndexTuple currKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, firstPos)); OffsetNumber nextPos = OffsetNumberNext(firstPos); sortPtr++; + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; while (nextPos <= lastPos) { ItemId itemid = PageGetItemId(page, nextPos); IndexTuple nextKey = (IndexTuple)PageGetItem(page, itemid); @@ -720,15 +741,17 @@ static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno) index_deform_tuple(nextKey, RelationGetDescr(rel), values, isnull); nextkeyDesc = BuildIndexValueDescription(rel, values, isnull); } - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, indexName=%s, oid %u, blkno=%u.", - (nextkeyDesc ? nextkeyDesc : "(unknown)"), (currkeyDesc ? currkeyDesc : "(unknown)"), indexName, relOid, blkno))); + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg( + "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, indexName=%s, " + "rnode[%u,%u,%u], block %u.", + (nextkeyDesc ? nextkeyDesc : "(unknown)"), (currkeyDesc ? currkeyDesc : "(unknown)"), indexName, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); pfree(cmpKeys); return; } } currKey = nextKey; - UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, nextPos, sortPtr, indexName, relOid); + UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, nextPos, sortPtr, indexName, rel); nextPos = OffsetNumberNext(nextPos); sortPtr++; } @@ -746,10 +769,12 @@ static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno) ItemIdSort tempPtr2 = &itemidBase[i + 1]; if (tempPtr1->end > tempPtr2->start) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] Ubtree ItemIdSort conflict: indexName=%s, oid=%u, blkno=%u, ptr1offset %u, " - "ptr1start = %u, ptr1end = %u, ptr2offset = %u, ptr2start = %u, ptr2end = %u.", - indexName, relOid, blkno, tempPtr1->offset, tempPtr1->start, tempPtr1->end, - tempPtr2->offset, tempPtr2->start, tempPtr2->end))); + "[Verify UBTree] Ubtree ItemIdSort conflict: indexName=%s, 
ptr1offset %u, " + "ptr1start = %u, ptr1end = %u, ptr2offset = %u, ptr2start = %u, ptr2end = %u, " + "rnode[%u,%u,%u], block %u.", + indexName, tempPtr1->offset, tempPtr1->start, tempPtr1->end, + tempPtr2->offset, tempPtr2->start, tempPtr2->end, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); pfree(cmpKeys); return; } @@ -757,7 +782,7 @@ static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno) pfree(cmpKeys); } - + void UBTreeVerifyPage(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) { BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); @@ -778,43 +803,47 @@ void UBTreeVerifyPage(Relation rel, Page page, BlockNumber blkno, OffsetNumber o } TransactionId xidBase = ubtOpaque->xid_base; UBTreeVerifyPageXid(rel, blkno, xidBase, ShortTransactionIdToNormal(xidBase, ((PageHeader)page)->pd_prune_xid)); - UBTreeVerifyTupleTransactionInfo(rel, page, offnum, fromInsert, xidBase); + UBTreeVerifyTupleTransactionInfo(rel, blkno, page, offnum, fromInsert, xidBase); } - + static void UBTreeVerifyHeader(PageHeaderData* page, Relation rel, BlockNumber blkno, uint16 pageSize, uint16 headerSize) { CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - + + RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (pageSize != BLCKSZ || (page->pd_flags & ~PD_VALID_FLAG_BITS) != 0 || page->pd_lower < headerSize || page->pd_lower > page->pd_upper || page->pd_upper > page->pd_special || page->pd_special > BLCKSZ) { const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); - Oid relOid = (rel ? 
rel->rd_id : InvalidOid); ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] index page header invalid: indexName=%s, oid=%u, blkno=%u, size=%u," - "flags=%u, lower=%u, upper=%u, special=%u.", indexName, relOid, blkno, headerSize, - page->pd_flags, page->pd_lower, page->pd_upper, page->pd_special))); + "[Verify UBTree] index page header invalid: indexName=%s, size=%u," + "flags=%u, lower=%u, upper=%u, special=%u, rnode[%u,%u,%u], block %u.", indexName, headerSize, + page->pd_flags, page->pd_lower, page->pd_upper, page->pd_special, + rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); } } - + static void UBTreeVerifyRowptr(PageHeaderData* header, Page page, BlockNumber blkno, OffsetNumber offset, - ItemIdSort indexSortPtr, const char *indexName, Oid relOid) + ItemIdSort indexSortPtr, const char *indexName, Relation rel) { CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) ItemId itemId = PageGetItemId(page, offset); unsigned rpStart = ItemIdGetOffset(itemId); Size rpLen = ItemIdGetLength(itemId); + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (!ItemIdIsUsed(itemId)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] row pointer is unused: indexName=%s, oid=%u, blkno=%u, offset=%u, " - "rowPtr startOffset=%u, rowPtr len=%lu.", indexName, relOid, blkno, offset, rpStart, rpLen))); + "[Verify UBTree] row pointer is unused: indexName=%s, " + "rowPtr startOffset=%u, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", + indexName, rpStart, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offset))); return; } if (!ItemIdHasStorage(itemId)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] row pointer has no storage: indexName=%s, oid=%u, blkno=%u, offset=%u, " - "rowPtr startOffset=%u, rowPtr len=%lu.", indexName, relOid, blkno, offset, rpStart, rpLen))); + "[Verify UBTree] row pointer has no storage: indexName=%s, " + "rowPtr startOffset=%u, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", + indexName, rpStart, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offset))); return; } indexSortPtr->start = rpStart; @@ -822,15 +851,17 @@ static void UBTreeVerifyRowptr(PageHeaderData* header, Page page, BlockNumber bl indexSortPtr->offset = offset; if (indexSortPtr->start < header->pd_upper || indexSortPtr->end > header->pd_special) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] The item corresponding to row pointer exceeds the range of item stored in the page: indexName=%s, oid=%u, " - "blkno=%u, offset=%u, rowPtr startOffset=%u, rowPtr len=%lu.", indexName, relOid, blkno, offset, rpStart, rpLen))); + "[Verify UBTree] The item corresponding to row pointer exceeds the range of item stored in the page: " + "indexName=%s, rowPtr startOffset=%u, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", + indexName, rpStart, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, 
offset))); return; } int tupleSize = IndexTupleSize((IndexTuple)PageGetItem(page, itemId)); if (tupleSize > (int)rpLen) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] tuple size is bigger than item's len: indexName=%s, oid=%u, blkno=%u, offset=%u, " - "tuple size=%d, rowPtr len=%lu.", indexName, relOid, blkno, offset, tupleSize, rpLen))); + "[Verify UBTree] tuple size is bigger than item's len: indexName=%s, " + "tuple size=%d, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", + indexName, tupleSize, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offset))); return; } } diff --git a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp index d4e68db9b3..25ac4091e7 100644 --- a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp @@ -1222,13 +1222,12 @@ static void UBTRecycleQueueVerifyHeader(UBTRecycleQueueHeader header, Relation r CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) uint32 urqBlocks = MaxBlockNumber; - Oid relOid = InvalidOid; bool headerError = false; + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (rel != NULL) { RelationOpenSmgr(rel); urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); - relOid = rel->rd_id; } headerError = (header->flags > (URQ_HEAD_PAGE | URQ_TAIL_PAGE)) || (IsNormalOffset(header->head) && !IsNormalOffset(header->tail)) || @@ -1238,13 +1237,14 @@ static void UBTRecycleQueueVerifyHeader(UBTRecycleQueueHeader header, Relation r if (headerError) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq header is invalid : oid=%u, blkno=%u, flags=%u, head=%d, tail=%d," - " free_items=%d, free_list_head=%d, prev_blkno=%u, next_blkno=%u", relOid, blkno, header->flags, - header->head, header->tail, header->freeItems, header->freeListHead, header->prevBlkno, header->nextBlkno))); + "[Verify URQ] urq header is invalid : flags=%u, head=%d, tail=%d, " + "free_items=%d, free_list_head=%d, prev_blkno=%u, next_blkno=%u, rnode[%u,%u,%u], block %u.", + header->flags, header->head, header->tail, header->freeItems, header->freeListHead, + header->prevBlkno, header->nextBlkno, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); } } -void UBTRecycleQueueVerifyAllItems(UBTRecycleQueueHeader header, Oid oid, BlockNumber blkno) +static void UBTRecycleQueueVerifyAllItems(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno) { TransactionId maxXid = ReadNewTransactionId(); TransactionId prevXid = 0; @@ -1254,7 +1254,8 @@ void UBTRecycleQueueVerifyAllItems(UBTRecycleQueueHeader header, Oid oid, BlockN uint16 prevOffset = InvalidOffset; UBTRecycleQueueItem item = NULL; - + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; + while (IsNormalOffset(currOffset) && itemCount <= itemMaxNum) { if (currOffset == itemMaxNum) { break; @@ -1287,9 +1288,10 @@ void UBTRecycleQueueVerifyAllItems(UBTRecycleQueueHeader header, Oid oid, BlockN if (itemCount + header->freeItems != itemMaxNum) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq items are invalid : oid %u, blkno %u, (items info : curr_item_offset = %u, " - "prev_offset = %u, item_count = %u, free_list_offset = %u, free_items = %u, next_xid = %ld)", - oid, blkno, currOffset, prevOffset, itemCount, freelistOffset, header->freeItems, maxXid))); + "[Verify URQ] urq items are invalid : (items info : curr_item_offset = %u, " + "prev_offset = %u, item_count = %u, free_list_offset = %u, free_items = %u, next_xid = %ld), " + "rnode[%u,%u,%u], block %u", currOffset, prevOffset, itemCount, freelistOffset, header->freeItems, + maxXid, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); } } @@ -1298,10 +1300,9 @@ static void UBTRecycleQueueVerifyItem(UBTRecycleQueueHeader header, Relation rel BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - - Oid relOid = (rel ? rel->rd_id : InvalidOid); bool itemError = false; UBTRecycleQueueItem item = NULL; + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (offnum != InvalidOffset) { item = &header->items[offnum]; @@ -1313,25 +1314,26 @@ static void UBTRecycleQueueVerifyItem(UBTRecycleQueueHeader header, Relation rel } if (itemError) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq item is invalid: oid=%u, blkno=%u, offset=%u, " - "(item info : xid=%ld blkno=%u prev=%u next=%u)", relOid, blkno, offnum, - item->xid, item->blkno, item->prev, item->next))); + "[Verify URQ] urq item is invalid: xid=%ld, blkno=%u, prev=%u, next=%u, rnode[%u,%u,%u], block %u", + item->xid, item->blkno, item->prev, item->next, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); } } CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) - UBTRecycleQueueVerifyAllItems(header, relOid, blkno); + UBTRecycleQueueVerifyAllItems(header, rel, blkno); } static void UBTRecycleMetaDataVerify(UBTRecycleMeta metaData, Relation rel, BlockNumber metaBlkno) { BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) BlockNumber indexBlocks = (rel == NULL ? metaData->nblocksUpper : RelationGetNumberOfBlocks(rel)); uint32 urqBlocks = MaxBlockNumber; Oid oid = InvalidOid; bool metaError = false; + RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (rel != NULL) { RelationOpenSmgr(rel); @@ -1344,9 +1346,9 @@ static void UBTRecycleMetaDataVerify(UBTRecycleMeta metaData, Relation rel, Bloc if (metaError) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq meta is invalid : oid=%u, meta_blkno=%u, (meta info : headBlkno = %u, tailBlkno = %u, " - "nblocksUpper = %u, nblocksLower = %u; urq_blocks = %u, index_blocks = %u)", + "[Verify URQ] urq meta is invalid : (meta info : headBlkno = %u, tailBlkno = %u, " + "nblocksUpper = %u, nblocksLower = %u; urq_blocks = %u, index_blocks = %u), rnode[%u,%u,%u], block %u", - oid, metaBlkno, metaData->headBlkno, metaData->tailBlkno, metaData->nblocksUpper, - metaData->nblocksLower, urqBlocks, indexBlocks))); + metaData->headBlkno, metaData->tailBlkno, metaData->nblocksUpper, + metaData->nblocksLower, urqBlocks, indexBlocks, rNode.spcNode, rNode.dbNode, rNode.relNode, metaBlkno))); } } \ No newline at end of file diff --git a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp index 34129979db..f0008ed4c8 100644 --- a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp @@ -367,7 +367,8 @@ int UHeapPagePrune(Relation relation, const RelationBuffer *relbuf, TransactionI END_CRIT_SECTION(); - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_ROWS)); + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, NULL, BufferGetBlockNumber(relbuf->buffer), + false, (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_ROWS)); /* * Report the number of tuples reclaimed to pgstats.
This is ndeleted diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 220b17284b..ea4aa14f78 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -722,7 +722,7 @@ reacquire_buffer: /* Clean up */ Assert(UHEAP_XID_IS_TRANS(tuple->disk_tuple->flag)); if (u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel, false, + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel, NULL, blkno, false, (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), ItemPointerGetOffsetNumber(&(tuple->ctid))); UndoRecordVerify(undorec); @@ -2316,7 +2316,7 @@ check_tup_satisfies_update: pfree(undotup.data); Assert(UHEAP_XID_IS_TRANS(utuple.disk_tuple->flag)); if (u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, NULL, blkno, false, (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), offnum); UndoRecord *undorec = (*t_thrd.ustore_cxt.urecvec)[0]; @@ -3315,13 +3315,16 @@ check_tup_satisfies_update: Assert(UHEAP_XID_IS_TRANS(uheaptup->disk_tuple->flag)); if (u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, - (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), ItemPointerGetOffsetNumber(&oldtup.ctid)); + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, NULL, BufferGetBlockNumber(buffer), + false, (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), + ItemPointerGetOffsetNumber(&oldtup.ctid)); if (!useInplaceUpdate) { Page newPage = 
BufferGetPage(newbuf); -            UpageVerify((UHeapPageHeader)newPage, InvalidXLogRecPtr, NULL, relation, false, -                (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), ItemPointerGetOffsetNumber(&(uheaptup->ctid))); +            UpageVerify((UHeapPageHeader)newPage, InvalidXLogRecPtr, NULL, relation, NULL, +                BufferGetBlockNumber(newbuf), false, +                (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), +                ItemPointerGetOffsetNumber(&(uheaptup->ctid))); } } @@ -3653,9 +3656,10 @@ reacquire_buffer: END_CRIT_SECTION(); if (u_sess->attr.attr_storage.ustore_verify_level >= USTORE_VERIFY_FAST) { - UpageVerifyHeader((UHeapPageHeader)page, InvalidXLogRecPtr, relation); + BlockNumber blkno = BufferGetBlockNumber(buffer); + UpageVerifyHeader((UHeapPageHeader)page, InvalidXLogRecPtr, &relation->rd_node, blkno); for (int k = 0; k < nthispage; k++) { - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, false, + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, NULL, blkno, false, (USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROW), verifyOffnum[k]); } } @@ -4199,7 +4203,7 @@ bool UHeapPageFreezeTransSlots(Relation relation, Buffer buf, bool *lockReacquir } cleanup: - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation); + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, NULL, BufferGetBlockNumber(buf)); if (frozenSlots != NULL) pfree(frozenSlots); @@ -5788,7 +5792,7 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) END_CRIT_SECTION(); - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation); + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relation, NULL, blkno); UnlockReleaseBuffer(buffer); diff --git a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp index bfff75ed32..57defd5233 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp +++
b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp @@ -297,7 +297,7 @@ void ExecuteUndoActionsPage(UndoRecPtr fromUrp, Relation rel, Buffer buffer, Tra END_CRIT_SECTION(); } - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel); + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel, NULL, BufferGetBlockNumber(buffer)); LockBuffer(buffer, BUFFER_LOCK_UNLOCK); } @@ -612,7 +612,8 @@ int UHeapUndoActions(URecVector *urecvec, int startIdx, int endIdx, TransactionI } END_CRIT_SECTION(); - UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relationData.relation); + UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, relationData.relation, NULL, + BufferGetBlockNumber(buffer)); UnlockReleaseBuffer(buffer); diff --git a/src/gausskernel/storage/access/ustore/knl_upage.cpp b/src/gausskernel/storage/access/ustore/knl_upage.cpp index a2f8e45667..ca8947dd08 100644 --- a/src/gausskernel/storage/access/ustore/knl_upage.cpp +++ b/src/gausskernel/storage/access/ustore/knl_upage.cpp @@ -29,9 +29,10 @@ #define ISNULL_BITMAP_NUMBER 2 #define HIGH_BITS_LENGTH_OF_LSN 32 -static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber off, TupleDesc tupDesc, Relation rel, bool isRedo = false); -static void UpageVerifyAllRowptr(UHeapPageHeader header, Relation rel, bool isRedo = false); -static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Relation rel); +static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber off, TupleDesc tupDesc, Relation rel, + RelFileNode* rNode, BlockNumber blkno, bool isRedo = false); +static void UpageVerifyAllRowptr(UHeapPageHeader header, RelFileNode* rNode, BlockNumber blkno, bool isRedo = false); +static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, RelFileNode* rNode, BlockNumber blkno); template void UPageInit(Page page, Size pageSize, Size specialSize, uint8 tdSlots) { @@ -702,100 +703,106 @@ static int getModule(bool isRedo) return isRedo ? 
USTORE_VERIFY_MOD_REDO : USTORE_VERIFY_MOD_UPAGE; } - - -void UpageVerify(UHeapPageHeader header, XLogRecPtr lastRedo, TupleDesc tupDesc, Relation rel, bool isRedo, uint8 mask, OffsetNumber num) +void UpageVerify(UHeapPageHeader header, XLogRecPtr lastRedo, TupleDesc tupDesc, Relation rel, + RelFileNode* rNode, BlockNumber blkno, bool isRedo, uint8 mask, OffsetNumber num) { BYPASS_VERIFY(getModule(isRedo), rel); + + if (!isRedo) { + rNode = rel ? &rel->rd_node : NULL; + } + RelFileNode invalidRelFileNode = {InvalidOid, InvalidOid, InvalidOid}; + if (rNode == NULL) { + rNode = &invalidRelFileNode; + } CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST); uint8 curMask = mask & USTORE_VERIFY_UPAGE_MASK; if ((curMask & USTORE_VERIFY_UPAGE_HEADER) > 0) { - UpageVerifyHeader(header, lastRedo, rel, isRedo); + UpageVerifyHeader(header, lastRedo, rNode, blkno, isRedo); } - if ((curMask & USTORE_VERIFY_UPAGE_ROWS) > 0) { - UpageVerifyAllRowptr(header, rel, isRedo); + if (num != InvalidOffsetNumber) { + if ((curMask & USTORE_VERIFY_UPAGE_ROW) > 0) { + RowPtr *rowptr = UPageGetRowPtr(header, num); + UpageVerifyRowptr(rowptr, (Page)header, num, rNode, blkno); + } + if ((curMask & USTORE_VERIFY_UPAGE_TUPLE) > 0) { + UpageVerifyTuple(header, num, tupDesc, rel, rNode, blkno, isRedo); + } } - if ((curMask & USTORE_VERIFY_UPAGE_ROW) > 0 && num != InvalidOffsetNumber) { - RowPtr *rowptr = UPageGetRowPtr(header, num); - UpageVerifyRowptr(rowptr, (Page)header, num, rel); + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE); + if ((curMask & USTORE_VERIFY_UPAGE_ROWS) > 0) { + UpageVerifyAllRowptr(header, rNode, blkno, isRedo); } - if (curMask & USTORE_VERIFY_UPAGE_TUPLE) { - if (num != InvalidOffsetNumber) { - UpageVerifyTuple(header, num, tupDesc, rel, isRedo); - } else { + if ((curMask & USTORE_VERIFY_UPAGE_TUPLE) > 0) { + if (num == InvalidOffsetNumber) { for (OffsetNumber offNum= FirstOffsetNumber; offNum <= UHeapPageGetMaxOffsetNumber((char *)header); offNum++) { - UpageVerifyTuple(header, offNum,
tupDesc, rel, isRedo); + UpageVerifyTuple(header, offNum, tupDesc, rel, rNode, blkno, isRedo); } } - } - } -void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, Relation rel, bool isRedo) +void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, RelFileNode* rNode, BlockNumber blkno, bool isRedo) { - if (lastRedo != InvalidXLogRecPtr && PageGetLSN(header) < lastRedo) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|HEADER] Current lsn(%X/%X) in page is smaller than last checkpoint(%X/%X)).", - (uint32)(PageGetLSN(header) >> HIGH_BITS_LENGTH_OF_LSN), (uint32)PageGetLSN(header), - (uint32)(lastRedo >> HIGH_BITS_LENGTH_OF_LSN), (uint32)lastRedo))); + errmsg("[UPAGE_VERIFY|HEADER] Current lsn(%X/%X) in page is smaller than last checkpoint(%X/%X)), " + "rnode[%u,%u,%u], block %u.", (uint32)(PageGetLSN(header) >> HIGH_BITS_LENGTH_OF_LSN), + (uint32)PageGetLSN(header), (uint32)(lastRedo >> HIGH_BITS_LENGTH_OF_LSN), (uint32)lastRedo, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); } if (unlikely(header->pd_lower < (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData(header)) || header->pd_lower > header->pd_upper || header->pd_upper > header->pd_special || header->potential_freespace > BLCKSZ || header->pd_special != BLCKSZ)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|HEADER] lower = %u, upper = %u, special = %u, potential = %u.", - header->pd_lower, header->pd_upper, header->pd_special, header->potential_freespace))); + errmsg("[UPAGE_VERIFY|HEADER] lower = %u, upper = %u, special = %u, potential = %u," + " rnode[%u,%u,%u], block %u.", header->pd_lower, header->pd_upper, header->pd_special, + header->potential_freespace, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); } if (header->td_count <= 0 || header->td_count > UHEAP_MAX_TD) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|HEADER] tdcount 
invalid: tdcount = %u.", header->td_count))); + errmsg("[UPAGE_VERIFY|HEADER] tdcount invalid: tdcount = %u, rnode[%u,%u,%u], block %u.", + header->td_count, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); } if (TransactionIdFollows(header->pd_prune_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|HEADER] prune_xid invalid: prune_xid = %lu, nextxid = %lu.", - header->pd_prune_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + errmsg("[UPAGE_VERIFY|HEADER] prune_xid invalid: prune_xid = %lu, nextxid = %lu." + " rnode[%u,%u,%u], block %u.", header->pd_prune_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); } } -static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber off, TupleDesc tupDesc, Relation rel, bool isRedo) +static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber offnum, TupleDesc tupDesc, Relation rel, + RelFileNode* rNode, BlockNumber blkno, bool isRedo) { - RowPtr *rp = NULL; UHeapDiskTuple diskTuple = NULL; int tdSlot = InvalidTDSlotId; bool hasInvalidXact = false; TransactionId tupXid = InvalidTransactionId; UHeapTupleTransInfo td_info = {InvalidTDSlotId, InvalidTransactionId, InvalidCommandId, INVALID_UNDO_REC_PTR}; - - rp = UPageGetRowPtr(header, off); + + RowPtr *rp = UPageGetRowPtr(header, offnum); if (RowPtrIsNormal(rp)) { diskTuple = (UHeapDiskTuple)UPageGetRowData(header, rp); tdSlot = UHeapTupleHeaderGetTDSlot(diskTuple); hasInvalidXact = UHeapTupleHasInvalidXact(diskTuple->flag); tupXid = UDiskTupleGetModifiedXid(diskTuple, (Page)header); - int tup_size = 0; - tup_size = (rel == NULL) ?
0 : CalTupSize(rel, diskTuple, tupDesc); - if (tup_size > (int)RowPtrGetLen(rp) || (diskTuple->reserved != 0 && - diskTuple->reserved != 0xFF)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|TUPLE]corrupted tuple: tupsize = %d, rpsize = %u.", - tup_size, RowPtrGetLen(rp)))); - return; - } td_info.td_slot = tdSlot; if ((tdSlot != UHEAPTUP_SLOT_FROZEN)) { if (tdSlot < 1 || tdSlot > header->td_count) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|TUPLE] tdSlot out of bounds, tdSlot = %d, td_count = %d.", tdSlot, header->td_count))); + errmsg("[UPAGE_VERIFY|TUPLE]tdSlot out of bounds, tdSlot = %d, td_count = %d, " + "rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, header->td_count, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } @@ -809,52 +816,66 @@ static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber off, TupleDesc TransactionId xid = this_trans->xactid; if (TransactionIdFollows(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|TUPLE] tdxid invalid: tdSlot = %d, tdxid = %lu, nextxid = %lu.", - tdSlot, xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + errmsg("[UPAGE_VERIFY|TUPLE]tdxid invalid: tdSlot = %d, tdxid = %lu, nextxid = %lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); } - if (TransactionIdIsValid(xid) && !TransactionIdDidCommit(xid) && + if (TransactionIdIsValid(xid) && !UHeapTransactionIdDidCommit(xid) && TransactionIdPrecedes(xid, g_instance.undo_cxt.globalFrozenXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|TUPLE] Transaction %lu in tdslot(%d) is smaller than global frozen xid %lu.", - xid, tdSlot, g_instance.undo_cxt.globalFrozenXid))); + 
errmsg("[UPAGE_VERIFY|TUPLE]tdxid %lu in tdslot(%d) is smaller than global frozen xid %lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", xid, tdSlot, g_instance.undo_cxt.globalFrozenXid, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); } } if (!hasInvalidXact && IS_VALID_UNDO_REC_PTR(td_info.urec_add) && - (!TransactionIdIsValid(td_info.xid) || (TransactionIdIsValid(tupXid) && !TransactionIdEquals(td_info.xid, tupXid)))) { + (!TransactionIdIsValid(td_info.xid) || (TransactionIdIsValid(tupXid) && + !TransactionIdEquals(td_info.xid, tupXid)))) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|TUPLE] tup xid inconsistency with td: tupxid = %lu, tdxid = %lu, urp %lu.", - tupXid, td_info.xid, td_info.urec_add))); + errmsg("[UPAGE_VERIFY|TUPLE] tup xid inconsistency with td: tupxid = %lu, tdxid = %lu, urp %lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", tupXid, td_info.xid, td_info.urec_add, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + int tupSize = (rel == NULL) ? 
0 : CalTupSize(rel, diskTuple, tupDesc); + if (tupSize > (int)RowPtrGetLen(rp) || (diskTuple->reserved != 0 && + diskTuple->reserved != 0xFF)) { + ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("[UPAGE_VERIFY|TUPLE]corrupted tuple: tupsize = %d, rpsize = %u, " + "rnode[%u,%u,%u], block %u, offnum %u.", + tupSize, RowPtrGetLen(rp), rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); + return; + } + if (!TransactionIdIsValid(tupXid)) { return; } - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + if (hasInvalidXact) { if (!UHeapTransactionIdDidCommit(tupXid) && !t_thrd.xlog_cxt.InRecovery) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|TUPLE] tup xid not commit, tupxid = %lu.", tupXid))); + errmsg("[UPAGE_VERIFY|TUPLE] tup xid not commit, tupxid = %lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", tupXid, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } if (TransactionIdEquals(td_info.xid, tupXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|TUPLE] td reused but xid equal td: tupxid = %lu, tdxid = %lu.", - tupXid, td_info.xid))); + errmsg("[UPAGE_VERIFY|TUPLE] td reused but xid equal td: tupxid = %lu, tdxid = %lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", tupXid, td_info.xid, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } } } } -static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Relation rel) +static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, RelFileNode* rNode, BlockNumber blkno) { - BYPASS_VERIFY(USTORE_VERIFY_MOD_UPAGE, rel); - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) UHeapPageHeader phdr = (UHeapPageHeader)page; int nline = UHeapPageGetMaxOffsetNumber(page); UHeapDiskTuple diskTuple = (UHeapDiskTuple)UPageGetRowData(page, rowPtr); @@ -869,14 +890,17 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, 
OffsetNumber offnum, Re if (!RowPtrIsNormal(rowPtr)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr is abnormal (flags:%d, offset %d, len %d).", - rowPtr->flags, offset, len))); + errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr is abnormal (flags:%d, offset %d, len %d), " + "rnode[%u,%u,%u], block %u, offnum %u.", rowPtr->flags, offset, len, rNode->spcNode, + rNode->dbNode, rNode->relNode, blkno, offnum))); return; } if (tdSlot < 1 || tdSlot > phdr->td_count) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ROWPTR] Invalid tdSlot %d, td count of page is %d.", tdSlot, phdr->td_count))); + errmsg("[UPAGE_VERIFY|ROWPTR] Invalid tdSlot %d, td count of page is %d, " + "rnode[%u,%u,%u], block %u, offnum %u.", + tdSlot, phdr->td_count, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } @@ -886,17 +910,23 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re if (UHEAP_XID_IS_LOCK(diskTuple->flag)) { if (!TransactionIdEquals(locker, topXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ROWPTR] locker invalid: locker %lu, topxid %lu.", locker, topXid))); + errmsg("[UPAGE_VERIFY|ROWPTR] locker invalid: locker %lu, topxid %lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", + locker, topXid, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } } else if (!IS_VALID_UNDO_REC_PTR(tdUrp) || hasInvalidXact || !TransactionIdEquals(tdXid, locker) || !TransactionIdEquals(tdXid, topXid) || !TransactionIdEquals(tdXid, tupXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] Td xid invalid: tdSlot %d, tdxid %lu, topxid %lu, " - "tupxid %lu, isInvalidSlot %d.", tdSlot, tdXid, topXid, tupXid, hasInvalidXact))); + "tupxid %lu, isInvalidSlot %d, rnode[%u,%u,%u], block %u, offnum %u.", + tdSlot, tdXid, topXid, tupXid, 
hasInvalidXact, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } - for (int i = FirstOffsetNumber; i <= nline; i++) { + + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + for (OffsetNumber i = FirstOffsetNumber; i <= nline; i++) { if (i == offnum) { continue; } @@ -908,25 +938,29 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re if (tupOffset + tupLen > offset) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr data is abnormal, flags %d, offset %u," - " len %d, alignTupLen %u, targetRpOffset %u", - rp->flags, tupOffset, RowPtrGetLen(rp), tupLen, offset))); + " len %d, alignTupLen %u, targetRpOffset %u, " + "rnode[%u,%u,%u], block %u, offnum %u, offnum2 %u.", + rp->flags, tupOffset, RowPtrGetLen(rp), tupLen, offset, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum, i))); } } else if (offset + len > tupOffset) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr data is abnormal, flags %d, offset %u," - " len %d, alignTupLen %u, targetRpOffset %u, targetRpLen %u.", - rp->flags, tupOffset, RowPtrGetLen(rp), tupLen, offset, len))); + " len %d, alignTupLen %u, targetRpOffset %u, targetRpLen %u, " + "rnode[%u,%u,%u], block %u, offnum %u, offnum2 %u.", + rp->flags, tupOffset, RowPtrGetLen(rp), tupLen, offset, len, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum, i))); } } } } -static void UpageVerifyAllRowptr(UHeapPageHeader header, Relation rel, bool isRedo) +static void UpageVerifyAllRowptr(UHeapPageHeader header, RelFileNode* rNode, BlockNumber blkno, bool isRedo) { int nline = UHeapPageGetMaxOffsetNumber((char *)header); int tdSlot = 0; int nstorage = 0; - int i; + OffsetNumber i; RpSortData rowptrs[MaxPossibleUHeapTuplesPerPage]; RpSort sortPtr = rowptrs; RowPtr *rp = NULL; @@ -936,13 +970,13 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, Relation rel, bool 
isRe if (RowPtrIsNormal(rp)) { sortPtr->start = (int)RowPtrGetOffset(rp); sortPtr->end = sortPtr->start + (int)SHORTALIGN(RowPtrGetLen(rp)); - sortPtr->offset = i; if (sortPtr->start < header->pd_upper || sortPtr->end > header->pd_special) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ALLROWPTR]corrupted rowptr: offset = %u, rpstart = %u, " - "rplen = %u, pdlower = %u, pdupper = %u.", - i, RowPtrGetOffset(rp), RowPtrGetLen(rp), header->pd_lower, header->pd_upper))); + errmsg("[UPAGE_VERIFY|ALLROWPTR] rpstart = %u, rplen = %u, pdlower = %u, pdupper = %u, " + "rnode[%u,%u,%u], block %u, offnum %u.", + RowPtrGetOffset(rp), RowPtrGetLen(rp), header->pd_lower, header->pd_upper, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, i))); return; } sortPtr++; @@ -950,22 +984,28 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, Relation rel, bool isRe tdSlot = RowPtrGetTDSlot(rp); if (tdSlot == UHEAPTUP_SLOT_FROZEN) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ALLROWPTR]rowptr(offsetnumber = %d) tdslot frozen, tdSlot = %d.", i, tdSlot))); + errmsg("[UPAGE_VERIFY|ALLROWPTR] tdslot frozen, tdSlot = %d, " + "rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, i))); return; } - - UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(header); - TD * this_trans = &tdPtr->td_info[tdSlot - 1]; + if (tdSlot < 1 || tdSlot > header->td_count) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ALLROWPTR]tdSlot out of bounds, tdSlot = %d, td_count = %d.", tdSlot, header->td_count))); + errmsg("[UPAGE_VERIFY|ALLROWPTR] tdSlot out of bounds, tdSlot = %d, " + "td_count = %d, rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, header->td_count, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, i))); return; } - + + UHeapPageTDData *tdPtr = (UHeapPageTDData 
*)PageGetTDPointer(header); + TD * this_trans = &tdPtr->td_info[tdSlot - 1]; if (TransactionIdFollows(this_trans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ALLROWPTR]tdxid invalid: tdSlot %d, tdxid = %lu, nextxid = %lu.", - tdSlot, this_trans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + errmsg("[UPAGE_VERIFY|ALLROWPTR] tdxid invalid: tdSlot %d, tdxid = %lu, " + "nextxid = %lu, rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, this_trans->xactid, + t_thrd.xact_cxt.ShmemVariableCache->nextXid, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, i))); return; } } @@ -985,10 +1025,11 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, Relation rel, bool isRe RpSort temp_ptr2 = &rowptrs[i + 1]; if (temp_ptr1->end > temp_ptr2->start) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("[UPAGE_VERIFY|ALLROWPTR]corrupted line pointer: rp1offset %u, rp1start = %u, rp1end = %u, " - "rp2offset = %u, rp2start = %u, rp2end = %u.", + errmsg("[UPAGE_VERIFY|ALLROWPTR]corrupted line pointer: rp1offnum %u, rp1start = %u, rp1end = %u, " + "rp2offnum = %u, rp2start = %u, rp2end = %u, rnode[%u,%u,%u], block %u.", temp_ptr1->offset, temp_ptr1->start, temp_ptr1->end, - temp_ptr2->offset, temp_ptr2->start, temp_ptr2->end))); + temp_ptr2->offset, temp_ptr2->start, temp_ptr2->end, + rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); return; } } diff --git a/src/gausskernel/storage/access/ustore/knl_uredo.cpp b/src/gausskernel/storage/access/ustore/knl_uredo.cpp index 69c4059d5a..cdcda5d982 100644 --- a/src/gausskernel/storage/access/ustore/knl_uredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uredo.cpp @@ -281,7 +281,7 @@ void UHeapXlogInsert(XLogReaderState *record) Page page = BufferGetPage(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, &targetNode, 
blkno, true); } if (BufferIsValid(buffer.buf)) { @@ -476,7 +476,7 @@ static void UHeapXlogDelete(XLogReaderState *record) Page page = BufferGetPage(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, &targetNode, blkno, true); } if (BufferIsValid(buffer.buf)) { @@ -537,7 +537,7 @@ static void UHeapXlogFreezeTdSlot(XLogReaderState *record) MarkBufferDirty(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, &rnode, blkno, true); } if (BufferIsValid(buffer.buf)) { @@ -582,7 +582,7 @@ static void UHeapXlogInvalidTdSlot(XLogReaderState *record) PageSetLSN(page, lsn); MarkBufferDirty(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, NULL, blkno, true); } if (BufferIsValid(buffer.buf)) { @@ -731,7 +731,7 @@ static void UHeapXlogClean(XLogReaderState *record) Page page = BufferGetPage(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, &rnode, blkno, true); } if (BufferIsValid(buffer.buf)) { @@ -1318,7 +1318,7 @@ static void UHeapXlogUpdate(XLogReaderState *record) Page page = BufferGetPage(buffers.newbuffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, &rnode, BufferGetBlockNumber(buffers.newbuffer.buf), true); } if (BufferIsValid(buffers.newbuffer.buf) && buffers.newbuffer.buf != buffers.oldbuffer.buf) { @@ -1645,7 +1645,7 @@ static void UHeapXlogMultiInsert(XLogReaderState *record) Page page = BufferGetPage(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, &rnode, blkno, true); } pfree(ufreeOffsetRanges); @@ -1674,7 +1674,7 @@ static void UHeapXlogBaseShift(XLogReaderState *record) MarkBufferDirty(buffer.buf); UpageVerify((UHeapPageHeader)page, 
t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, NULL, blkno, true); } if (BufferIsValid(buffer.buf)) { @@ -1744,7 +1744,7 @@ static void UHeapXlogExtendTDSlot(XLogReaderState *record) MarkBufferDirty(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, NULL, blkno, true); } if (BufferIsValid(buffer.buf)) { @@ -1821,7 +1821,7 @@ static void UHeapXlogFreeze(XLogReaderState *record) MarkBufferDirty(buffer.buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, &rnode, blkno, true); } if (BufferIsValid(buffer.buf)) { UnlockReleaseBuffer(buffer.buf); @@ -1997,7 +1997,7 @@ static void UHeapUndoXlogPage(XLogReaderState *record) MarkBufferDirty(buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, NULL, blkno, true); } if (BufferIsValid(buf)) @@ -2022,7 +2022,7 @@ static void UHeapUndoXlogResetXid(XLogReaderState *record) Page page = BufferGetPage(buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, NULL, blkno, true); } if (BufferIsValid(buf)) @@ -2087,7 +2087,7 @@ static void UHeapUndoXlogAbortSpecinsert(XLogReaderState *record) Page page = BufferGetPage(buf); UpageVerify((UHeapPageHeader)page, t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr, NULL, - NULL, true); + NULL, NULL, blkno, true); } if (BufferIsValid(buf)) diff --git a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp index f7f7376d64..d673af59dd 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp @@ -632,22 +632,22 @@ void UndoRecordVerify(_in_ UndoRecord *urec) { UNDO_BYPASS_VERIFY; - CHECK_VERIFY_LEVEL(USTORE_VERIFY_DEFAULT) + CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) if (!TransactionIdIsValid(urec->Xid())) 
{ ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu is invalid"), urec->Xid()))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu is invalid, urp %lu"), urec->Xid(), urec->Urp()))); } if (TransactionIdIsValid(urec->Xid()) && TransactionIdFollowsOrEquals(urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu >= nextXid %lu"), - urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu >= nextXid %lu, urp %lu"), + urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid, urec->Urp()))); } if (TransactionIdIsValid(urec->OldXactId()) && TransactionIdFollowsOrEquals(urec->OldXactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. oldXactId %lu >= nextXid %lu"), - urec->OldXactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. oldXactId %lu >= nextXid %lu, urp %lu"), + urec->OldXactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid, urec->Urp()))); } if (!(IS_VALID_UNDO_REC_PTR(urec->Urp()))) { ereport(WARNING, (errmodule(MOD_UNDO), @@ -655,6 +655,7 @@ void UndoRecordVerify(_in_ UndoRecord *urec) return; } + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) int zoneId = (int)UNDO_PTR_GET_ZONE_ID(urec->Urp()); undo::UndoZone *uzone = undo::UndoZoneGroup::GetUndoZone(zoneId, false); Assert(uzone != NULL); @@ -673,37 +674,39 @@ void UndoRecordVerify(_in_ UndoRecord *urec) undo::UndoZone *blkPrevZone = undo::UndoZoneGroup::GetUndoZone(blkPrevZid, false); if (urec->Blkprev() > blkPrevZone->GetInsertURecPtr()) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
Blkprev %lu > insertURecPtr %lu, zoneId %d"), - urec->Blkprev(), uzone->GetInsertURecPtr(), zoneId))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. Blkprev %lu > insertURecPtr %lu, zoneId %d, urp %lu"), + urec->Blkprev(), uzone->GetInsertURecPtr(), zoneId, urec->Urp()))); } } if ((urec->Uinfo() & UNDO_UREC_INFO_TRANSAC) != 0 || (urec->Uinfo() & UNDO_UREC_INFO_BLOCK) != 0) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d error"), (int)urec->Uinfo()))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d error, urp %lu"), + (int)urec->Uinfo(), urec->Urp()))); } if ((urec->Uinfo() & UNDO_UREC_INFO_OLDTD) != 0 && !TransactionIdIsValid(urec->OldXactId())) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d, oldXactId %lu is invalid"), - (int)urec->Uinfo(), urec->OldXactId()))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d, oldXactId %lu is invalid, urp %lu"), + (int)urec->Uinfo(), urec->OldXactId(), urec->Urp()))); } if ((urec->Uinfo() & UNDO_UREC_INFO_HAS_PARTOID) != 0 && urec->Partitionoid() == InvalidOid) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu, uinfo %d, partitionoid is invalid"), - urec->Urp(), (int)urec->Uinfo()))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d, partitionoid is invalid, urp %lu"), + (int)urec->Uinfo(), urec->Urp()))); } if ((urec->Uinfo() & UNDO_UREC_INFO_HAS_TABLESPACEOID) != 0 && urec->Tablespace() == InvalidOid) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu, uinfo %d, tablespace is invalid"), - urec->Urp(), (int)urec->Uinfo()))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
uinfo %d, tablespace is invalid, urp %lu"), + (int)urec->Uinfo(), urec->Urp()))); } if (urec->Utype() <= UNDO_UNKNOWN || urec->Utype() > UNDO_UPDATE) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. utype %d is invalid"), urec->Utype()))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. utype %d is invalid, urp %lu"), + urec->Utype(), urec->Urp()))); } if ((urec->Utype() == UNDO_INSERT && urec->PayLoadLen() != 0) || (urec->Utype() == UNDO_INSERT && (urec->Uinfo() & UNDO_UREC_INFO_PAYLOAD) != 0)) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. utype %d , payLoadLen %hu, uinfo %d"), - urec->Utype(), urec->PayLoadLen(), (int)urec->Uinfo()))); + errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. utype %d , payLoadLen %hu, uinfo %d, urp %lu"), + urec->Utype(), urec->PayLoadLen(), (int)urec->Uinfo(), urec->Urp()))); } } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp index fcc253effd..18154077ca 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp @@ -345,7 +345,7 @@ static void verifyXid(TransactionSlot *slot) if (TransactionIdIsValid(xid) && TransactionIdFollowsOrEquals(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. slot xactId %lu >= nextXid%lu"), + errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. 
slot xactId %lu >= nextXid %lu"), xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); } } diff --git a/src/include/access/ustore/knl_upage.h b/src/include/access/ustore/knl_upage.h index 302f6056d8..2ef78734ab 100644 --- a/src/include/access/ustore/knl_upage.h +++ b/src/include/access/ustore/knl_upage.h @@ -349,10 +349,11 @@ inline OffsetNumber UHeapPageGetMaxOffsetNumber(char *upage) #define USTORE_VERIFY_UPAGE_DEFAULT (USTORE_VERIFY_UPAGE_HEADER | USTORE_VERIFY_UPAGE_TUPLE | USTORE_VERIFY_UPAGE_ROWS) -void UpageVerify(UHeapPageHeader header, XLogRecPtr lastRedo, TupleDesc tupDesc, Relation rel, - bool isRedo = false, uint8 mask = USTORE_VERIFY_UPAGE_DEFAULT, - OffsetNumber num = InvalidOffsetNumber /* for single TUPLE and ROW */); +void UpageVerify(UHeapPageHeader header, XLogRecPtr lastRedo, TupleDesc tupDesc, Relation rel, + RelFileNode* rNode, BlockNumber blkno, bool isRedo = false, uint8 mask = USTORE_VERIFY_UPAGE_DEFAULT, + OffsetNumber num = InvalidOffsetNumber); -void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, Relation rel, bool isRedo = false); +void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, RelFileNode* rNode, + BlockNumber blkno, bool isRedo = false); #endif -- Gitee From ecef881857e19d76d84267cefcc2d69af35a1c58 Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Sat, 10 Aug 2024 14:53:29 +0800 Subject: [PATCH 192/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=8C=89=E9=9C=80=E5=9B=9E=E6=94=BEredo=E9=98=B6?= =?UTF-8?q?=E6=AE=B5=E6=8A=A5=E9=94=99=20can=20not=20find=20target=20block?= =?UTF-8?q?-record?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/redo/redo_xlogutils.cpp | 2 +- .../ondemand_extreme_rto/redo_utils.cpp | 34 +++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/redo/redo_xlogutils.cpp b/src/gausskernel/storage/access/redo/redo_xlogutils.cpp index 
04d758fa08..cc40300a2d 100644 --- a/src/gausskernel/storage/access/redo/redo_xlogutils.cpp +++ b/src/gausskernel/storage/access/redo/redo_xlogutils.cpp @@ -316,6 +316,7 @@ void XLogRecSetBlockCommonState(XLogReaderState *record, XLogBlockParseEnum bloc blockparse->blockhead.opt = filenode.rnode.node.opt; blockparse->blockhead.blkno = filenode.segno; blockparse->blockhead.forknum = filenode.forknumber; + blockparse->blockhead.hasCSN = XLogRecHasCSN(record); blockparse->redohead.xl_term = XLogRecGetTerm(record); @@ -432,7 +433,6 @@ void XLogRecSetBlockDataState(XLogReaderState *record, uint32 blockid, XLogRecPa XLogRecSetBlockDataStateContent(record, blockid, blockdatarec); recordblockstate->blockparse.blockhead.is_conflict_type = is_conflict_type; - recordblockstate->blockparse.blockhead.hasCSN = XLogRecHasCSN(record); } void XLogRecSetAuxiBlkNumState(XLogBlockDataParse *blockdatarec, BlockNumber auxilaryblkn1, BlockNumber auxilaryblkn2) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp index b72af7fc53..b2b639caf3 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp @@ -31,6 +31,9 @@ #include "storage/lock/lwlock.h" #include "catalog/storage_xlog.h" + +static void PrintXLogRecParseStateBlockHead(XLogRecParseState* blockState); + /* * Add xlog reader private structure for page read. */ @@ -533,6 +536,10 @@ bool IsRecParseStateHaveChildState(XLogRecParseState *checkState) return false; } +/** + * Find out target blockState from checkState and its nextrecords, by checking if any blockState + * has the same blockhead with the target blockState, and release others. Used in ondemand-recovery redo phase. 
+ */ static XLogRecParseState *OndemandFindTargetBlockStateInOndemandRedo(XLogRecParseState *checkState, XLogRecParseState *srcState) { @@ -601,6 +608,7 @@ XLogRecParseState *OndemandRedoReloadXLogRecord(XLogRecParseState *hashmapBlockS // step3: find target parse state XLogRecParseState *targetState = OndemandFindTargetBlockStateInOndemandRedo(recordBlockState, hashmapBlockState); if (targetState == NULL) { + PrintXLogRecParseStateBlockHead(hashmapBlockState); ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("[On-demand] reload xlog record failed at %X/%X, spc/db/rel/bucket " "fork-block: %u/%u/%u/%d %d-%u, errormsg: can not find target block-record", @@ -682,4 +690,30 @@ void OnDemandNotifyHashMapPruneIfNeed() if (SS_ONDEMAND_RECOVERY_HASHMAP_FULL) { ondemand_extreme_rto::StartupSendMarkToBatchRedo(&ondemand_extreme_rto::g_hashmapPruneMark); } +} + +static void PrintXLogRecParseStateBlockHead(XLogRecParseState* blockState) { + StringInfoData res; + initStringInfo(&res); + appendStringInfo(&res, "{start_ptr: %X/%X, ", (uint32)(blockState->blockparse.blockhead.start_ptr>> 32), + (uint32)blockState->blockparse.blockhead.start_ptr); + appendStringInfo(&res, "end_ptr: %X/%X, ", (uint32)(blockState->blockparse.blockhead.end_ptr>> 32), + (uint32)blockState->blockparse.blockhead.end_ptr); + appendStringInfo(&res, "blkno: %u, ", blockState->blockparse.blockhead.blkno); + appendStringInfo(&res, "relNode: %u, ", blockState->blockparse.blockhead.relNode); + appendStringInfo(&res, "block_valid: %u, ", (uint32)(blockState->blockparse.blockhead.block_valid)); + appendStringInfo(&res, "xl_info: %u, ", (uint32)(blockState->blockparse.blockhead.xl_info)); + appendStringInfo(&res, "block_valid: %u, ", (uint32)(blockState->blockparse.blockhead.xl_info)); + appendStringInfo(&res, "xl_rmid: %u, ", (uint32)(blockState->blockparse.blockhead.xl_rmid)); + appendStringInfo(&res, "forknum: %d, ", blockState->blockparse.blockhead.forknum); + appendStringInfo(&res, "xl_xid: 
%lu, ", blockState->blockparse.blockhead.xl_xid); + appendStringInfo(&res, "spcNode: %u, ", (uint32)(blockState->blockparse.blockhead.spcNode)); + appendStringInfo(&res, "dbNode: %u, ", (uint32)(blockState->blockparse.blockhead.dbNode)); + appendStringInfo(&res, "bucketNode: %d, ", (int)(blockState->blockparse.blockhead.bucketNode)); + appendStringInfo(&res, "opt: %u, ", (uint32)(blockState->blockparse.blockhead.opt)); + appendStringInfo(&res, "is_conflict_type: %u, ", (uint32)(blockState->blockparse.blockhead.is_conflict_type)); + appendStringInfo(&res, "hasCSN: %u}; ", (uint32)(blockState->blockparse.blockhead.hasCSN)); + ereport(LOG, + (errmsg("[On-demand][redo] blockState->blockparse.blockhead: %s.", + res.data))); } \ No newline at end of file -- Gitee From 8a88ae5ae0dc967c74a93551bf3d28bc8e7d0320 Mon Sep 17 00:00:00 2001 From: z00848344 Date: Tue, 20 Aug 2024 14:58:51 +0800 Subject: [PATCH 193/347] On branch io_limits Your branch is up to date with 'origin/io_limits'. Changes to be committed: modified: src/bin/gs_guc/cluster_guc.conf --- src/bin/gs_guc/cluster_guc.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 87ebb5d0f2..f37fc3102e 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -318,6 +318,7 @@ ignore_system_indexes|bool|0,0|NULL|When ignore_system_indexes set to on, it is sql_ignore_strategy|string|0,0|NULL|NULL| parctl_min_cost|int|-1,2147483647|NULL|NULL| io_control_unit|int|1000,1000000|NULL|NULL| +io_limits|int|0,1073741823|NULL|NULL| gin_pending_list_limit|int|64,2147483647|kB|NULL| intervalstyle|enum|postgres,postgres_verbose,sql_standard,iso_8601,a|NULL|NULL| join_collapse_limit|int|1,2147483647|NULL|NULL| -- Gitee From 7b691eed29212e67353ef3eece9d8d39c02116e6 Mon Sep 17 00:00:00 2001 From: lukeman Date: Fri, 16 Aug 2024 10:54:13 +0800 Subject: [PATCH 194/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9A?= 
=?UTF-8?q?=E3=80=90pg=E5=85=BC=E5=AE=B9=E6=80=A7=E3=80=91=E7=BB=99?= =?UTF-8?q?=E8=A1=A8=E5=88=9B=E5=BB=BA=E8=A7=86=E5=9B=BE=EF=BC=8C=E5=B9=B6?= =?UTF-8?q?=E7=BB=99=E8=A1=A8=E5=A2=9E=E5=8A=A0=E4=B8=80=E5=88=97=EF=BC=8C?= =?UTF-8?q?=E5=AF=B9=E8=A1=A8=E5=A4=87=E4=BB=BD=E5=90=8E=E7=94=A8gs=5Frest?= =?UTF-8?q?ore=E6=81=A2=E5=A4=8D=E6=97=B6=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/tablecmds.cpp | 15 +++++++++++++++ src/test/regress/expected/alter_table_modify.out | 12 ++++++------ .../expected/alter_table_modify_ustore.out | 12 ++++++------ 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index e6a6e524fa..80d654d117 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -13280,6 +13280,8 @@ static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relat query_str = CheckPgRewriteFirstAfter(rel); tab->rewrite |= AT_REWRITE_ALTER_PERSISTENCE; tab->is_first_after = true; + } else if (rel->rd_rel->relkind == RELKIND_RELATION) { + query_str = CheckPgRewriteFirstAfter(rel); } InsertPgAttributeTuple(attrdesc, &attribute, NULL); @@ -13322,6 +13324,19 @@ static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relat /* create or replace view */ ReplaceViewQueryFirstAfter(query_str); + } else if (rel->rd_rel->relkind == RELKIND_RELATION && query_str != NIL) { + ListCell* viewinfo = NULL; + bool isViewValid = true; + foreach (viewinfo, query_str) { + ViewInfoForAdd *info = (ViewInfoForAdd *)lfirst(viewinfo); + isViewValid &= GetPgObjectValid(info->ev_class, OBJECT_TYPE_VIEW); + if (!isViewValid) { + break; + } + } + if (isViewValid) { + ReplaceViewQueryFirstAfter(query_str); + } } /* diff --git a/src/test/regress/expected/alter_table_modify.out 
b/src/test/regress/expected/alter_table_modify.out index 14c2074309..c44c02b913 100644 --- a/src/test/regress/expected/alter_table_modify.out +++ b/src/test/regress/expected/alter_table_modify.out @@ -3183,9 +3183,9 @@ ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20), ADD COLUMN ERROR: cannot change data type of view column "f1" from integer to character varying(20) ALTER TABLE test_at_modify_view_column MODIFY column f1 int, ADD COLUMN f0 int; SELECT pg_get_viewdef('test_modify_view_star'::regclass); - pg_get_viewdef --------------------------------------------- - SELECT * FROM test_at_modify_view_column; + pg_get_viewdef +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column; (1 row) SELECT * FROM test_modify_view_star; @@ -3388,9 +3388,9 @@ ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20), ADD COL ERROR: cannot change data type of view column "f1" from integer to character varying(20) ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int, ADD COLUMN f0 int; SELECT pg_get_viewdef('test_modify_view_star'::regclass); - pg_get_viewdef --------------------------------------------- - SELECT * FROM test_at_modify_view_column; + pg_get_viewdef +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.c1 AS f1 FROM test_at_modify_view_column; (1 row) SELECT * FROM test_modify_view_star; diff --git a/src/test/regress/expected/alter_table_modify_ustore.out b/src/test/regress/expected/alter_table_modify_ustore.out 
index 77f21f37a6..a228d781b0 100644 --- a/src/test/regress/expected/alter_table_modify_ustore.out +++ b/src/test/regress/expected/alter_table_modify_ustore.out @@ -2787,9 +2787,9 @@ ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20), ADD COLUMN ERROR: cannot change data type of view column "f1" from integer to character varying(20) ALTER TABLE test_at_modify_view_column MODIFY column f1 int, ADD COLUMN f0 int; SELECT pg_get_viewdef('test_modify_view_star'::regclass); - pg_get_viewdef --------------------------------------------- - SELECT * FROM test_at_modify_view_column; + pg_get_viewdef +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column; (1 row) SELECT * FROM test_modify_view_star; @@ -2992,9 +2992,9 @@ ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20), ADD COL ERROR: cannot change data type of view column "f1" from integer to character varying(20) ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int, ADD COLUMN f0 int; SELECT pg_get_viewdef('test_modify_view_star'::regclass); - pg_get_viewdef --------------------------------------------- - SELECT * FROM test_at_modify_view_column; + pg_get_viewdef +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.c1 AS f1 FROM test_at_modify_view_column; (1 row) SELECT * FROM test_modify_view_star; -- Gitee From 22492a48dc82520771b1026d1441d0c825873d6f Mon Sep 17 00:00:00 2001 From: cchen676 Date: Sat, 10 Aug 2024 17:42:11 +0800 Subject: [PATCH 195/347] 
=?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=A4=87=E6=9C=BA?= =?UTF-8?q?=E5=BC=80=E5=90=AF=E5=86=99=E8=BD=AC=E5=8F=91=E5=90=8E=E7=9A=84?= =?UTF-8?q?=E4=B8=80=E4=BA=9B=E4=B9=8B=E5=89=8D=E8=AE=BE=E8=AE=A1=E6=97=B6?= =?UTF-8?q?=E6=B2=A1=E8=80=83=E8=99=91=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/pg_proc.cpp | 2 +- .../backend/client_logic/client_logic.cpp | 10 ++++----- src/common/backend/parser/parse_utilcmd.cpp | 2 +- src/common/pl/plpgsql/src/pl_exec.cpp | 5 +++++ .../optimizer/commands/prepare.cpp | 10 +++++++++ src/gausskernel/optimizer/plan/planner.cpp | 6 ++++++ .../storage/replication/libpqsw.cpp | 21 ++++++++++++++----- src/include/replication/libpqsw.h | 13 +++++++++++- 8 files changed, 56 insertions(+), 13 deletions(-) diff --git a/src/common/backend/catalog/pg_proc.cpp b/src/common/backend/catalog/pg_proc.cpp index d30119793a..d85c0767e8 100644 --- a/src/common/backend/catalog/pg_proc.cpp +++ b/src/common/backend/catalog/pg_proc.cpp @@ -1234,7 +1234,7 @@ ObjectAddress ProcedureCreate(const char* procedureName, Oid procNamespace, Oid * But when we are in inplace-upgrade, we can create function with polymorphic return type */ if (!u_sess->attr.attr_common.enable_full_encryption && !u_sess->attr.attr_common.IsInplaceUpgrade && - (fullEncryptedInParam || fullEncryptedOutParam || is_enc_type(returnType))) { + (fullEncryptedInParam || fullEncryptedOutParam || is_enc_type(returnType)) && t_thrd.role != SW_SENDER) { ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot create function"), errdetail("function does not support full encrypted type parameter when client encryption is disabled."))); } diff --git a/src/common/backend/client_logic/client_logic.cpp b/src/common/backend/client_logic/client_logic.cpp index 631ba9c240..97a940246a 100755 --- a/src/common/backend/client_logic/client_logic.cpp +++ 
b/src/common/backend/client_logic/client_logic.cpp @@ -289,7 +289,7 @@ void insert_gs_sec_encrypted_column_tuple(CeHeapInfo *ce_heap_info, Relation rel #ifdef ENABLE_MULTIPLE_NODES if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) { #else - if (!u_sess->attr.attr_common.enable_full_encryption) { + if (!u_sess->attr.attr_common.enable_full_encryption && t_thrd.role != SW_SENDER) { #endif ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), @@ -633,7 +633,7 @@ static bool process_column_settings_flush_args(Oid column_key_id, const char *co #ifdef ENABLE_MULTIPLE_NODES if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) { #else - if (!u_sess->attr.attr_common.enable_full_encryption) { + if (!u_sess->attr.attr_common.enable_full_encryption && t_thrd.role != SW_SENDER) { #endif ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), @@ -894,7 +894,7 @@ int drop_global_settings(DropStmt *stmt) #ifdef ENABLE_MULTIPLE_NODES if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) { #else - if (!u_sess->attr.attr_common.enable_full_encryption) { + if (!u_sess->attr.attr_common.enable_full_encryption && t_thrd.role != SW_SENDER) { #endif ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), @@ -961,7 +961,7 @@ int drop_column_settings(DropStmt *stmt) #ifdef ENABLE_MULTIPLE_NODES if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) { #else - if (!u_sess->attr.attr_common.enable_full_encryption) { + if (!u_sess->attr.attr_common.enable_full_encryption && t_thrd.role != SW_SENDER) { #endif ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), @@ -1065,7 +1065,7 @@ void remove_encrypted_col_by_id(Oid id) #ifdef ENABLE_MULTIPLE_NODES if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) { #else - if (!u_sess->attr.attr_common.enable_full_encryption) { + if (!u_sess->attr.attr_common.enable_full_encryption && t_thrd.role != SW_SENDER) { #endif 
ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp index 773f66ec62..fd47ffa338 100644 --- a/src/common/backend/parser/parse_utilcmd.cpp +++ b/src/common/backend/parser/parse_utilcmd.cpp @@ -1324,7 +1324,7 @@ static void transformColumnDefinition(CreateStmtContext* cxt, ColumnDef* column, #ifdef ENABLE_MULTIPLE_NODES if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) { #else - if (!u_sess->attr.attr_common.enable_full_encryption) { + if (!u_sess->attr.attr_common.enable_full_encryption && t_thrd.role != SW_SENDER) { #endif ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index cefc5430e0..647550b5d8 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1343,6 +1343,11 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, } #endif + if (SS_STANDBY_MODE) { + ereport(ERROR, + (errmodule(MOD_PLSQL), errmsg("SS Standby node does not support invoking autonomous transactions."))); + } + #ifndef ENABLE_MULTIPLE_NODES if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) { PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); diff --git a/src/gausskernel/optimizer/commands/prepare.cpp b/src/gausskernel/optimizer/commands/prepare.cpp index cf965ed58e..d9e1739978 100755 --- a/src/gausskernel/optimizer/commands/prepare.cpp +++ b/src/gausskernel/optimizer/commands/prepare.cpp @@ -333,6 +333,11 @@ void PrepareQuery(PrepareStmt* stmt, const char* queryString) */ StorePreparedStatement(stmt->name, plansource, true); + if (ENABLE_REMOTE_EXECUTE) { + const char* commandTag = CreateCommandTag(stmt->query); + (void)libpqsw_process_query_message(commandTag, query_list, queryString, false, false); + } + #ifdef ENABLE_MOT // Try MOT JIT 
code generation only after the plan source is saved. if ((plansource->storageEngineType == SE_TYPE_MOT || plansource->storageEngineType == SE_TYPE_UNSPECIFIED) && @@ -381,6 +386,11 @@ void ExecuteQuery(ExecuteStmt* stmt, IntoClause* intoClause, const char* querySt ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("EXECUTE does not support variable-result cached plans"))); + if (ENABLE_REMOTE_EXECUTE && + libpqsw_process_query_message(psrc->commandTag, psrc->query_list, queryString, false, false)) { + return; + } + /* Evaluate parameters, if any */ if (entry->plansource->num_params > 0) { /* diff --git a/src/gausskernel/optimizer/plan/planner.cpp b/src/gausskernel/optimizer/plan/planner.cpp index 7e8d3bcc89..eac0ad1ed0 100755 --- a/src/gausskernel/optimizer/plan/planner.cpp +++ b/src/gausskernel/optimizer/plan/planner.cpp @@ -443,6 +443,12 @@ bool queryIsReadOnly(Query* query) case CMD_INSERT: case CMD_DELETE: case CMD_MERGE: { + if (SS_STANDBY_MODE_WITH_REMOTE_EXECUTE && query->utilityStmt != NULL && + (query->utilityStmt->type == T_PrepareStmt || query->utilityStmt->type == T_ExecuteStmt || + query->utilityStmt->type == T_DeallocateStmt || query->utilityStmt->type == T_CopyStmt)) { + return true; + } + if (SS_STANDBY_MODE_WITH_REMOTE_EXECUTE && get_redirect_manager()->state.transaction) { get_redirect_manager()->ss_standby_state |= SS_STANDBY_REQ_WRITE_REDIRECT; } diff --git a/src/gausskernel/storage/replication/libpqsw.cpp b/src/gausskernel/storage/replication/libpqsw.cpp index f5b39aa146..a5da8c3178 100644 --- a/src/gausskernel/storage/replication/libpqsw.cpp +++ b/src/gausskernel/storage/replication/libpqsw.cpp @@ -189,6 +189,7 @@ void RedirectMessageManager::push_message(int qtype, StringInfo msg, bool need_s last_message = qtype; copyStringInfo(cur_msg->pbe_stack_msgs[cur_msg->cur_pos], msg); cur_msg->cur_pos ++; + libpqsw_trace("[PUSHED MSG] %c: msg:%s, pos:%d", qtype, msg->data, cur_msg->cur_pos - 1); MemoryContextSwitchTo(old); } @@ -590,6 
+591,11 @@ void libpqsw_set_set_command(bool set_command) get_redirect_manager()->state.set_command = set_command; } +static bool libpqsw_prepare_command(const char* commandTag) +{ + return commandTag != NULL && (strcmp(commandTag, "PREPARE") == 0 || strcmp(commandTag, "DEALLOCATE") == 0); +} + /* * wrapper remote excute for extend query (PBE) */ @@ -924,7 +930,8 @@ static bool libpqsw_need_localexec_forSimpleQuery(const char *commandTag, List * redirect_manager->ss_standby_state |= SS_STANDBY_REQ_SAVEPOINT; } else if (query_list != NIL) { /* Don't support DDL with in transaction */ - if (set_command_type_by_commandTag(commandTag) == CMD_DDL || libpqsw_special_command(commandTag)) { + if ((set_command_type_by_commandTag(commandTag) == CMD_DDL && !libpqsw_prepare_command(commandTag)) || + libpqsw_special_command(commandTag)) { if (libpqsw_fetch_command(commandTag)) { get_redirect_manager()->ss_standby_state |= SS_STANDBY_REQ_WRITE_REDIRECT; return ret; @@ -1068,10 +1075,14 @@ bool libpqsw_process_message(int qtype, StringInfo msg) ready_to_excute = redirect_manager->push_message(qtype, msg, false, RT_NORMAL); if (ready_to_excute) { - libpqsw_inner_excute_pbe(true, true); - libpqsw_set_batch(false); - libpqsw_set_redirect(false); - libpqsw_set_set_command(false); + if (qtype != 'S') { + libpqsw_inner_excute_pbe(false, false); + } else if (qtype == 'S') { + libpqsw_inner_excute_pbe(true, true); + libpqsw_set_batch(false); + libpqsw_set_redirect(false); + libpqsw_set_set_command(false); + } } /* for begin in pbe and in trxn */ diff --git a/src/include/replication/libpqsw.h b/src/include/replication/libpqsw.h index ea9530d498..7eb4402169 100644 --- a/src/include/replication/libpqsw.h +++ b/src/include/replication/libpqsw.h @@ -131,9 +131,10 @@ typedef struct { } RedirectState; // the max len =(PBEPBEDS) == 8, 20 is enough -#define PBE_MESSAGE_STACK (20) +#define PBE_MESSAGE_STACK (25) #define PBE_MESSAGE_MERGE_ID (PBE_MESSAGE_STACK - 1) #define PBE_MAX_SET_BLOCK (10) 
+#define PBE_MESSAGE_MAX_CUR_FOR_PBE (PBE_MESSAGE_STACK - 6) enum RedirectType { RT_NORMAL, //transfer to standby RT_TXN_STATUS, @@ -201,6 +202,11 @@ public: return list_length(messages) == PBE_MAX_SET_BLOCK; } + int curpos_of_message() + { + return ((RedirectMessage *)llast(messages))->cur_pos; + } + // is pre last message S or Q bool pre_last_message() { @@ -278,6 +284,11 @@ public: if (qtype == 'S' || qtype == 'Q') { return state.already_connected || messages_manager.lots_of_message(); } + + if (qtype == 'E' && messages_manager.curpos_of_message() > PBE_MESSAGE_MAX_CUR_FOR_PBE) { + return true; + } + return false; } -- Gitee From b0fc50b3040125e84237c2083df55a3bf0d6933d Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Wed, 14 Aug 2024 11:35:55 +0800 Subject: [PATCH 196/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=88=9B=E5=BB=BA?= =?UTF-8?q?=E5=B8=A6=E5=B5=8C=E5=A5=97tableof=E5=85=A5=E5=8F=82=E7=9A=84?= =?UTF-8?q?=E5=8C=85=E5=87=BD=E6=95=B0coredump=E7=9A=84=E9=97=AE=E9=A2=98?= =?UTF-8?q?=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_package.cpp | 9 ++- .../plpgsql_nested_array_and_record.out | 62 +++++++++++++++++++ .../sql/plpgsql_nested_array_and_record.sql | 48 ++++++++++++++ 3 files changed, 117 insertions(+), 2 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_package.cpp b/src/common/pl/plpgsql/src/pl_package.cpp index 034ddeecd4..d4eee0debd 100644 --- a/src/common/pl/plpgsql/src/pl_package.cpp +++ b/src/common/pl/plpgsql/src/pl_package.cpp @@ -683,6 +683,7 @@ void delete_package(PLpgSQL_package* pkg) /* free package memory,*/ plpgsql_pkg_HashTableDelete(pkg); plpgsql_free_package_memory(pkg); + pfree_ext(pkg); } static void plpgsql_pkg_append_dlcell(plpgsql_pkg_HashEnt* entity) @@ -703,7 +704,6 @@ static void plpgsql_pkg_append_dlcell(plpgsql_pkg_HashEnt* entity) /* delete from the hash and delete the function's 
compile */ CheckCurrCompileDependOnPackage(pkg->pkg_oid); delete_package(pkg); - pfree_ext(pkg); } } @@ -825,7 +825,8 @@ static PLpgSQL_package* do_pkg_compile(Oid pkgOid, HeapTuple pkg_tup, PLpgSQL_pa PLpgSQL_compile_context* curr_compile = createCompileContext(context_name); SPI_NESTCOMPILE_LOG(curr_compile->compile_cxt); bool pkg_is_null = false; - if (pkg == NULL) { + /* pkg_cxt is null, means that delete_package has been done, and pkg has been freed. */ + if (pkg == NULL || pkg->pkg_cxt == NULL) { pkg = (PLpgSQL_package*)MemoryContextAllocZero( SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER), sizeof(PLpgSQL_package)); pkg->pkg_cxt = curr_compile->compile_cxt; @@ -1330,6 +1331,10 @@ Oid findPackageParameter(const char* objname) } break; } + case PLPGSQL_NSTYPE_TABLE: + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("nested-table type is not supported for parameter yet"))); + break; default: toid = InvalidOid; } diff --git a/src/test/regress/expected/plpgsql_nested_array_and_record.out b/src/test/regress/expected/plpgsql_nested_array_and_record.out index 53cf9d73be..53c40b5ba2 100644 --- a/src/test/regress/expected/plpgsql_nested_array_and_record.out +++ b/src/test/regress/expected/plpgsql_nested_array_and_record.out @@ -116,6 +116,68 @@ NOTICE: ID: 1, NAME: RECORD (1 row) +set behavior_compat_options='plpgsql_dependency'; +create or replace package pac_PLArray_Case0021 is + type typ_PLArray_1 is table of varchar(100); + type typ_PLArray_2 is table of typ_PLArray_1; + nstarr typ_PLArray_2; + + procedure p_PLArray_1; + procedure p_PLArray_2(var typ_PLArray_2); +end pac_PLArray_Case0021; +/ +WARNING: Type typ_plarray_2 does not exist. 
+ERROR: nested-table type is not supported for parameter yet +create or replace package body pac_PLArray_Case0021 is +procedure p_PLArray_1() is +begin +nstarr(2)(1):='第二行第一列'; +perform p_PLArray_2(nstarr); +end; + +procedure p_PLArray_2(var typ_PLArray_2) is +begin + insert into t_PLArray_case0021(col) values(var(2)(1)); +end; +end pac_PLArray_Case0021; +/ +ERROR: package spec not found +create or replace package pac_PLArray_Case0021 is + procedure p_PLArray_1; + procedure p_PLArray_2(var typ_PLArray_3); +end pac_PLArray_Case0021; +/ +WARNING: Type typ_plarray_3 does not exist. +WARNING: The header information of function p_plarray_2 is not defined. +CONTEXT: compilation of PL/pgSQL package near line 1 +WARNING: Package created with compilation erors. +create or replace package body pac_PLArray_Case0021 is +procedure p_PLArray_1() is +begin +nstarr(2)(1):='第二行第一列'; +perform p_PLArray_2(nstarr); +end; + +procedure p_PLArray_2(var typ_PLArray_3) is +begin + insert into t_PLArray_case0021(col) values(var(2)(1)); +end; +end pac_PLArray_Case0021; +/ +WARNING: Type typ_plarray_3 does not exist. +WARNING: The header information of function p_plarray_2 is not defined. +CONTEXT: compilation of PL/pgSQL package near line 1 +WARNING: function "nstarr" doesn't exist +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL package near line 1 +WARNING: syntax error at or near "(" when compile function p_plarray_2(undefined) +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL package near line 1 +WARNING: Package Body created with compilation erors. 
+drop package pac_PLArray_Case0021; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_nested_array_and_record.p_plarray_1() +drop cascades to function plpgsql_nested_array_and_record.p_plarray_2(undefined) DROP SCHEMA plpgsql_nested_array_and_record CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to function test_nested_array() diff --git a/src/test/regress/sql/plpgsql_nested_array_and_record.sql b/src/test/regress/sql/plpgsql_nested_array_and_record.sql index 2fda94dd15..be31c8b8a3 100644 --- a/src/test/regress/sql/plpgsql_nested_array_and_record.sql +++ b/src/test/regress/sql/plpgsql_nested_array_and_record.sql @@ -84,4 +84,52 @@ END; / CALL test_nested(); +set behavior_compat_options='plpgsql_dependency'; + +create or replace package pac_PLArray_Case0021 is + type typ_PLArray_1 is table of varchar(100); + type typ_PLArray_2 is table of typ_PLArray_1; + nstarr typ_PLArray_2; + + procedure p_PLArray_1; + procedure p_PLArray_2(var typ_PLArray_2); +end pac_PLArray_Case0021; +/ + +create or replace package body pac_PLArray_Case0021 is +procedure p_PLArray_1() is +begin +nstarr(2)(1):='第二行第一列'; +perform p_PLArray_2(nstarr); +end; + +procedure p_PLArray_2(var typ_PLArray_2) is +begin + insert into t_PLArray_case0021(col) values(var(2)(1)); +end; +end pac_PLArray_Case0021; +/ + +create or replace package pac_PLArray_Case0021 is + procedure p_PLArray_1; + procedure p_PLArray_2(var typ_PLArray_3); +end pac_PLArray_Case0021; +/ + +create or replace package body pac_PLArray_Case0021 is +procedure p_PLArray_1() is +begin +nstarr(2)(1):='第二行第一列'; +perform p_PLArray_2(nstarr); +end; + +procedure p_PLArray_2(var typ_PLArray_3) is +begin + insert into t_PLArray_case0021(col) values(var(2)(1)); +end; +end pac_PLArray_Case0021; +/ + +drop package pac_PLArray_Case0021; + DROP SCHEMA plpgsql_nested_array_and_record CASCADE; -- Gitee From cc4549771976eab4ece98897bc20f102e62fab21 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 
<1025221611@qq.com> Date: Mon, 19 Aug 2024 16:58:10 +0800 Subject: [PATCH 197/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=B8=B8=E6=A0=87?= =?UTF-8?q?=E5=90=91=E9=87=8F=E5=8C=96codegen=E7=9A=84core=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/runtime/executor/execMain.cpp | 8 +- .../runtime/executor/execUtils.cpp | 1 + src/include/nodes/execnodes.h | 1 + src/test/regress/expected/llvm_vecexpr3.out | 95 +++++++++++++++++++ src/test/regress/sql/llvm_vecexpr3.sql | 77 ++++++++++++++- 5 files changed, 179 insertions(+), 3 deletions(-) diff --git a/src/gausskernel/runtime/executor/execMain.cpp b/src/gausskernel/runtime/executor/execMain.cpp index a09bbb031a..2be1fd4655 100755 --- a/src/gausskernel/runtime/executor/execMain.cpp +++ b/src/gausskernel/runtime/executor/execMain.cpp @@ -608,6 +608,7 @@ void standard_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long co } else { CodeGenThreadRuntimeCodeGenerate(); } + estate->compileCodegen = true; } #endif @@ -843,8 +844,11 @@ void standard_ExecutorEnd(QueryDesc *queryDesc) UnregisterSnapshot(estate->es_crosscheck_snapshot); #ifdef ENABLE_LLVM_COMPILE - /* Do not release codegen in Fmgr and Procedure */ - if (!t_thrd.codegen_cxt.g_runningInFmgr && u_sess->SPI_cxt._connected == -1) { + /* + * Do not release codegen in Fmgr and Procedure. And if codegen modulre + * is compiled, only estate which has compiled it can release. 
+ */ + if (u_sess->SPI_cxt._connected == -1 && (CodeGenThreadObjectReady() || estate->compileCodegen)) { CodeGenThreadTearDown(); } #endif diff --git a/src/gausskernel/runtime/executor/execUtils.cpp b/src/gausskernel/runtime/executor/execUtils.cpp index dd33d0d2d3..ff6377c406 100644 --- a/src/gausskernel/runtime/executor/execUtils.cpp +++ b/src/gausskernel/runtime/executor/execUtils.cpp @@ -207,6 +207,7 @@ EState* CreateExecutorState() estate->cur_insert_autoinc = 0; estate->next_autoinc = 0; estate->es_is_flt_frame = (u_sess->attr.attr_common.enable_expr_fusion && u_sess->attr.attr_sql.query_dop_tmp == 1); + estate->compileCodegen = false; /* * Return the executor state structure */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index c369b514ef..3240dc40b8 100755 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -739,6 +739,7 @@ typedef struct EState { #ifdef USE_SPQ List *es_sharenode; #endif + bool compileCodegen; } EState; /* diff --git a/src/test/regress/expected/llvm_vecexpr3.out b/src/test/regress/expected/llvm_vecexpr3.out index 1981ac8ee8..fefaee9668 100644 --- a/src/test/regress/expected/llvm_vecexpr3.out +++ b/src/test/regress/expected/llvm_vecexpr3.out @@ -539,6 +539,101 @@ select col_int, col_intervaltz from llvm_vecexpr_table_04 where nullif(col_inter | ["Mon May 10 07:59:12 1937 PST" "Sat Jan 13 23:14:21 2001 PST"] (15 rows) +CREATE TABLE t_rate_calculation ( + id character varying(32) NOT NULL, + dispatch_no character varying(32) NOT NULL, + waybill_no character varying(32) NOT NULL, + adjustment_price numeric(18,4), + artifical_assessmen_price numeric(18,2), + manual_change numeric(18,2) +) WITH (orientation=row, compression=no); +CREATE TABLE t_vehicle_plan ( + id character varying(32), + waybill_no character varying(20) +) WITH (orientation=row, compression=no); +CREATE TABLE t_plan_vehicle ( + id character varying(32) NOT NULL, + plan_id character varying(32), + carrier_id character 
varying(32), + dispatch_no character varying(32) +) WITH (orientation=row, compression=no); +CREATE TABLE t_carrier_info ( + id character varying(32) NOT NULL, + carrier_code character varying(20) +) WITH (orientation=row, compression=no); +CREATE TABLE t_waybill_info_local ( + id character varying(32) NOT NULL, + waybill_no character varying(20) +) WITH (orientation=row, compression=no); +CREATE TABLE t_waybill_carrier ( + id character varying(32) NOT NULL, + waybill_no character varying(20), + carrier_code character varying(20) +) WITH (orientation=row, compression=no); +insert into t_rate_calculation values (generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000)); +insert into t_vehicle_plan values (generate_series(1,10000),generate_series(1,10000)); +insert into t_plan_vehicle values (generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000)); +insert into t_carrier_info values (generate_series(1,10000),generate_series(1,10000)); +insert into t_waybill_info_local values (generate_series(1,10000),generate_series(1,10000)); +insert into t_waybill_carrier values (generate_series(1,10000),generate_series(1,10000),generate_series(1,10000)); +set try_vector_engine_strategy='force'; +set codegen_cost_threshold = 100; +set enable_codegen = on; +explain (costs off) declare xc no scroll cursor for select rc.waybill_no 邮路代码, rc.dispatch_no 派车单, rc.adjustment_price+rc.artifical_assessmen_price +rc.manual_change 手工调账考核 from t_rate_calculation rc +left JOIN t_plan_vehicle pv on pv.dispatch_no=rc.dispatch_no +left join t_carrier_info ci on pv.carrier_id=ci.id--承运商 +left join t_vehicle_plan vp on pv.plan_id=vp.id +left join t_waybill_info_local wl on vp.waybill_no=wl.waybill_no +left join t_waybill_carrier wc on ci.carrier_code=wc.carrier_code and wc.waybill_no=wl.waybill_no; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------ + Row Adapter + -> Vector Hash Right Join + Hash Cond: ((pv.dispatch_no)::text = (rc.dispatch_no)::text) + -> Vector Hash Left Join + Hash Cond: (((ci.carrier_code)::text = (wc.carrier_code)::text) AND ((wl.waybill_no)::text = (wc.waybill_no)::text)) + -> Vector Hash Right Join + Hash Cond: ((vp.id)::text = (pv.plan_id)::text) + -> Vector Hash Left Join + Hash Cond: ((vp.waybill_no)::text = (wl.waybill_no)::text) + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on t_vehicle_plan vp + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on t_waybill_info_local wl + -> Vector Hash Right Join + Hash Cond: ((ci.id)::text = (pv.carrier_id)::text) + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on t_carrier_info ci + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on t_plan_vehicle pv + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on t_waybill_carrier wc + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on t_rate_calculation rc +(23 rows) + +begin; +declare xc no scroll cursor for select rc.waybill_no 邮路代码, rc.dispatch_no 派车单, rc.adjustment_price+rc.artifical_assessmen_price +rc.manual_change 手工调账考核 from t_rate_calculation rc +left JOIN t_plan_vehicle pv on pv.dispatch_no=rc.dispatch_no +left join t_carrier_info ci on pv.carrier_id=ci.id--承运商 +left join t_vehicle_plan vp on pv.plan_id=vp.id +left join t_waybill_info_local wl on vp.waybill_no=wl.waybill_no +left join t_waybill_carrier wc on ci.carrier_code=wc.carrier_code and wc.waybill_no=wl.waybill_no; +move 1000 xc; +select 1; + ?column? 
+---------- + 1 +(1 row) + +move 1000 xc; +end; +drop table t_rate_calculation; +drop table t_vehicle_plan; +drop table t_plan_vehicle; +drop table t_carrier_info; +drop table t_waybill_info_local; +drop table t_waybill_carrier; ---- --- clean table and resource ---- diff --git a/src/test/regress/sql/llvm_vecexpr3.sql b/src/test/regress/sql/llvm_vecexpr3.sql index 97152d2f35..c48e769e11 100644 --- a/src/test/regress/sql/llvm_vecexpr3.sql +++ b/src/test/regress/sql/llvm_vecexpr3.sql @@ -163,7 +163,82 @@ select col_int, col_real, col_decimal from llvm_vecexpr_table_04 where nullif(co select col_int, col_intervaltz from llvm_vecexpr_table_04 where nullif(col_intervaltz, '["1937-06-11 23:59:12+08" "2001-11-14 15:14:21+08"]') is not NULL order by 1, 2; +CREATE TABLE t_rate_calculation ( + id character varying(32) NOT NULL, + dispatch_no character varying(32) NOT NULL, + waybill_no character varying(32) NOT NULL, + adjustment_price numeric(18,4), + artifical_assessmen_price numeric(18,2), + manual_change numeric(18,2) +) WITH (orientation=row, compression=no); + +CREATE TABLE t_vehicle_plan ( + id character varying(32), + waybill_no character varying(20) +) WITH (orientation=row, compression=no); + +CREATE TABLE t_plan_vehicle ( + id character varying(32) NOT NULL, + plan_id character varying(32), + carrier_id character varying(32), + dispatch_no character varying(32) +) WITH (orientation=row, compression=no); + +CREATE TABLE t_carrier_info ( + id character varying(32) NOT NULL, + carrier_code character varying(20) +) WITH (orientation=row, compression=no); + +CREATE TABLE t_waybill_info_local ( + id character varying(32) NOT NULL, + waybill_no character varying(20) +) WITH (orientation=row, compression=no); + +CREATE TABLE t_waybill_carrier ( + id character varying(32) NOT NULL, + waybill_no character varying(20), + carrier_code character varying(20) +) WITH (orientation=row, compression=no); + +insert into t_rate_calculation values 
(generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000)); +insert into t_vehicle_plan values (generate_series(1,10000),generate_series(1,10000)); +insert into t_plan_vehicle values (generate_series(1,10000),generate_series(1,10000),generate_series(1,10000),generate_series(1,10000)); +insert into t_carrier_info values (generate_series(1,10000),generate_series(1,10000)); +insert into t_waybill_info_local values (generate_series(1,10000),generate_series(1,10000)); +insert into t_waybill_carrier values (generate_series(1,10000),generate_series(1,10000),generate_series(1,10000)); + +set try_vector_engine_strategy='force'; +set codegen_cost_threshold = 100; +set enable_codegen = on; + +explain (costs off) declare xc no scroll cursor for select rc.waybill_no 邮路代码, rc.dispatch_no 派车单, rc.adjustment_price+rc.artifical_assessmen_price +rc.manual_change 手工调账考核 from t_rate_calculation rc +left JOIN t_plan_vehicle pv on pv.dispatch_no=rc.dispatch_no +left join t_carrier_info ci on pv.carrier_id=ci.id--承运商 +left join t_vehicle_plan vp on pv.plan_id=vp.id +left join t_waybill_info_local wl on vp.waybill_no=wl.waybill_no +left join t_waybill_carrier wc on ci.carrier_code=wc.carrier_code and wc.waybill_no=wl.waybill_no; + +begin; +declare xc no scroll cursor for select rc.waybill_no 邮路代码, rc.dispatch_no 派车单, rc.adjustment_price+rc.artifical_assessmen_price +rc.manual_change 手工调账考核 from t_rate_calculation rc +left JOIN t_plan_vehicle pv on pv.dispatch_no=rc.dispatch_no +left join t_carrier_info ci on pv.carrier_id=ci.id--承运商 +left join t_vehicle_plan vp on pv.plan_id=vp.id +left join t_waybill_info_local wl on vp.waybill_no=wl.waybill_no +left join t_waybill_carrier wc on ci.carrier_code=wc.carrier_code and wc.waybill_no=wl.waybill_no; + +move 1000 xc; +select 1; +move 1000 xc; +end; + +drop table t_rate_calculation; +drop table t_vehicle_plan; +drop table t_plan_vehicle; +drop table 
t_carrier_info; +drop table t_waybill_info_local; +drop table t_waybill_carrier; + ---- --- clean table and resource ---- -drop schema llvm_vecexpr_engine3 cascade ; \ No newline at end of file +drop schema llvm_vecexpr_engine3 cascade ; -- Gitee From 27846aaed86294e440c2225e2ad658c7c6793be8 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 15 Aug 2024 14:38:22 +0800 Subject: [PATCH 198/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dinsertrow=E4=B8=AD?= =?UTF-8?q?=E5=AD=98=E5=9C=A8=E7=94=A8=E6=88=B7=E5=8F=98=E9=87=8F=E6=97=B6?= =?UTF-8?q?=E5=BC=82=E5=B8=B8=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/nodeFuncs.cpp | 3 +-- src/gausskernel/optimizer/util/clauses.cpp | 3 +++ .../regress/input/set_user_defined_variables_test.source | 7 +++++++ .../regress/output/set_user_defined_variables_test.source | 5 +++++ 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/common/backend/nodes/nodeFuncs.cpp b/src/common/backend/nodes/nodeFuncs.cpp index 17a9d95120..25d40d65e5 100644 --- a/src/common/backend/nodes/nodeFuncs.cpp +++ b/src/common/backend/nodes/nodeFuncs.cpp @@ -2098,8 +2098,7 @@ bool expression_tree_walker(Node* node, bool (*walker)(), void* context) case T_PrefixKey: return p2walker(((PrefixKey*)node)->arg, context); case T_UserSetElem: { - p2walker(((UserSetElem*)node)->val, context); - return true; + return p2walker(((UserSetElem*)node)->val, context); } case T_PriorExpr: return p2walker(((PriorExpr*)node)->node, context); diff --git a/src/gausskernel/optimizer/util/clauses.cpp b/src/gausskernel/optimizer/util/clauses.cpp index b447c3ecf4..2568542f71 100644 --- a/src/gausskernel/optimizer/util/clauses.cpp +++ b/src/gausskernel/optimizer/util/clauses.cpp @@ -1191,6 +1191,9 @@ static bool contain_specified_functions_walker(Node* node, check_function_contex } else if (IsA(node, Query) && context->deep) { /* Recurse into 
subselects */ return query_tree_walker((Query*)node, (bool (*)())contain_specified_functions_walker, context, 0); + } else if (IsA(node, UserSetElem)) { + /* UserSetElem is volatile */ + return context->checktype == CONTAIN_VOLATILE_FUNTION; } return expression_tree_walker(node, (bool (*)())contain_specified_functions_walker, context); } diff --git a/src/test/regress/input/set_user_defined_variables_test.source b/src/test/regress/input/set_user_defined_variables_test.source index f9f9e80752..dd770ef738 100644 --- a/src/test/regress/input/set_user_defined_variables_test.source +++ b/src/test/regress/input/set_user_defined_variables_test.source @@ -774,6 +774,13 @@ END WHILE label_1; end; / SELECT TRIM(TRAILING ', ' FROM @sequence); + +set enable_set_variable_b_format = 1; +CREATE TABLE userset_t1 (a VARCHAR(500) CHARACTER SET UTF8); +SET @num=1000; +INSERT INTO userset_t1 VALUES (CONCAT((@num:=@num+1), 'a')); + +set enable_set_variable_b_format = default; \c regression drop database if exists test_set; diff --git a/src/test/regress/output/set_user_defined_variables_test.source b/src/test/regress/output/set_user_defined_variables_test.source index 032412d9cb..b5f693d4cd 100644 --- a/src/test/regress/output/set_user_defined_variables_test.source +++ b/src/test/regress/output/set_user_defined_variables_test.source @@ -1520,6 +1520,11 @@ SELECT TRIM(TRAILING ', ' FROM @sequence); 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 (1 row) +set enable_set_variable_b_format = 1; +CREATE TABLE userset_t1 (a VARCHAR(500) CHARACTER SET UTF8); +SET @num=1000; +INSERT INTO userset_t1 VALUES (CONCAT((@num:=@num+1), 'a')); +set enable_set_variable_b_format = default; \c regression drop database if exists test_set; \! 
@abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_set_variable_b_format=off" >/dev/null 2>&1 -- Gitee From 4992f2c3a2d27e972d25a705ca8e9c0d4895bc28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Wed, 14 Aug 2024 21:34:34 +0800 Subject: [PATCH 199/347] =?UTF-8?q?decode=20A=E6=A8=A1=E5=BC=8F=E5=85=BC?= =?UTF-8?q?=E5=AE=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/nodeFuncs.cpp | 12 +- src/common/backend/parser/parse_coerce.cpp | 3 +- src/common/backend/parser/parse_expr.cpp | 28 +- src/gausskernel/optimizer/util/clauses.cpp | 16 + .../expected/decode_compatible_with_o.out | 3948 +++++++++++++++-- .../regress/sql/decode_compatible_with_o.sql | 687 ++- 6 files changed, 4431 insertions(+), 263 deletions(-) diff --git a/src/common/backend/nodes/nodeFuncs.cpp b/src/common/backend/nodes/nodeFuncs.cpp index 25d40d65e5..0ecffd31b3 100644 --- a/src/common/backend/nodes/nodeFuncs.cpp +++ b/src/common/backend/nodes/nodeFuncs.cpp @@ -259,7 +259,15 @@ Oid exprType(const Node* expr) case T_CursorExpression: type = REFCURSOROID; break; - + case T_TypeCast: + { + TypeCast *tc = (TypeCast*)expr; + if (tc->typname == NULL || !OidIsValid(tc->typname->typeOid)) { + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("invalid typecast node"))); + } + type = tc->typname->typeOid; + break; + } default: ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized node type: %d", (int)nodeTag(expr)))); @@ -1238,6 +1246,7 @@ void exprSetCollation(Node* expr, Oid collation) case T_PriorExpr: return exprSetCollation((Node*)((const PriorExpr*)expr)->node, collation); case T_CursorExpression: + case T_TypeCast: break; default: ereport( @@ -1767,6 +1776,7 @@ bool expression_tree_walker(Node* node, bool (*walker)(), void* context) case T_Rownum: case T_UserVar: case T_SetVariableExpr: + case T_TypeCast: #ifdef USE_SPQ case 
T_DMLActionExpr: #endif diff --git a/src/common/backend/parser/parse_coerce.cpp b/src/common/backend/parser/parse_coerce.cpp index 037c0fc331..cb071ca0fe 100644 --- a/src/common/backend/parser/parse_coerce.cpp +++ b/src/common/backend/parser/parse_coerce.cpp @@ -1697,7 +1697,8 @@ static bool category_can_be_matched(TYPCATEGORY preferCategory, TYPCATEGORY next {TYPCATEGORY_UNKNOWN, TYPCATEGORY_STRING}, {TYPCATEGORY_UNKNOWN, TYPCATEGORY_NUMERIC}, {TYPCATEGORY_NUMERIC, TYPCATEGORY_STRING}, {TYPCATEGORY_NUMERIC, TYPCATEGORY_UNKNOWN}, {TYPCATEGORY_STRING, TYPCATEGORY_DATETIME}, {TYPCATEGORY_STRING, TYPCATEGORY_TIMESPAN}, - {TYPCATEGORY_UNKNOWN, TYPCATEGORY_DATETIME}, {TYPCATEGORY_UNKNOWN, TYPCATEGORY_TIMESPAN}}; + {TYPCATEGORY_UNKNOWN, TYPCATEGORY_DATETIME}, {TYPCATEGORY_UNKNOWN, TYPCATEGORY_TIMESPAN}, + {TYPCATEGORY_DATETIME, TYPCATEGORY_UNKNOWN}}; for (unsigned int i = 0; i < sizeof(categoryMatchedList) / sizeof(categoryMatchedList[0]); i++) { if (preferCategory == categoryMatchedList[i][0] && nextCategory == categoryMatchedList[i][1]) { diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 3d61bdf047..71f83fa036 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -2216,7 +2216,33 @@ static Node* transformCaseExpr(ParseState* pstate, CaseExpr* c) /* casecollid will be set by parse_collate.c */ /* Convert default result clause, if necessary */ - newc->defresult = (Expr*)coerce_to_common_type(pstate, (Node*)newc->defresult, ptype, "CASE/ELSE"); + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && c->fromDecode) { + Node *defResNode = (Node*)newc->defresult; + Oid sourceTypeId = exprType(defResNode); + if (sourceTypeId != ptype) { + /* + * only check if type can be coerced, return TypeCast node + * the TypeCast node will be executed when truly needed + */ + if (can_coerce_type(1, &sourceTypeId, &ptype, COERCION_IMPLICIT)) { + TypeCast *n = makeNode(TypeCast); + 
n->arg = defResNode; + n->typname = makeTypeNameFromOid(ptype, -1); + n->location = -1; + newc->defresult = (Expr*)n; + } else { + ereport(ERROR, + (errcode(ERRCODE_CANNOT_COERCE), + errmsg("%s could not convert type %s to %s", + "CASE/ELSE", + format_type_be(sourceTypeId), + format_type_be(ptype)), + parser_errposition(pstate, exprLocation(defResNode)))); + } + } + } else { + newc->defresult = (Expr*)coerce_to_common_type(pstate, (Node*)newc->defresult, ptype, "CASE/ELSE"); + } /* Convert when-clause results, if necessary */ foreach (l, newc->args) { diff --git a/src/gausskernel/optimizer/util/clauses.cpp b/src/gausskernel/optimizer/util/clauses.cpp index 2568542f71..f910103eca 100644 --- a/src/gausskernel/optimizer/util/clauses.cpp +++ b/src/gausskernel/optimizer/util/clauses.cpp @@ -3409,6 +3409,22 @@ Node* eval_const_expressions_mutator(Node* node, eval_const_expressions_context* return eval_const_expressions_mutator((Node*)phv->phexpr, context); } break; + case T_TypeCast: + { + TypeCast *tc = (TypeCast*)node; + Node *defResNode = tc->arg; + Oid sourceTypeId = exprType(defResNode); + Oid ptype = tc->typname->typeOid; + if (can_coerce_type(1, &sourceTypeId, &ptype, COERCION_IMPLICIT)) { + return coerce_type(NULL, defResNode, sourceTypeId, ptype, -1, + COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); + } else { + ereport(ERROR, + (errcode(ERRCODE_CANNOT_COERCE), errmsg("could not convert type %s to %s", + format_type_be(sourceTypeId), format_type_be(ptype)))); + } + break; + } default: break; } diff --git a/src/test/regress/expected/decode_compatible_with_o.out b/src/test/regress/expected/decode_compatible_with_o.out index c0f7c1e351..2523f914ed 100755 --- a/src/test/regress/expected/decode_compatible_with_o.out +++ b/src/test/regress/expected/decode_compatible_with_o.out @@ -19,6 +19,7 @@ create table tb_test( c_varchar2 varchar2, c_nvarchar2 nvarchar2, c_text text, + c_blank_text text, c_char2number_success text, c_raw raw, c_date date, @@ -35,11 +36,136 @@ 
create table tb_test( -- test1: implicit type conversion from defresult to result1 -- ========================================================= insert into tb_test values( - 't', 1, 2, 4, 8, 4.4, 8.8, 9.999, 66, 'char', 'bpchar', 'varchar2', 'nvarchar2', 'text', '7.77', '1234', + 't', 1, 2, 4, 8, 4.4, 8.8, 9.999, 66, 'char', 'bpchar', 'varchar2', 'nvarchar2', 'text', ' ', '7.77', '1234', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-12-12', '2013-12-11 pst', '2003-04-12 04:05:06', interval '2' year, '30 DAYS 12:00:00', abstime 'Mon May 1 00:30:30 1995' ); -- convert to bool +select decode(1, 1, c_bool, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and tinyint cannot be matched +LINE 1: select decode(1, 1, c_bool, c_int1) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and smallint cannot be matched +LINE 1: select decode(1, 1, c_bool, c_int2) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and integer cannot be matched +LINE 1: select decode(1, 1, c_bool, c_int4) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and bigint cannot be matched +LINE 1: select decode(1, 1, c_bool, c_int8) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and real cannot be matched +LINE 1: select decode(1, 1, c_bool, c_float4) as result, pg_typeof(r... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and double precision cannot be matched +LINE 1: select decode(1, 1, c_bool, c_float8) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and numeric cannot be matched +LINE 1: select decode(1, 1, c_bool, c_numeric) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and boolean cannot be matched +LINE 1: select decode(1, 1, c_bool, c_money) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and character cannot be matched +LINE 1: select decode(1, 1, c_bool, c_char) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and character cannot be matched +LINE 1: select decode(1, 1, c_bool, c_bpchar) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and character varying cannot be matched +LINE 1: select decode(1, 1, c_bool, c_varchar2) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_bool, c_nvarchar2) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and text cannot be matched +LINE 1: select decode(1, 1, c_bool, c_text) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and text cannot be matched +LINE 1: select decode(1, 1, c_bool, c_blank_text) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and text cannot be matched +LINE 1: select decode(1, 1, c_bool, c_char2number_success) as result... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and raw cannot be matched +LINE 1: select decode(1, 1, c_bool, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_bool, c_date) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_bool, c_time) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_bool, c_timetz) as result, pg_typeof(r... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_bool, c_timestamp) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_bool, c_timestamptz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_bool, c_smalldatetime) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and interval cannot be matched +LINE 1: select decode(1, 1, c_bool, c_interval) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and reltime cannot be matched +LINE 1: select decode(1, 1, c_bool, c_reltime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bool, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and abstime cannot be matched +LINE 1: select decode(1, 1, c_bool, c_abstime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_bool, c_int1) as result, pg_typeof(result) from tb_test; ERROR: DECODE types boolean and tinyint cannot be matched LINE 1: select decode(1, 2, c_bool, c_int1) as result, pg_typeof(res... @@ -105,6 +231,11 @@ ERROR: DECODE types boolean and text cannot be matched LINE 1: select decode(1, 2, c_bool, c_text) as result, pg_typeof(res... 
^ CONTEXT: referenced column: result +select decode(1, 2, c_bool, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and text cannot be matched +LINE 1: select decode(1, 2, c_bool, c_blank_text) as result, pg_type... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_bool, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types boolean and text cannot be matched LINE 1: select decode(1, 2, c_bool, c_char2number_success) as result... @@ -161,6 +292,144 @@ LINE 1: select decode(1, 2, c_bool, c_abstime) as result, pg_typeof(... ^ CONTEXT: referenced column: result -- convert to int1 +select decode(1, 1, c_int1, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and boolean cannot be matched +LINE 1: select decode(1, 1, c_int1, c_bool) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type tinyint to money +LINE 1: select decode(1, 1, c_int1, c_money) as 
result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 1, c_int1, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and raw cannot be matched +LINE 1: select decode(1, 1, c_int1, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_int1, c_date) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_int1, c_time) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_int1, c_timetz) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_int1, c_timestamp) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_int1, c_timestamptz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_int1, c_smalldatetime) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and interval cannot be matched +LINE 1: select decode(1, 1, c_int1, c_interval) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and reltime cannot be matched +LINE 1: select decode(1, 1, c_int1, c_reltime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int1, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types tinyint and abstime cannot be matched +LINE 1: select decode(1, 1, c_int1, c_abstime) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result select decode(1, 2, c_int1, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types tinyint and boolean cannot be matched LINE 1: select decode(1, 2, c_int1, c_bool) as result, pg_typeof(res... @@ -218,10 +487,15 @@ ERROR: invalid input syntax for type numeric: "varchar2" CONTEXT: referenced column: result select decode(1, 2, c_int1, c_nvarchar2) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "nvarchar2" -CONTEXT: referenced column: result +CONTEXT: referenced column: to_number +SQL function "to_numeric" statement 1 +referenced column: result select decode(1, 2, c_int1, c_text) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "text" CONTEXT: referenced column: result +select decode(1, 2, c_int1, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: result select decode(1, 2, c_int1, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -279,84 +553,227 @@ LINE 1: select decode(1, 2, c_int1, c_abstime) as result, pg_typeof(... ^ CONTEXT: referenced column: result -- convert to int2 -select decode(1, 2, c_int2, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types smallint and boolean cannot be matched -LINE 1: select decode(1, 2, c_int2, c_bool) as result, pg_typeof(res... +LINE 1: select decode(1, 1, c_int2, c_bool) as result, pg_typeof(res... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_int1) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 1 | numeric + 2 | numeric (1 row) -select decode(1, 2, c_int2, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_int4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4 | numeric + 2 | numeric (1 row) -select decode(1, 2, c_int2, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_int8) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8 | numeric + 2 | numeric (1 row) -select decode(1, 2, c_int2, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_float4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4.4 | numeric + 2 | numeric (1 row) -select decode(1, 2, c_int2, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_float8) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8.8 | numeric + 2 | numeric (1 row) -select decode(1, 2, c_int2, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_numeric) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 9.999 | numeric + 2 | numeric (1 row) -select decode(1, 2, c_int2, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_money) as result, pg_typeof(result) from tb_test; ERROR: CASE/ELSE could not convert type money to smallint -LINE 1: select decode(1, 2, c_int2, c_money) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_int2, c_money) as result, pg_typeof(re... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_char) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "char " -CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_bpchar) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "bpchar" -CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_varchar2) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "varchar2" -CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_nvarchar2) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "nvarchar2" -CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_text) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "text" -CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_char) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 7.77 | numeric + 2 | numeric (1 row) -select decode(1, 2, c_int2, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + +select decode(1, 1, c_int2, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + +select decode(1, 1, c_int2, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + +select decode(1, 1, c_int2, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + +select decode(1, 1, c_int2, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + 
+select decode(1, 1, c_int2, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + +select decode(1, 1, c_int2, c_raw) as result, pg_typeof(result) from tb_test; ERROR: DECODE types smallint and raw cannot be matched -LINE 1: select decode(1, 2, c_int2, c_raw) as result, pg_typeof(resu... +LINE 1: select decode(1, 1, c_int2, c_raw) as result, pg_typeof(resu... ^ CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_date) as result, pg_typeof(result) from tb_test; ERROR: DECODE types smallint and timestamp without time zone cannot be matched -LINE 1: select decode(1, 2, c_int2, c_date) as result, pg_typeof(res... +LINE 1: select decode(1, 1, c_int2, c_date) as result, pg_typeof(res... ^ CONTEXT: referenced column: result -select decode(1, 2, c_int2, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_int2, c_time) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int2, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_int2, c_timetz) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int2, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_int2, c_timestamp) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int2, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_int2, c_timestamptz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int2, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_int2, c_smalldatetime) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int2, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and interval cannot be matched +LINE 1: select decode(1, 1, c_int2, c_interval) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int2, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and reltime cannot be matched +LINE 1: select decode(1, 1, c_int2, c_reltime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int2, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and abstime cannot be matched +LINE 1: select decode(1, 1, c_int2, c_abstime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and boolean cannot be matched +LINE 1: select decode(1, 2, c_int2, c_bool) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 2, c_int2, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 2, c_int2, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 2, c_int2, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 2, c_int2, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 2, c_int2, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 2, c_int2, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to smallint +LINE 1: select decode(1, 2, c_int2, c_money) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_char) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "char " +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "bpchar" +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "varchar2" +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "nvarchar2" +CONTEXT: referenced column: to_number +SQL function "to_numeric" statement 1 +referenced column: result +select decode(1, 2, c_int2, c_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "text" +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 7.77 | numeric +(1 row) + +select decode(1, 2, c_int2, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and raw cannot be matched +LINE 1: select decode(1, 2, c_int2, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smallint and timestamp without time zone cannot be matched +LINE 1: select decode(1, 2, c_int2, c_date) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int2, c_time) as result, pg_typeof(result) from tb_test; ERROR: DECODE types smallint and time without time zone cannot be matched LINE 1: select decode(1, 2, c_int2, c_time) as result, pg_typeof(res... ^ @@ -397,6 +814,144 @@ LINE 1: select decode(1, 2, c_int2, c_abstime) as result, pg_typeof(... ^ CONTEXT: referenced column: result -- convert to int4 +select decode(1, 1, c_int4, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and boolean cannot be matched +LINE 1: select decode(1, 1, c_int4, c_bool) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to integer +LINE 1: select decode(1, 1, c_int4, c_money) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 1, c_int4, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and raw cannot be matched +LINE 1: select decode(1, 1, c_int4, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_int4, c_date) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_int4, c_time) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_int4, c_timetz) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_int4, c_timestamp) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_int4, c_timestamptz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_int4, c_smalldatetime) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and interval cannot be matched +LINE 1: select decode(1, 1, c_int4, c_interval) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and reltime cannot be matched +LINE 1: select decode(1, 1, c_int4, c_reltime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int4, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types integer and abstime cannot be matched +LINE 1: select decode(1, 1, c_int4, c_abstime) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result select decode(1, 2, c_int4, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types integer and boolean cannot be matched LINE 1: select decode(1, 2, c_int4, c_bool) as result, pg_typeof(res... @@ -454,10 +1009,15 @@ ERROR: invalid input syntax for type numeric: "varchar2" CONTEXT: referenced column: result select decode(1, 2, c_int4, c_nvarchar2) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "nvarchar2" -CONTEXT: referenced column: result +CONTEXT: referenced column: to_number +SQL function "to_numeric" statement 1 +referenced column: result select decode(1, 2, c_int4, c_text) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "text" CONTEXT: referenced column: result +select decode(1, 2, c_int4, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: result select decode(1, 2, c_int4, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -515,84 +1075,227 @@ LINE 1: select decode(1, 2, c_int4, c_abstime) as result, pg_typeof(... ^ CONTEXT: referenced column: result -- convert to int8 -select decode(1, 2, c_int8, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types bigint and boolean cannot be matched -LINE 1: select decode(1, 2, c_int8, c_bool) as result, pg_typeof(res... +LINE 1: select decode(1, 1, c_int8, c_bool) as result, pg_typeof(res... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_int1) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 1 | numeric + 8 | numeric (1 row) -select decode(1, 2, c_int8, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_int2) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 2 | numeric + 8 | numeric (1 row) -select decode(1, 2, c_int8, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_int4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4 | numeric + 8 | numeric (1 row) -select decode(1, 2, c_int8, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_float4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4.4 | numeric + 8 | numeric (1 row) -select decode(1, 2, c_int8, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_float8) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8.8 | numeric + 8 | numeric (1 row) -select decode(1, 2, c_int8, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_numeric) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 9.999 | numeric + 8 | numeric (1 row) -select decode(1, 2, c_int8, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_money) as result, pg_typeof(result) from tb_test; ERROR: CASE/ELSE could not convert type money to bigint -LINE 1: select decode(1, 2, c_int8, c_money) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_int8, c_money) as result, pg_typeof(re... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_char) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "char " -CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_bpchar) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "bpchar" -CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_varchar2) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "varchar2" -CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_nvarchar2) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "nvarchar2" -CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_text) as result, pg_typeof(result) from tb_test; -ERROR: invalid input syntax for type numeric: "text" -CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_char) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 7.77 | numeric + 8 | numeric (1 row) -select decode(1, 2, c_int8, c_raw) as result, pg_typeof(result) from tb_test; -ERROR: DECODE types bigint and raw cannot be matched -LINE 1: select decode(1, 2, c_int8, c_raw) as result, pg_typeof(resu... - ^ -CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_date) as result, pg_typeof(result) from tb_test; -ERROR: DECODE types bigint and timestamp without time zone cannot be matched -LINE 1: select decode(1, 2, c_int8, c_date) as result, pg_typeof(res... 
- ^ -CONTEXT: referenced column: result -select decode(1, 2, c_int8, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 1, c_int8, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 1, c_int8, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 1, c_int8, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 1, c_int8, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 1, c_int8, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 1, c_int8, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and raw cannot be matched +LINE 1: select decode(1, 1, c_int8, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_int8, c_date) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_int8, c_time) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_int8, c_timetz) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_int8, c_timestamp) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_int8, c_timestamptz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_int8, c_smalldatetime) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and interval cannot be matched +LINE 1: select decode(1, 1, c_int8, c_interval) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and reltime cannot be matched +LINE 1: select decode(1, 1, c_int8, c_reltime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_int8, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and abstime cannot be matched +LINE 1: select decode(1, 1, c_int8, c_abstime) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and boolean cannot be matched +LINE 1: select decode(1, 2, c_int8, c_bool) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 2, c_int8, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + +select decode(1, 2, c_int8, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 2, c_int8, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 2, c_int8, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 2, c_int8, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 2, c_int8, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to bigint +LINE 1: select decode(1, 2, c_int8, c_money) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_char) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "char " +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "bpchar" +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "varchar2" +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "nvarchar2" +CONTEXT: referenced column: to_number +SQL function "to_numeric" statement 1 +referenced column: result +select decode(1, 2, c_int8, c_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: "text" +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 7.77 | numeric +(1 row) + +select decode(1, 2, c_int8, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and raw cannot be matched +LINE 1: select decode(1, 2, c_int8, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types bigint and timestamp without time zone cannot be matched +LINE 1: select decode(1, 2, c_int8, c_date) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_int8, c_time) as result, pg_typeof(result) from tb_test; ERROR: DECODE types bigint and time without time zone cannot be matched LINE 1: select decode(1, 2, c_int8, c_time) as result, pg_typeof(res... ^ @@ -633,6 +1336,144 @@ LINE 1: select decode(1, 2, c_int8, c_abstime) as result, pg_typeof(... ^ CONTEXT: referenced column: result -- convert to float4 +select decode(1, 1, c_float4, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and boolean cannot be matched +LINE 1: select decode(1, 1, c_float4, c_bool) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to real +LINE 1: select decode(1, 1, c_float4, c_money) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | numeric +(1 row) + +select decode(1, 1, c_float4, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and raw cannot be matched +LINE 1: select decode(1, 1, c_float4, c_raw) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_float4, c_date) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_float4, c_time) as result, pg_typeof(r... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_float4, c_timetz) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_float4, c_timestamp) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_float4, c_timestamptz) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_float4, c_smalldatetime) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and interval cannot be matched +LINE 1: select decode(1, 1, c_float4, c_interval) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and reltime cannot be matched +LINE 1: select decode(1, 1, c_float4, c_reltime) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float4, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types real and abstime cannot be matched +LINE 1: select decode(1, 1, c_float4, c_abstime) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result select decode(1, 2, c_float4, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types real and boolean cannot be matched LINE 1: select decode(1, 2, c_float4, c_bool) as result, pg_typeof(r... @@ -690,10 +1531,15 @@ ERROR: invalid input syntax for type numeric: "varchar2" CONTEXT: referenced column: result select decode(1, 2, c_float4, c_nvarchar2) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "nvarchar2" -CONTEXT: referenced column: result +CONTEXT: referenced column: to_number +SQL function "to_numeric" statement 1 +referenced column: result select decode(1, 2, c_float4, c_text) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "text" CONTEXT: referenced column: result +select decode(1, 2, c_float4, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: result select decode(1, 2, c_float4, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -751,36 +1597,174 @@ LINE 1: select decode(1, 2, c_float4, c_abstime) as result, pg_typeo... ^ CONTEXT: referenced column: result -- convert to float8 -select decode(1, 2, c_float8, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types double precision and boolean cannot be matched -LINE 1: select decode(1, 2, c_float8, c_bool) as result, pg_typeof(r... +LINE 1: select decode(1, 1, c_float8, c_bool) as result, pg_typeof(r... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_float8, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int1) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 1 | numeric + 8.8 | numeric (1 row) -select decode(1, 2, c_float8, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int2) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 2 | numeric + 8.8 | numeric (1 row) -select decode(1, 2, c_float8, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4 | numeric + 8.8 | numeric (1 row) -select decode(1, 2, c_float8, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int8) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8 | numeric + 8.8 | numeric (1 row) -select decode(1, 2, c_float8, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to double precision +LINE 1: select decode(1, 1, c_float8, c_money) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | numeric +(1 row) + +select decode(1, 1, c_float8, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and raw cannot be matched +LINE 1: select decode(1, 1, c_float8, c_raw) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_float8, c_date) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_float8, c_time) as result, pg_typeof(r... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_float8, c_timetz) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_float8, c_timestamp) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_float8, c_timestamptz) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_float8, c_smalldatetime) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and interval cannot be matched +LINE 1: select decode(1, 1, c_float8, c_interval) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and reltime cannot be matched +LINE 1: select decode(1, 1, c_float8, c_reltime) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_float8, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and abstime cannot be matched +LINE 1: select decode(1, 1, c_float8, c_abstime) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_float8, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types double precision and boolean cannot be matched +LINE 1: select decode(1, 2, c_float8, c_bool) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_float8, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | numeric +(1 row) + +select decode(1, 2, c_float8, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | numeric +(1 row) + +select decode(1, 2, c_float8, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | numeric +(1 row) + +select decode(1, 2, c_float8, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | numeric +(1 row) + +select decode(1, 2, c_float8, c_float4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- 4.4 | numeric @@ -808,10 +1792,15 @@ ERROR: invalid input syntax for type numeric: "varchar2" CONTEXT: referenced column: result select decode(1, 2, c_float8, c_nvarchar2) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "nvarchar2" -CONTEXT: referenced column: result +CONTEXT: referenced column: to_number +SQL function "to_numeric" statement 1 +referenced column: result select decode(1, 2, c_float8, c_text) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "text" CONTEXT: referenced column: result +select decode(1, 2, c_float8, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: result select decode(1, 2, c_float8, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -869,6 +1858,144 @@ LINE 1: select decode(1, 2, c_float8, c_abstime) as 
result, pg_typeo... ^ CONTEXT: referenced column: result -- convert to numeric +select decode(1, 1, c_numeric, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and boolean cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_bool) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to numeric +LINE 1: select decode(1, 1, c_numeric, c_money) as result, pg_typeof... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | numeric +(1 row) + +select decode(1, 1, c_numeric, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and raw cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_raw) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_date) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_time) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_timetz) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_timestamp) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_timestamptz) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_smalldatetime) as result, p... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and interval cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_interval) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and reltime cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_reltime) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_numeric, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types numeric and abstime cannot be matched +LINE 1: select decode(1, 1, c_numeric, c_abstime) as result, pg_type... 
+ ^ +CONTEXT: referenced column: result select decode(1, 2, c_numeric, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types numeric and boolean cannot be matched LINE 1: select decode(1, 2, c_numeric, c_bool) as result, pg_typeof(... @@ -926,10 +2053,15 @@ ERROR: invalid input syntax for type numeric: "varchar2" CONTEXT: referenced column: result select decode(1, 2, c_numeric, c_nvarchar2) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "nvarchar2" -CONTEXT: referenced column: result +CONTEXT: referenced column: to_number +SQL function "to_numeric" statement 1 +referenced column: result select decode(1, 2, c_numeric, c_text) as result, pg_typeof(result) from tb_test; ERROR: invalid input syntax for type numeric: "text" CONTEXT: referenced column: result +select decode(1, 2, c_numeric, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: result select decode(1, 2, c_numeric, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -987,104 +2119,234 @@ LINE 1: select decode(1, 2, c_numeric, c_abstime) as result, pg_type... ^ CONTEXT: referenced column: result -- convert to money -select decode(1, 2, c_money, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types boolean and money cannot be matched -LINE 1: select decode(1, 2, c_money, c_bool) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_bool) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int1) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to tinyint -LINE 1: select decode(1, 2, c_money, c_int1) as result, pg_typeof(re... 
+LINE 1: select decode(1, 1, c_money, c_int1) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int2) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to smallint -LINE 1: select decode(1, 2, c_money, c_int2) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_int2) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int4) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to integer -LINE 1: select decode(1, 2, c_money, c_int4) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_int4) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int8) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to bigint -LINE 1: select decode(1, 2, c_money, c_int8) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_int8) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_float4) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to real -LINE 1: select decode(1, 2, c_money, c_float4) as result, pg_typeof(... +LINE 1: select decode(1, 1, c_money, c_float4) as result, pg_typeof(... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_float8) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to double precision -LINE 1: select decode(1, 2, c_money, c_float8) as result, pg_typeof(... +LINE 1: select decode(1, 1, c_money, c_float8) as result, pg_typeof(... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_numeric) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to numeric -LINE 1: select decode(1, 2, c_money, c_numeric) as result, pg_typeof... +LINE 1: select decode(1, 1, c_money, c_numeric) as result, pg_typeof... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_char) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to character -LINE 1: select decode(1, 2, c_money, c_char) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_char) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_bpchar) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to character -LINE 1: select decode(1, 2, c_money, c_bpchar) as result, pg_typeof(... +LINE 1: select decode(1, 1, c_money, c_bpchar) as result, pg_typeof(... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_varchar2) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to character varying -LINE 1: select decode(1, 2, c_money, c_varchar2) as result, pg_typeo... 
+LINE 1: select decode(1, 1, c_money, c_varchar2) as result, pg_typeo... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_nvarchar2) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to nvarchar2 -LINE 1: select decode(1, 2, c_money, c_nvarchar2) as result, pg_type... +LINE 1: select decode(1, 1, c_money, c_nvarchar2) as result, pg_type... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_text) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to text -LINE 1: select decode(1, 2, c_money, c_text) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_text) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_blank_text) as result, pg_typeof(result) from tb_test; ERROR: CASE/WHEN could not convert type money to text -LINE 1: select decode(1, 2, c_money, c_char2number_success) as resul... +LINE 1: select decode(1, 1, c_money, c_blank_text) as result, pg_typ... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to text +LINE 1: select decode(1, 1, c_money, c_char2number_success) as resul... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_money, c_raw) as result, pg_typeof(result) from tb_test; ERROR: DECODE types raw and money cannot be matched -LINE 1: select decode(1, 2, c_money, c_raw) as result, pg_typeof(res... +LINE 1: select decode(1, 1, c_money, c_raw) as result, pg_typeof(res... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_date) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp without time zone and money cannot be matched -LINE 1: select decode(1, 2, c_money, c_date) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_date) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_time) as result, pg_typeof(result) from tb_test; ERROR: DECODE types time without time zone and money cannot be matched -LINE 1: select decode(1, 2, c_money, c_time) as result, pg_typeof(re... +LINE 1: select decode(1, 1, c_money, c_time) as result, pg_typeof(re... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_timetz) as result, pg_typeof(result) from tb_test; ERROR: DECODE types time with time zone and money cannot be matched -LINE 1: select decode(1, 2, c_money, c_timetz) as result, pg_typeof(... +LINE 1: select decode(1, 1, c_money, c_timetz) as result, pg_typeof(... ^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_timestamp) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp without time zone and money cannot be matched -LINE 1: select decode(1, 2, c_money, c_timestamp) as result, pg_type... +LINE 1: select decode(1, 1, c_money, c_timestamp) as result, pg_type... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_money, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_timestamptz) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp with time zone and money cannot be matched -LINE 1: select decode(1, 2, c_money, c_timestamptz) as result, pg_ty... +LINE 1: select decode(1, 1, c_money, c_timestamptz) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_money, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and money cannot be matched +LINE 1: select decode(1, 1, c_money, c_smalldatetime) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_money, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and money cannot be matched +LINE 1: select decode(1, 1, c_money, c_interval) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_money, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and money cannot be matched +LINE 1: select decode(1, 1, c_money, c_reltime) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_money, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and money cannot be matched +LINE 1: select decode(1, 1, c_money, c_abstime) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types boolean and money cannot be matched +LINE 1: select decode(1, 2, c_money, c_bool) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to tinyint +LINE 1: select decode(1, 2, c_money, c_int1) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to smallint +LINE 1: select decode(1, 2, c_money, c_int2) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to integer +LINE 1: select decode(1, 2, c_money, c_int4) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to bigint +LINE 1: select decode(1, 2, c_money, c_int8) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to real +LINE 1: select decode(1, 2, c_money, c_float4) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to double precision +LINE 1: select decode(1, 2, c_money, c_float8) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to numeric +LINE 1: select decode(1, 2, c_money, c_numeric) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_char) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to character +LINE 1: select decode(1, 2, c_money, c_char) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to character +LINE 1: select decode(1, 2, c_money, c_bpchar) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to character varying +LINE 1: select decode(1, 2, c_money, c_varchar2) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to nvarchar2 +LINE 1: select decode(1, 2, c_money, c_nvarchar2) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_text) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to text +LINE 1: select decode(1, 2, c_money, c_text) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to text +LINE 1: select decode(1, 2, c_money, c_blank_text) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type money to text +LINE 1: select decode(1, 2, c_money, c_char2number_success) as resul... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and money cannot be matched +LINE 1: select decode(1, 2, c_money, c_raw) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and money cannot be matched +LINE 1: select decode(1, 2, c_money, c_date) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and money cannot be matched +LINE 1: select decode(1, 2, c_money, c_time) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and money cannot be matched +LINE 1: select decode(1, 2, c_money, c_timetz) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and money cannot be matched +LINE 1: select decode(1, 2, c_money, c_timestamp) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_money, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and money cannot be matched +LINE 1: select decode(1, 2, c_money, c_timestamptz) as result, pg_ty... ^ CONTEXT: referenced column: result select decode(1, 2, c_money, c_smalldatetime) as result, pg_typeof(result) from tb_test; @@ -1108,6 +2370,144 @@ LINE 1: select decode(1, 2, c_money, c_abstime) as result, pg_typeof... ^ CONTEXT: referenced column: result -- convert to char +select decode(1, 1, c_char, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types character and boolean cannot be matched +LINE 1: select decode(1, 1, c_char, c_bool) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to character +LINE 1: select decode(1, 1, c_char, c_money) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 1, c_char, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+------------------- + char | character varying +(1 row) + +select decode(1, 1, c_char, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + char | nvarchar2 +(1 row) + +select decode(1, 1, c_char, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + char | text +(1 row) + +select decode(1, 1, c_char, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + char | text +(1 row) + +select decode(1, 1, c_char, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + char | text +(1 row) + +select decode(1, 1, c_char, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types character and raw cannot be matched +LINE 1: select decode(1, 1, c_char, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_date) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp without time zone to character +LINE 1: select decode(1, 1, c_char, c_date) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to character +LINE 1: select decode(1, 1, c_char, c_time) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to character +LINE 1: select decode(1, 1, c_char, c_timetz) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp without time zone to character +LINE 1: select decode(1, 1, c_char, c_timestamp) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp with time zone to character +LINE 1: select decode(1, 1, c_char, c_timestamptz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type smalldatetime to character +LINE 1: select decode(1, 1, c_char, c_smalldatetime) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type interval to character +LINE 1: select decode(1, 1, c_char, c_interval) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type reltime to character +LINE 1: select decode(1, 1, c_char, c_reltime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_char, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type abstime to character +LINE 1: select decode(1, 1, c_char, c_abstime) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result select decode(1, 2, c_char, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types character and boolean cannot be matched LINE 1: select decode(1, 2, c_char, c_bool) as result, pg_typeof(res... @@ -1184,6 +2584,12 @@ select decode(1, 2, c_char, c_text) as result, pg_typeof(result) from tb_test; text | text (1 row) +select decode(1, 2, c_char, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + | text +(1 row) + select decode(1, 2, c_char, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -1241,68 +2647,206 @@ LINE 1: select decode(1, 2, c_char, c_abstime) as result, pg_typeof(... ^ CONTEXT: referenced column: result -- convert to bpchar -select decode(1, 2, c_bpchar, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types character and boolean cannot be matched -LINE 1: select decode(1, 2, c_bpchar, c_bool) as result, pg_typeof(r... +LINE 1: select decode(1, 1, c_bpchar, c_bool) as result, pg_typeof(r... 
^ CONTEXT: referenced column: result -select decode(1, 2, c_bpchar, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int1) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 1 | character + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int2) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 2 | character + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4 | character + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int8) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8 | character + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_float4) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4.4 | character + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_float8) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8.8 | character + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_numeric) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 9.999 | character + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_money) as result, pg_typeof(result) from tb_test; ERROR: CASE/ELSE could not 
convert type money to character -LINE 1: select decode(1, 2, c_bpchar, c_money) as result, pg_typeof(... +LINE 1: select decode(1, 1, c_bpchar, c_money) as result, pg_typeof(... ^ CONTEXT: referenced column: result -select decode(1, 2, c_bpchar, c_char) as result, pg_typeof(result) from tb_test; - result | pg_typeof -------------+----------- - char | character +select decode(1, 1, c_bpchar, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + bpchar | character (1 row) -select decode(1, 2, c_bpchar, c_varchar2) as result, pg_typeof(result) from tb_test; - result | pg_typeof -----------+------------------- - varchar2 | character varying +select decode(1, 1, c_bpchar, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+------------------- + bpchar | character varying +(1 row) + +select decode(1, 1, c_bpchar, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + bpchar | nvarchar2 +(1 row) + +select decode(1, 1, c_bpchar, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + bpchar | text +(1 row) + +select decode(1, 1, c_bpchar, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + bpchar | text +(1 row) + +select decode(1, 1, c_bpchar, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + bpchar | text +(1 row) + +select decode(1, 1, c_bpchar, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types character and raw cannot be matched +LINE 1: select decode(1, 1, c_bpchar, c_raw) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_date) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp without time zone to character +LINE 1: select decode(1, 1, c_bpchar, c_date) as result, pg_typeof(r... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to character +LINE 1: select decode(1, 1, c_bpchar, c_time) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to character +LINE 1: select decode(1, 1, c_bpchar, c_timetz) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp without time zone to character +LINE 1: select decode(1, 1, c_bpchar, c_timestamp) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp with time zone to character +LINE 1: select decode(1, 1, c_bpchar, c_timestamptz) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type smalldatetime to character +LINE 1: select decode(1, 1, c_bpchar, c_smalldatetime) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type interval to character +LINE 1: select decode(1, 1, c_bpchar, c_interval) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type reltime to character +LINE 1: select decode(1, 1, c_bpchar, c_reltime) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_bpchar, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type abstime to character +LINE 1: select decode(1, 1, c_bpchar, c_abstime) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_bpchar, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types character and boolean cannot be matched +LINE 1: select decode(1, 2, c_bpchar, c_bool) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_bpchar, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | character +(1 row) + +select decode(1, 2, c_bpchar, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | character +(1 row) + +select decode(1, 2, c_bpchar, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | character +(1 row) + +select decode(1, 2, c_bpchar, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | character +(1 row) + +select decode(1, 2, c_bpchar, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | character +(1 row) + +select decode(1, 2, c_bpchar, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | character +(1 row) + +select decode(1, 2, c_bpchar, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | character +(1 row) + +select decode(1, 2, c_bpchar, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to character +LINE 1: select decode(1, 2, c_bpchar, c_money) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_bpchar, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------+----------- + char | character +(1 row) + +select decode(1, 2, c_bpchar, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying (1 row) select decode(1, 2, c_bpchar, c_nvarchar2) as result, pg_typeof(result) from tb_test; @@ -1317,6 +2861,12 @@ select decode(1, 2, c_bpchar, c_text) as result, pg_typeof(result) from tb_test; text | text (1 row) +select decode(1, 2, c_bpchar, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + | text +(1 row) + select decode(1, 2, c_bpchar, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -1374,6 +2924,149 @@ LINE 1: select decode(1, 2, c_bpchar, c_abstime) as result, pg_typeo... ^ CONTEXT: referenced column: result -- convert to varchar2 +select decode(1, 1, c_varchar2, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types character varying and boolean cannot be matched +LINE 1: select decode(1, 1, c_varchar2, c_bool) as result, pg_typeof... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_varchar2, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to character varying +LINE 1: select decode(1, 1, c_varchar2, c_money) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_varchar2, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_nvarchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+----------- + varchar2 | text +(1 row) + +select decode(1, 1, c_varchar2, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+----------- + varchar2 | text +(1 row) + +select decode(1, 1, c_varchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+----------- + varchar2 | text +(1 row) + +select decode(1, 1, c_varchar2, c_raw) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_date) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to character varying +LINE 1: select decode(1, 1, c_varchar2, c_time) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_varchar2, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to character varying +LINE 1: select decode(1, 1, c_varchar2, c_timetz) as result, pg_type... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_varchar2, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp with time zone to character varying +LINE 1: select decode(1, 1, c_varchar2, c_timestamptz) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_varchar2, c_smalldatetime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_interval) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+------------------- + varchar2 | character varying +(1 row) + +select decode(1, 1, c_varchar2, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type reltime to character varying +LINE 1: select decode(1, 1, c_varchar2, c_reltime) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_varchar2, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type abstime to character varying +LINE 1: select decode(1, 1, c_varchar2, c_abstime) as result, pg_typ... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_varchar2, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types character varying and boolean cannot be matched LINE 1: select decode(1, 2, c_varchar2, c_bool) as result, pg_typeof... 
@@ -1450,6 +3143,12 @@ select decode(1, 2, c_varchar2, c_text) as result, pg_typeof(result) from tb_tes text | text (1 row) +select decode(1, 2, c_varchar2, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + | text +(1 row) + select decode(1, 2, c_varchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- @@ -1512,142 +3211,431 @@ LINE 1: select decode(1, 2, c_varchar2, c_abstime) as result, pg_typ... ^ CONTEXT: referenced column: result -- convert to nvarchar2 -select decode(1, 2, c_nvarchar2, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types nvarchar2 and boolean cannot be matched -LINE 1: select decode(1, 2, c_nvarchar2, c_bool) as result, pg_typeo... +LINE 1: select decode(1, 1, c_nvarchar2, c_bool) as result, pg_typeo... ^ CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_float8) as result, pg_typeof(result) from tb_test; + result 
| pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to nvarchar2 +LINE 1: select decode(1, 1, c_nvarchar2, c_money) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_nvarchar2, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | text +(1 row) + +select decode(1, 1, c_nvarchar2, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | text +(1 row) + +select decode(1, 1, c_nvarchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | text +(1 row) + +select decode(1, 1, c_nvarchar2, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types nvarchar2 and raw cannot be matched +LINE 1: select decode(1, 1, c_nvarchar2, c_raw) as result, pg_typeof... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_nvarchar2, c_date) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to nvarchar2 +LINE 1: select decode(1, 1, c_nvarchar2, c_time) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_nvarchar2, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to nvarchar2 +LINE 1: select decode(1, 1, c_nvarchar2, c_timetz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_nvarchar2, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp with time zone to nvarchar2 +LINE 1: select decode(1, 1, c_nvarchar2, c_timestamptz) as result, p... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_nvarchar2, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type smalldatetime to nvarchar2 +LINE 1: select decode(1, 1, c_nvarchar2, c_smalldatetime) as result,... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_nvarchar2, c_interval) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + nvarchar2 | nvarchar2 +(1 row) + +select decode(1, 1, c_nvarchar2, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type reltime to nvarchar2 +LINE 1: select decode(1, 1, c_nvarchar2, c_reltime) as result, pg_ty... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_nvarchar2, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type abstime to nvarchar2 +LINE 1: select decode(1, 1, c_nvarchar2, c_abstime) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types nvarchar2 and boolean cannot be matched +LINE 1: select decode(1, 2, c_nvarchar2, c_bool) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 2 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 4.4 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 8.8 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 9.999 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to nvarchar2 +LINE 1: select decode(1, 2, c_nvarchar2, c_money) as result, pg_type... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_char) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + char | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_bpchar) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + bpchar | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +----------+----------- + varchar2 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 2, c_nvarchar2, c_blank_text) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + | text +(1 row) + +select decode(1, 2, c_nvarchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 7.77 | text +(1 row) + +select decode(1, 2, c_nvarchar2, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types nvarchar2 and raw cannot be matched +LINE 1: select decode(1, 2, c_nvarchar2, c_raw) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_date) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------- + Fri Dec 10 00:00:00 2010 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to nvarchar2 +LINE 1: select decode(1, 2, c_nvarchar2, c_time) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to nvarchar2 +LINE 1: select decode(1, 2, c_nvarchar2, c_timetz) as result, pg_typ... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------- + Sun Dec 12 00:00:00 2010 | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type timestamp with time zone to nvarchar2 +LINE 1: select decode(1, 2, c_nvarchar2, c_timestamptz) as result, p... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type smalldatetime to nvarchar2 +LINE 1: select decode(1, 2, c_nvarchar2, c_smalldatetime) as result,... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_interval) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + @ 2 years | nvarchar2 +(1 row) + +select decode(1, 2, c_nvarchar2, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type reltime to nvarchar2 +LINE 1: select decode(1, 2, c_nvarchar2, c_reltime) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 2, c_nvarchar2, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type abstime to nvarchar2 +LINE 1: select decode(1, 2, c_nvarchar2, c_abstime) as result, pg_ty... + ^ +CONTEXT: referenced column: result +-- convert to text +select decode(1, 1, c_text, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types text and boolean cannot be matched +LINE 1: select decode(1, 1, c_text, c_bool) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_text, c_int1) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 1, c_text, c_int2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 1, c_text, c_int4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 1, c_text, c_int8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 1, c_text, c_float4) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 1, c_text, c_float8) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 1, c_text, c_numeric) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text +(1 row) + +select decode(1, 1, c_text, c_money) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type money to text +LINE 1: select decode(1, 1, c_text, c_money) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_text, c_char) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 1 | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_bpchar) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 2 | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_varchar2) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4 | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_nvarchar2) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8 | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_char2number_success) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 4.4 | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_raw) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 8.8 | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_date) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 9.999 | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_money) as result, pg_typeof(result) from tb_test; -ERROR: CASE/ELSE could not convert type money to nvarchar2 -LINE 1: select decode(1, 2, c_nvarchar2, c_money) as result, pg_type... 
- ^ -CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_time) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - char | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_timetz) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - bpchar | nvarchar2 + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_varchar2) as result, pg_typeof(result) from tb_test; - result | pg_typeof -----------+----------- - varchar2 | nvarchar2 +select decode(1, 1, c_text, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_timestamptz) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- text | text (1 row) -select decode(1, 2, c_nvarchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_smalldatetime) as result, pg_typeof(result) from tb_test; result | pg_typeof --------+----------- - 7.77 | text + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_raw) as result, pg_typeof(result) from tb_test; -ERROR: DECODE types nvarchar2 and raw cannot be matched -LINE 1: select decode(1, 2, c_nvarchar2, c_raw) as result, pg_typeof... 
- ^ -CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_date) as result, pg_typeof(result) from tb_test; - result | pg_typeof ---------------------------+----------- - Fri Dec 10 00:00:00 2010 | nvarchar2 +select decode(1, 1, c_text, c_interval) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_time) as result, pg_typeof(result) from tb_test; -ERROR: CASE/ELSE could not convert type time without time zone to nvarchar2 -LINE 1: select decode(1, 2, c_nvarchar2, c_time) as result, pg_typeo... - ^ -CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_timetz) as result, pg_typeof(result) from tb_test; -ERROR: CASE/ELSE could not convert type time with time zone to nvarchar2 -LINE 1: select decode(1, 2, c_nvarchar2, c_timetz) as result, pg_typ... - ^ -CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_timestamp) as result, pg_typeof(result) from tb_test; - result | pg_typeof ---------------------------+----------- - Sun Dec 12 00:00:00 2010 | nvarchar2 +select decode(1, 1, c_text, c_reltime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_timestamptz) as result, pg_typeof(result) from tb_test; -ERROR: CASE/ELSE could not convert type timestamp with time zone to nvarchar2 -LINE 1: select decode(1, 2, c_nvarchar2, c_timestamptz) as result, p... - ^ -CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_smalldatetime) as result, pg_typeof(result) from tb_test; -ERROR: CASE/ELSE could not convert type smalldatetime to nvarchar2 -LINE 1: select decode(1, 2, c_nvarchar2, c_smalldatetime) as result,... 
- ^ -CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_interval) as result, pg_typeof(result) from tb_test; - result | pg_typeof ------------+----------- - @ 2 years | nvarchar2 +select decode(1, 1, c_text, c_abstime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + text | text (1 row) -select decode(1, 2, c_nvarchar2, c_reltime) as result, pg_typeof(result) from tb_test; -ERROR: CASE/ELSE could not convert type reltime to nvarchar2 -LINE 1: select decode(1, 2, c_nvarchar2, c_reltime) as result, pg_ty... - ^ -CONTEXT: referenced column: result -select decode(1, 2, c_nvarchar2, c_abstime) as result, pg_typeof(result) from tb_test; -ERROR: CASE/ELSE could not convert type abstime to nvarchar2 -LINE 1: select decode(1, 2, c_nvarchar2, c_abstime) as result, pg_ty... - ^ -CONTEXT: referenced column: result --- convert to text select decode(1, 2, c_text, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types text and boolean cannot be matched LINE 1: select decode(1, 2, c_text, c_bool) as result, pg_typeof(res... @@ -1791,6 +3779,132 @@ select decode(1, 2, c_text, c_abstime) as result, pg_typeof(result) from tb_test (1 row) -- convert to raw +select decode(1, 1, c_raw, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and boolean cannot be matched +LINE 1: select decode(1, 1, c_raw, c_bool) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and tinyint cannot be matched +LINE 1: select decode(1, 1, c_raw, c_int1) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and smallint cannot be matched +LINE 1: select decode(1, 1, c_raw, c_int2) as result, pg_typeof(resu... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and integer cannot be matched +LINE 1: select decode(1, 1, c_raw, c_int4) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and bigint cannot be matched +LINE 1: select decode(1, 1, c_raw, c_int8) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and real cannot be matched +LINE 1: select decode(1, 1, c_raw, c_float4) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and double precision cannot be matched +LINE 1: select decode(1, 1, c_raw, c_float8) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and numeric cannot be matched +LINE 1: select decode(1, 1, c_raw, c_numeric) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and raw cannot be matched +LINE 1: select decode(1, 1, c_raw, c_money) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and character cannot be matched +LINE 1: select decode(1, 1, c_raw, c_char) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and character cannot be matched +LINE 1: select decode(1, 1, c_raw, c_bpchar) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_varchar2) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------+----------- + 1234 | raw +(1 row) + +select decode(1, 1, c_raw, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_raw, c_nvarchar2) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_text) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type text to raw +LINE 1: select decode(1, 1, c_raw, c_text) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type text to raw +LINE 1: select decode(1, 1, c_raw, c_blank_text) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type text to raw +LINE 1: select decode(1, 1, c_raw, c_char2number_success) as result,... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_raw, c_date) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_raw, c_time) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_raw, c_timetz) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_raw, c_timestamp) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_raw, c_timestamptz) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_raw, c_smalldatetime) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and interval cannot be matched +LINE 1: select decode(1, 1, c_raw, c_interval) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and reltime cannot be matched +LINE 1: select decode(1, 1, c_raw, c_reltime) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_raw, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and abstime cannot be matched +LINE 1: select decode(1, 1, c_raw, c_abstime) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_raw, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types raw and boolean cannot be matched LINE 1: select decode(1, 2, c_raw, c_bool) as result, pg_typeof(resu... @@ -1859,6 +3973,11 @@ ERROR: CASE/ELSE could not convert type text to raw LINE 1: select decode(1, 2, c_raw, c_text) as result, pg_typeof(resu... 
^ CONTEXT: referenced column: result +select decode(1, 2, c_raw, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type text to raw +LINE 1: select decode(1, 2, c_raw, c_blank_text) as result, pg_typeo... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_raw, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: CASE/ELSE could not convert type text to raw LINE 1: select decode(1, 2, c_raw, c_char2number_success) as result,... @@ -1904,12 +4023,141 @@ ERROR: DECODE types raw and reltime cannot be matched LINE 1: select decode(1, 2, c_raw, c_reltime) as result, pg_typeof(r... ^ CONTEXT: referenced column: result -select decode(1, 2, c_raw, c_abstime) as result, pg_typeof(result) from tb_test; -ERROR: DECODE types raw and abstime cannot be matched -LINE 1: select decode(1, 2, c_raw, c_abstime) as result, pg_typeof(r... - ^ +select decode(1, 2, c_raw, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types raw and abstime cannot be matched +LINE 1: select decode(1, 2, c_raw, c_abstime) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +-- convert to date +select decode(1, 1, c_date, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and boolean cannot be matched +LINE 1: select decode(1, 1, c_date, c_bool) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and tinyint cannot be matched +LINE 1: select decode(1, 1, c_date, c_int1) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and smallint cannot be matched +LINE 1: select decode(1, 1, c_date, c_int2) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and integer cannot be matched +LINE 1: select decode(1, 1, c_date, c_int4) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and bigint cannot be matched +LINE 1: select decode(1, 1, c_date, c_int8) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and real cannot be matched +LINE 1: select decode(1, 1, c_date, c_float4) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and double precision cannot be matched +LINE 1: select decode(1, 1, c_date, c_float8) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and numeric cannot be matched +LINE 1: select decode(1, 1, c_date, c_numeric) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_date, c_money) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and character cannot be matched +LINE 1: select decode(1, 1, c_date, c_char) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and character cannot be matched +LINE 1: select decode(1, 1, c_date, c_bpchar) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and character varying cannot be matched +LINE 1: select decode(1, 1, c_date, c_varchar2) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_date, c_nvarchar2) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_date, c_text) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_date, c_blank_text) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_date, c_char2number_success) as result... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and raw cannot be matched +LINE 1: select decode(1, 1, c_date, c_raw) as result, pg_typeof(resu... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_date, c_time) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_date, c_timetz) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Fri Dec 10 00:00:00 2010 | timestamp without time zone +(1 row) + +select decode(1, 1, c_date, c_timestamptz) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Fri Dec 10 00:00:00 2010 -08 | timestamp with time zone +(1 row) + +select decode(1, 1, c_date, c_smalldatetime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Fri Dec 10 00:00:00 2010 | timestamp without time zone +(1 row) + +select decode(1, 1, c_date, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and interval cannot be matched +LINE 1: select decode(1, 1, c_date, c_interval) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_date, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and reltime cannot be matched +LINE 1: select decode(1, 1, c_date, c_reltime) as result, pg_typeof(... 
+ ^ CONTEXT: referenced column: result --- convert to date +select decode(1, 1, c_date, c_abstime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Fri Dec 10 00:00:00 2010 | timestamp without time zone +(1 row) + select decode(1, 2, c_date, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp without time zone and boolean cannot be matched LINE 1: select decode(1, 2, c_date, c_bool) as result, pg_typeof(res... @@ -1980,6 +4228,11 @@ ERROR: DECODE types timestamp without time zone and text cannot be matched LINE 1: select decode(1, 2, c_date, c_text) as result, pg_typeof(res... ^ CONTEXT: referenced column: result +select decode(1, 2, c_date, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 2, c_date, c_blank_text) as result, pg_type... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_date, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp without time zone and text cannot be matched LINE 1: select decode(1, 2, c_date, c_char2number_success) as result... @@ -2035,6 +4288,132 @@ select decode(1, 2, c_date, c_abstime) as result, pg_typeof(result) from tb_test (1 row) -- convert to time +select decode(1, 1, c_time, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and boolean cannot be matched +LINE 1: select decode(1, 1, c_time, c_bool) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and tinyint cannot be matched +LINE 1: select decode(1, 1, c_time, c_int1) as result, pg_typeof(res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and smallint cannot be matched +LINE 1: select decode(1, 1, c_time, c_int2) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and integer cannot be matched +LINE 1: select decode(1, 1, c_time, c_int4) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and bigint cannot be matched +LINE 1: select decode(1, 1, c_time, c_int8) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and real cannot be matched +LINE 1: select decode(1, 1, c_time, c_float4) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and double precision cannot be matched +LINE 1: select decode(1, 1, c_time, c_float8) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and numeric cannot be matched +LINE 1: select decode(1, 1, c_time, c_numeric) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_time, c_money) as result, pg_typeof(re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and character cannot be matched +LINE 1: select decode(1, 1, c_time, c_char) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and character cannot be matched +LINE 1: select decode(1, 1, c_time, c_bpchar) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and character varying cannot be matched +LINE 1: select decode(1, 1, c_time, c_varchar2) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_time, c_nvarchar2) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_time, c_text) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_time, c_blank_text) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_time, c_char2number_success) as result... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and raw cannot be matched +LINE 1: select decode(1, 1, c_time, c_raw) as result, pg_typeof(resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_date) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time without time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_time, c_date) as result, pg_typeof(res... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_timetz) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-------------+--------------------- + 21:21:21-08 | time with time zone +(1 row) + +select decode(1, 1, c_time, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time without time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_time, c_timestamp) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time without time zone to timestamp with time zone +LINE 1: select decode(1, 1, c_time, c_timestamptz) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time without time zone to smalldatetime +LINE 1: select decode(1, 1, c_time, c_smalldatetime) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and interval cannot be matched +LINE 1: select decode(1, 1, c_time, c_interval) as result, pg_typeof... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and reltime cannot be matched +LINE 1: select decode(1, 1, c_time, c_reltime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_time, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time without time zone to abstime +LINE 1: select decode(1, 1, c_time, c_abstime) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_time, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types time without time zone and boolean cannot be matched LINE 1: select decode(1, 2, c_time, c_bool) as result, pg_typeof(res... @@ -2105,6 +4484,11 @@ ERROR: DECODE types time without time zone and text cannot be matched LINE 1: select decode(1, 2, c_time, c_text) as result, pg_typeof(res... ^ CONTEXT: referenced column: result +select decode(1, 2, c_time, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time without time zone and text cannot be matched +LINE 1: select decode(1, 2, c_time, c_blank_text) as result, pg_type... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_time, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types time without time zone and text cannot be matched LINE 1: select decode(1, 2, c_time, c_char2number_success) as result... @@ -2157,6 +4541,132 @@ LINE 1: select decode(1, 2, c_time, c_abstime) as result, pg_typeof(... ^ CONTEXT: referenced column: result -- convert to timetz +select decode(1, 1, c_timetz, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and boolean cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_bool) as result, pg_typeof(r... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and tinyint cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_int1) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and smallint cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_int2) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and integer cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_int4) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and bigint cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_int8) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and real cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_float4) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and double precision cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_float8) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and numeric cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_numeric) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_money) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and character cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_char) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and character cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_bpchar) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and character varying cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_varchar2) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_nvarchar2) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_text) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_blank_text) as result, pg_ty... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_char2number_success) as resu... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and raw cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_raw) as result, pg_typeof(re... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_date) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time with time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_timetz, c_date) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_time) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-------------+--------------------- + 21:21:21-08 | time with time zone +(1 row) + +select decode(1, 1, c_timetz, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time with time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_timetz, c_timestamp) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time with time zone to timestamp with time zone +LINE 1: select decode(1, 1, c_timetz, c_timestamptz) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time with time zone to smalldatetime +LINE 1: select decode(1, 1, c_timetz, c_smalldatetime) as result, pg... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and interval cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_interval) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and reltime cannot be matched +LINE 1: select decode(1, 1, c_timetz, c_reltime) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timetz, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: CASE/WHEN could not convert type time with time zone to abstime +LINE 1: select decode(1, 1, c_timetz, c_abstime) as result, pg_typeo... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_timetz, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types time with time zone and boolean cannot be matched LINE 1: select decode(1, 2, c_timetz, c_bool) as result, pg_typeof(r... @@ -2227,6 +4737,11 @@ ERROR: DECODE types time with time zone and text cannot be matched LINE 1: select decode(1, 2, c_timetz, c_text) as result, pg_typeof(r... ^ CONTEXT: referenced column: result +select decode(1, 2, c_timetz, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types time with time zone and text cannot be matched +LINE 1: select decode(1, 2, c_timetz, c_blank_text) as result, pg_ty... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_timetz, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types time with time zone and text cannot be matched LINE 1: select decode(1, 2, c_timetz, c_char2number_success) as resu... @@ -2278,7 +4793,136 @@ ERROR: CASE/WHEN could not convert type time with time zone to abstime LINE 1: select decode(1, 2, c_timetz, c_abstime) as result, pg_typeo... 
^ CONTEXT: referenced column: result --- convert to timestamp +-- convert to timestamp +select decode(1, 1, c_timestamp, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and boolean cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_bool) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and tinyint cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_int1) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and smallint cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_int2) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and integer cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_int4) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and bigint cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_int8) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and real cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_float4) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and double precision cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_float8) as result, pg_typ... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and numeric cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_numeric) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_money) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and character cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_char) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and character cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_bpchar) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and character varying cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_varchar2) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_nvarchar2) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_text) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_blank_text) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_char2number_success) as r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and raw cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_raw) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_date) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Sun Dec 12 00:00:00 2010 | timestamp without time zone +(1 row) + +select decode(1, 1, c_timestamp, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_timestamp, c_time) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to timestamp without time zone +LINE 1: select decode(1, 1, c_timestamp, c_timetz) as result, pg_typ... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_timestamptz) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Sun Dec 12 00:00:00 2010 -08 | timestamp with time zone +(1 row) + +select decode(1, 1, c_timestamp, c_smalldatetime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Sun Dec 12 00:00:00 2010 | timestamp without time zone +(1 row) + +select decode(1, 1, c_timestamp, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and interval cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_interval) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and reltime cannot be matched +LINE 1: select decode(1, 1, c_timestamp, c_reltime) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamp, c_abstime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Sun Dec 12 00:00:00 2010 | timestamp without time zone +(1 row) + select decode(1, 2, c_timestamp, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp without time zone and boolean cannot be matched LINE 1: select decode(1, 2, c_timestamp, c_bool) as result, pg_typeo... @@ -2349,6 +4993,11 @@ ERROR: DECODE types timestamp without time zone and text cannot be matched LINE 1: select decode(1, 2, c_timestamp, c_text) as result, pg_typeo... ^ CONTEXT: referenced column: result +select decode(1, 2, c_timestamp, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp without time zone and text cannot be matched +LINE 1: select decode(1, 2, c_timestamp, c_blank_text) as result, pg... 
+ ^ +CONTEXT: referenced column: result select decode(1, 2, c_timestamp, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp without time zone and text cannot be matched LINE 1: select decode(1, 2, c_timestamp, c_char2number_success) as r... @@ -2404,6 +5053,135 @@ select decode(1, 2, c_timestamp, c_abstime) as result, pg_typeof(result) from tb (1 row) -- convert to timestamptz +select decode(1, 1, c_timestamptz, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and boolean cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_bool) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and tinyint cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_int1) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and smallint cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_int2) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and integer cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_int4) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and bigint cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_int8) as result, pg_typ... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and real cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_float4) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and double precision cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_float8) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and numeric cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_numeric) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_money) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and character cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_char) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and character cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_bpchar) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and character varying cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_varchar2) as result, pg... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_nvarchar2) as result, p... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_text) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_blank_text) as result, ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and text cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_char2number_success) as... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and raw cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_raw) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_date) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Wed Dec 11 00:00:00 2013 -08 | timestamp with time zone +(1 row) + +select decode(1, 1, c_timestamptz, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to timestamp with time zone +LINE 1: select decode(1, 1, c_timestamptz, c_time) as result, pg_typ... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to timestamp with time zone +LINE 1: select decode(1, 1, c_timestamptz, c_timetz) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Wed Dec 11 00:00:00 2013 -08 | timestamp with time zone +(1 row) + +select decode(1, 1, c_timestamptz, c_smalldatetime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Wed Dec 11 00:00:00 2013 -08 | timestamp with time zone +(1 row) + +select decode(1, 1, c_timestamptz, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and interval cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_interval) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and reltime cannot be matched +LINE 1: select decode(1, 1, c_timestamptz, c_reltime) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_timestamptz, c_abstime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Wed Dec 11 00:00:00 2013 -08 | timestamp with time zone +(1 row) + select decode(1, 2, c_timestamptz, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp with time zone and boolean cannot be matched LINE 1: select decode(1, 2, c_timestamptz, c_bool) as result, pg_typ... 
@@ -2474,6 +5252,11 @@ ERROR: DECODE types timestamp with time zone and text cannot be matched LINE 1: select decode(1, 2, c_timestamptz, c_text) as result, pg_typ... ^ CONTEXT: referenced column: result +select decode(1, 2, c_timestamptz, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types timestamp with time zone and text cannot be matched +LINE 1: select decode(1, 2, c_timestamptz, c_blank_text) as result, ... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_timestamptz, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types timestamp with time zone and text cannot be matched LINE 1: select decode(1, 2, c_timestamptz, c_char2number_success) as... @@ -2529,6 +5312,135 @@ select decode(1, 2, c_timestamptz, c_abstime) as result, pg_typeof(result) from (1 row) -- convert to smalldatetime +select decode(1, 1, c_smalldatetime, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and boolean cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_bool) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and tinyint cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_int1) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and smallint cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_int2) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and integer cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_int4) as result, pg_t... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and bigint cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_int8) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and real cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_float4) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and double precision cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_float8) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and numeric cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_numeric) as result, p... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_money) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and character cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_char) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and character cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_bpchar) as result, pg... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and character varying cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_varchar2) as result, ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_nvarchar2) as result,... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and text cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_text) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and text cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_blank_text) as result... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and text cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_char2number_success) ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and raw cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_raw) as result, pg_ty... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_date) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Sat Apr 12 04:05:00 2003 | timestamp without time zone +(1 row) + +select decode(1, 1, c_smalldatetime, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to smalldatetime +LINE 1: select decode(1, 1, c_smalldatetime, c_time) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to smalldatetime +LINE 1: select decode(1, 1, c_smalldatetime, c_timetz) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Sat Apr 12 04:05:00 2003 | timestamp without time zone +(1 row) + +select decode(1, 1, c_smalldatetime, c_timestamptz) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Sat Apr 12 04:05:00 2003 -08 | timestamp with time zone +(1 row) + +select decode(1, 1, c_smalldatetime, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and interval cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_interval) as result, ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and reltime cannot be matched +LINE 1: select decode(1, 1, c_smalldatetime, c_reltime) as result, p... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_smalldatetime, c_abstime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+--------------- + Sat Apr 12 04:05:00 2003 | smalldatetime +(1 row) + select decode(1, 2, c_smalldatetime, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types smalldatetime and boolean cannot be matched LINE 1: select decode(1, 2, c_smalldatetime, c_bool) as result, pg_t... @@ -2599,6 +5511,11 @@ ERROR: DECODE types smalldatetime and text cannot be matched LINE 1: select decode(1, 2, c_smalldatetime, c_text) as result, pg_t... ^ CONTEXT: referenced column: result +select decode(1, 2, c_smalldatetime, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types smalldatetime and text cannot be matched +LINE 1: select decode(1, 2, c_smalldatetime, c_blank_text) as result... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_smalldatetime, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types smalldatetime and text cannot be matched LINE 1: select decode(1, 2, c_smalldatetime, c_char2number_success) ... @@ -2654,6 +5571,132 @@ select decode(1, 2, c_smalldatetime, c_abstime) as result, pg_typeof(result) fro (1 row) -- convert to interval +select decode(1, 1, c_interval, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and boolean cannot be matched +LINE 1: select decode(1, 1, c_interval, c_bool) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and tinyint cannot be matched +LINE 1: select decode(1, 1, c_interval, c_int1) as result, pg_typeof... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and smallint cannot be matched +LINE 1: select decode(1, 1, c_interval, c_int2) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and integer cannot be matched +LINE 1: select decode(1, 1, c_interval, c_int4) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and bigint cannot be matched +LINE 1: select decode(1, 1, c_interval, c_int8) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and real cannot be matched +LINE 1: select decode(1, 1, c_interval, c_float4) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and double precision cannot be matched +LINE 1: select decode(1, 1, c_interval, c_float8) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and numeric cannot be matched +LINE 1: select decode(1, 1, c_interval, c_numeric) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and interval cannot be matched +LINE 1: select decode(1, 1, c_interval, c_money) as result, pg_typeo... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and character cannot be matched +LINE 1: select decode(1, 1, c_interval, c_char) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and character cannot be matched +LINE 1: select decode(1, 1, c_interval, c_bpchar) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and character varying cannot be matched +LINE 1: select decode(1, 1, c_interval, c_varchar2) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_interval, c_nvarchar2) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and text cannot be matched +LINE 1: select decode(1, 1, c_interval, c_text) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and text cannot be matched +LINE 1: select decode(1, 1, c_interval, c_blank_text) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and text cannot be matched +LINE 1: select decode(1, 1, c_interval, c_char2number_success) as re... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and raw cannot be matched +LINE 1: select decode(1, 1, c_interval, c_raw) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_interval, c_date) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_interval, c_time) as result, pg_typeof... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_interval, c_timetz) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_interval, c_timestamp) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_interval, c_timestamptz) as result, pg... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_interval, c_smalldatetime) as result, ... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_interval, c_reltime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +-----------+----------- + @ 2 years | interval +(1 row) + +select decode(1, 1, c_interval, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and abstime cannot be matched +LINE 1: select decode(1, 1, c_interval, c_abstime) as result, pg_typ... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_interval, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types interval and boolean cannot be matched LINE 1: select decode(1, 2, c_interval, c_bool) as result, pg_typeof... @@ -2724,6 +5767,11 @@ ERROR: DECODE types interval and text cannot be matched LINE 1: select decode(1, 2, c_interval, c_text) as result, pg_typeof... ^ CONTEXT: referenced column: result +select decode(1, 2, c_interval, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types interval and text cannot be matched +LINE 1: select decode(1, 2, c_interval, c_blank_text) as result, pg_... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_interval, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types interval and text cannot be matched LINE 1: select decode(1, 2, c_interval, c_char2number_success) as re... @@ -2776,6 +5824,132 @@ LINE 1: select decode(1, 2, c_interval, c_abstime) as result, pg_typ... ^ CONTEXT: referenced column: result -- convert to reltime +select decode(1, 1, c_reltime, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and boolean cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_bool) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and tinyint cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_int1) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and smallint cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_int2) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and integer cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_int4) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and bigint cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_int8) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and real cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_float4) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and double precision cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_float8) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and numeric cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_numeric) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and reltime cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_money) as result, pg_typeof... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and character cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_char) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and character cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_bpchar) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and character varying cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_varchar2) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_nvarchar2) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and text cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_text) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and text cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_blank_text) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and text cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_char2number_success) as res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and raw cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_raw) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_date) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_date) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_time) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and time without time zone cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_time) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and time with time zone cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_timetz) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_timestamp) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and timestamp without time zone cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_timestamp) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_timestamptz) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and timestamp with time zone cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_timestamptz) as result, pg_... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_smalldatetime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and smalldatetime cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_smalldatetime) as result, p... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_reltime, c_interval) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------+----------- + @ 1 mon 12 hours | interval +(1 row) + +select decode(1, 1, c_reltime, c_abstime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and abstime cannot be matched +LINE 1: select decode(1, 1, c_reltime, c_abstime) as result, pg_type... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_reltime, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types reltime and boolean cannot be matched LINE 1: select decode(1, 2, c_reltime, c_bool) as result, pg_typeof(... @@ -2846,6 +6020,11 @@ ERROR: DECODE types reltime and text cannot be matched LINE 1: select decode(1, 2, c_reltime, c_text) as result, pg_typeof(... ^ CONTEXT: referenced column: result +select decode(1, 2, c_reltime, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types reltime and text cannot be matched +LINE 1: select decode(1, 2, c_reltime, c_blank_text) as result, pg_t... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_reltime, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types reltime and text cannot be matched LINE 1: select decode(1, 2, c_reltime, c_char2number_success) as res... @@ -2898,6 +6077,135 @@ LINE 1: select decode(1, 2, c_reltime, c_abstime) as result, pg_type... ^ CONTEXT: referenced column: result -- convert to abstime +select decode(1, 1, c_abstime, c_bool) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and boolean cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_bool) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_int1) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and tinyint cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_int1) as result, pg_typeof(... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_int2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and smallint cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_int2) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_int4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and integer cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_int4) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_int8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and bigint cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_int8) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_float4) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and real cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_float4) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_float8) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and double precision cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_float8) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_numeric) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and numeric cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_numeric) as result, pg_type... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_money) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types money and abstime cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_money) as result, pg_typeof... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_char) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and character cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_char) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_bpchar) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and character cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_bpchar) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_varchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and character varying cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_varchar2) as result, pg_typ... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_nvarchar2) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and nvarchar2 cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_nvarchar2) as result, pg_ty... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and text cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_text) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and text cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_blank_text) as result, pg_t... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_char2number_success) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and text cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_char2number_success) as res... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_raw) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and raw cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_raw) as result, pg_typeof(r... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_date) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Mon May 01 00:30:30 1995 | timestamp without time zone +(1 row) + +select decode(1, 1, c_abstime, c_time) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time without time zone to abstime +LINE 1: select decode(1, 1, c_abstime, c_time) as result, pg_typeof(... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_timetz) as result, pg_typeof(result) from tb_test; +ERROR: CASE/ELSE could not convert type time with time zone to abstime +LINE 1: select decode(1, 1, c_abstime, c_timetz) as result, pg_typeo... + ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_timestamp) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+----------------------------- + Mon May 01 00:30:30 1995 | timestamp without time zone +(1 row) + +select decode(1, 1, c_abstime, c_timestamptz) as result, pg_typeof(result) from tb_test; + result | pg_typeof +------------------------------+-------------------------- + Mon May 01 00:30:30 1995 -08 | timestamp with time zone +(1 row) + +select decode(1, 1, c_abstime, c_smalldatetime) as result, pg_typeof(result) from tb_test; + result | pg_typeof +--------------------------+--------------- + Mon May 01 00:31:00 1995 | smalldatetime +(1 row) + +select decode(1, 1, c_abstime, c_interval) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and interval cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_interval) as result, pg_typ... 
+ ^ +CONTEXT: referenced column: result +select decode(1, 1, c_abstime, c_reltime) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and reltime cannot be matched +LINE 1: select decode(1, 1, c_abstime, c_reltime) as result, pg_type... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_abstime, c_bool) as result, pg_typeof(result) from tb_test; ERROR: DECODE types abstime and boolean cannot be matched LINE 1: select decode(1, 2, c_abstime, c_bool) as result, pg_typeof(... @@ -2968,6 +6276,11 @@ ERROR: DECODE types abstime and text cannot be matched LINE 1: select decode(1, 2, c_abstime, c_text) as result, pg_typeof(... ^ CONTEXT: referenced column: result +select decode(1, 2, c_abstime, c_blank_text) as result, pg_typeof(result) from tb_test; +ERROR: DECODE types abstime and text cannot be matched +LINE 1: select decode(1, 2, c_abstime, c_blank_text) as result, pg_t... + ^ +CONTEXT: referenced column: result select decode(1, 2, c_abstime, c_char2number_success) as result, pg_typeof(result) from tb_test; ERROR: DECODE types abstime and text cannot be matched LINE 1: select decode(1, 2, c_abstime, c_char2number_success) as res... @@ -3028,7 +6341,7 @@ CONTEXT: referenced column: result -- number comparison delete from tb_test; insert into tb_test values( - 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, '1', '1', '1', '1', '1', '1', '1', + 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, '1', '1', '1', '1', '1', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -3108,6 +6421,9 @@ select decode(c_text, c_bool, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! 
(1 row) +select decode(c_blank_text, c_bool, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_bool, 'Conversion successfully!', 'Conversion failed!') from tb_test; ERROR: operator does not exist: raw = boolean HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. @@ -3188,6 +6504,9 @@ select decode(c_text, c_int1, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! (1 row) +select decode(c_blank_text, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3270,6 +6589,9 @@ select decode(c_text, c_int2, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! (1 row) +select decode(c_blank_text, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3352,6 +6674,9 @@ select decode(c_text, c_int4, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! (1 row) +select decode(c_blank_text, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3434,6 +6759,9 @@ select decode(c_text, c_int8, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! 
(1 row) +select decode(c_blank_text, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3516,6 +6844,9 @@ select decode(c_text, c_float4, 'Conversion successfully!', 'Conversion failed!' Conversion successfully! (1 row) +select decode(c_blank_text, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3598,6 +6929,9 @@ select decode(c_text, c_float8, 'Conversion successfully!', 'Conversion failed!' Conversion successfully! (1 row) +select decode(c_blank_text, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3680,6 +7014,9 @@ select decode(c_text, c_numeric, 'Conversion successfully!', 'Conversion failed! Conversion successfully! 
(1 row) +select decode(c_blank_text, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type numeric: " " +CONTEXT: referenced column: case select decode(c_raw, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3733,6 +7070,9 @@ CONTEXT: referenced column: case select decode(c_text, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; ERROR: failed to find conversion function from money to numeric CONTEXT: referenced column: case +select decode(c_blank_text, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: failed to find conversion function from money to numeric +CONTEXT: referenced column: case select decode(c_raw, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; ERROR: operator does not exist: raw = money HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. @@ -3813,6 +7153,12 @@ select decode(c_text, c_char, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! (1 row) +select decode(c_blank_text, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_raw, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3895,6 +7241,12 @@ select decode(c_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!' Conversion successfully! (1 row) +select decode(c_blank_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_raw, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -3977,6 +7329,12 @@ select decode(c_text, c_varchar2, 'Conversion successfully!', 'Conversion failed Conversion successfully! 
(1 row) +select decode(c_blank_text, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_raw, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -4059,6 +7417,12 @@ select decode(c_text, c_nvarchar2, 'Conversion successfully!', 'Conversion faile Conversion successfully! (1 row) +select decode(c_blank_text, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_raw, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -4227,11 +7591,17 @@ select decode(c_text, c_raw, 'Conversion successfully!', 'Conversion failed!') f Conversion failed! (1 row) +select decode(c_blank_text, c_raw, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + -- datetime comparison delete from tb_test; insert into tb_test values( 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, - '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', '1', '1', + '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -4259,6 +7629,12 @@ select decode(c_text, c_char, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! (1 row) +select decode(c_blank_text, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_date, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -4313,6 +7689,12 @@ select decode(c_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!' Conversion successfully! 
(1 row) +select decode(c_blank_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_date, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -4367,6 +7749,12 @@ select decode(c_text, c_varchar2, 'Conversion successfully!', 'Conversion failed Conversion successfully! (1 row) +select decode(c_blank_text, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_date, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -4421,6 +7809,12 @@ select decode(c_text, c_nvarchar2, 'Conversion successfully!', 'Conversion faile Conversion successfully! (1 row) +select decode(c_blank_text, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_date, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -4535,6 +7929,9 @@ select decode(c_text, c_date, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! (1 row) +select decode(c_blank_text, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type timestamp: " " +CONTEXT: referenced column: case select decode(c_timestamp, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -4589,6 +7986,9 @@ select decode(c_text, c_timestamp, 'Conversion successfully!', 'Conversion faile Conversion failed! 
(1 row) +select decode(c_blank_text, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type timestamp: " " +CONTEXT: referenced column: case select decode(c_date, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -4643,6 +8043,9 @@ select decode(c_text, c_timestamptz, 'Conversion successfully!', 'Conversion fai Conversion failed! (1 row) +select decode(c_blank_text, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type timestamp: " " +CONTEXT: referenced column: case select decode(c_date, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -4697,6 +8100,12 @@ select decode(c_text, c_smalldatetime, 'Conversion successfully!', 'Conversion f Conversion failed! (1 row) +select decode(c_blank_text, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_date, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -4751,6 +8160,12 @@ select decode(c_text, c_abstime, 'Conversion successfully!', 'Conversion failed! Conversion failed! (1 row) +select decode(c_blank_text, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! 
+(1 row) + select decode(c_date, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------- @@ -4779,7 +8194,7 @@ select decode(c_smalldatetime, c_abstime, 'Conversion successfully!', 'Conversio delete from tb_test; insert into tb_test values( 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, - '21:21:21', '21:21:21', '21:21:21', '21:21:21', '21:21:21', '1', '1', + '21:21:21', '21:21:21', '21:21:21', '21:21:21', '21:21:21', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -4873,6 +8288,12 @@ select decode(c_text, c_time, 'Conversion successfully!', 'Conversion failed!') Conversion successfully! (1 row) +select decode(c_blank_text, c_time, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_timetz, c_time, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -4909,6 +8330,12 @@ select decode(c_text, c_timetz, 'Conversion successfully!', 'Conversion failed!' Conversion failed! (1 row) +select decode(c_blank_text, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! +(1 row) + select decode(c_time, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -4919,7 +8346,7 @@ select decode(c_time, c_timetz, 'Conversion successfully!', 'Conversion failed!' 
delete from tb_test; insert into tb_test values( 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, - '2 year', '2 year', '2 year', '2 year', '2 year', '1', '1', + '2 year', '2 year', '2 year', '2 year', '2 year', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -5013,6 +8440,9 @@ select decode(c_text, c_interval, 'Conversion successfully!', 'Conversion failed Conversion successfully! (1 row) +select decode(c_blank_text, c_interval, 'Conversion successfully!', 'Conversion failed!') from tb_test; +ERROR: invalid input syntax for type interval: "" +CONTEXT: referenced column: case select decode(c_reltime, c_interval, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -5049,6 +8479,12 @@ select decode(c_text, c_reltime, 'Conversion successfully!', 'Conversion failed! Conversion failed! (1 row) +select decode(c_blank_text, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; + case +-------------------- + Conversion failed! 
+(1 row) + select decode(c_interval, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; case -------------------------- @@ -5165,6 +8601,8 @@ CONTEXT: referenced column: case select case '1.0' when 1 then 'same' else 'different' end; ERROR: invalid input syntax for type bigint: "1.0" CONTEXT: referenced column: case +reset sql_beta_feature; +reset timezone; \c regression clean connection to all force for database decode_compatibility; drop database decode_compatibility; diff --git a/src/test/regress/sql/decode_compatible_with_o.sql b/src/test/regress/sql/decode_compatible_with_o.sql index d6cbaf5102..c727383b91 100644 --- a/src/test/regress/sql/decode_compatible_with_o.sql +++ b/src/test/regress/sql/decode_compatible_with_o.sql @@ -20,6 +20,7 @@ create table tb_test( c_varchar2 varchar2, c_nvarchar2 nvarchar2, c_text text, + c_blank_text text, c_char2number_success text, c_raw raw, c_date date, @@ -37,12 +38,38 @@ create table tb_test( -- test1: implicit type conversion from defresult to result1 -- ========================================================= insert into tb_test values( - 't', 1, 2, 4, 8, 4.4, 8.8, 9.999, 66, 'char', 'bpchar', 'varchar2', 'nvarchar2', 'text', '7.77', '1234', + 't', 1, 2, 4, 8, 4.4, 8.8, 9.999, 66, 'char', 'bpchar', 'varchar2', 'nvarchar2', 'text', ' ', '7.77', '1234', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-12-12', '2013-12-11 pst', '2003-04-12 04:05:06', interval '2' year, '30 DAYS 12:00:00', abstime 'Mon May 1 00:30:30 1995' ); -- convert to bool +select decode(1, 1, c_bool, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_float8) as result, 
pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bool, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_bool, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_int2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_int4) as result, pg_typeof(result) from tb_test; @@ -56,6 +83,7 @@ select decode(1, 2, c_bool, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_varchar2) 
as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_bool, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_date) as result, pg_typeof(result) from tb_test; @@ -68,6 +96,32 @@ select decode(1, 2, c_bool, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_bool, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bool, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to int1 +select decode(1, 1, c_int1, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, 
c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int1, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_int1, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_int2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_int4) as result, pg_typeof(result) from tb_test; @@ -81,6 +135,7 @@ select decode(1, 2, c_int1, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_int1, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int1, c_date) as result, pg_typeof(result) from tb_test; @@ -93,6 +148,32 @@ select decode(1, 2, c_int1, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_int1, c_reltime) as result, pg_typeof(result) from 
tb_test; select decode(1, 2, c_int1, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to int2 +select decode(1, 1, c_int2, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_interval) as result, 
pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int2, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_int2, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_int4) as result, pg_typeof(result) from tb_test; @@ -106,6 +187,7 @@ select decode(1, 2, c_int2, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_int2, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_date) as result, pg_typeof(result) from tb_test; @@ -118,6 +200,32 @@ select decode(1, 2, c_int2, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_int2, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int2, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to int4 +select decode(1, 1, c_int4, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, 
c_int4, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int4, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_int4, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_int2) as result, pg_typeof(result) from tb_test; @@ -131,6 +239,7 @@ select decode(1, 2, c_int4, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_nvarchar2) as result, pg_typeof(result) from tb_test; select 
decode(1, 2, c_int4, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_int4, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_date) as result, pg_typeof(result) from tb_test; @@ -143,6 +252,32 @@ select decode(1, 2, c_int4, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_int4, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int4, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to int8 +select decode(1, 1, c_int8, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_raw) as result, pg_typeof(result) 
from tb_test; +select decode(1, 1, c_int8, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_int8, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_int8, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_int2) as result, pg_typeof(result) from tb_test; @@ -156,6 +291,7 @@ select decode(1, 2, c_int8, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_int8, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_date) as result, pg_typeof(result) from tb_test; @@ -168,6 +304,32 @@ select decode(1, 2, c_int8, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_int8, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_int8, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to float4 +select decode(1, 1, 
c_float4, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float4, c_reltime) as result, pg_typeof(result) 
from tb_test; +select decode(1, 1, c_float4, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_float4, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_int2) as result, pg_typeof(result) from tb_test; @@ -181,6 +343,7 @@ select decode(1, 2, c_float4, c_bpchar) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_float4, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_float4, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_date) as result, pg_typeof(result) from tb_test; @@ -193,6 +356,32 @@ select decode(1, 2, c_float4, c_interval) as result, pg_typeof(result) from tb_t select decode(1, 2, c_float4, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float4, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to float8 +select decode(1, 1, c_float8, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_money) as result, pg_typeof(result) from tb_test; 
+select decode(1, 1, c_float8, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_float8, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_float8, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float8, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float8, c_int2) as result, pg_typeof(result) from tb_test; @@ -206,6 +395,7 @@ select decode(1, 2, c_float8, c_bpchar) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_float8, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float8, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, 
c_float8, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_float8, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float8, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float8, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float8, c_date) as result, pg_typeof(result) from tb_test; @@ -218,6 +408,32 @@ select decode(1, 2, c_float8, c_interval) as result, pg_typeof(result) from tb_t select decode(1, 2, c_float8, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_float8, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to numeric +select decode(1, 1, c_numeric, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 
1, c_numeric, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_numeric, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_numeric, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_numeric, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_numeric, c_int2) as result, pg_typeof(result) from tb_test; @@ -231,6 +447,7 @@ select decode(1, 2, c_numeric, c_bpchar) as result, pg_typeof(result) from tb_te select decode(1, 2, c_numeric, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_numeric, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_numeric, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_numeric, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_numeric, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_numeric, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_numeric, c_date) as result, pg_typeof(result) from tb_test; @@ -243,6 +460,32 @@ select decode(1, 2, c_numeric, c_interval) as result, pg_typeof(result) from tb_ select decode(1, 2, c_numeric, c_reltime) as result, pg_typeof(result) from tb_test; select 
decode(1, 2, c_numeric, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to money +select decode(1, 1, c_money, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_interval) as result, 
pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_money, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_money, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_int2) as result, pg_typeof(result) from tb_test; @@ -256,6 +499,7 @@ select decode(1, 2, c_money, c_bpchar) as result, pg_typeof(result) from tb_test select decode(1, 2, c_money, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_money, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_date) as result, pg_typeof(result) from tb_test; @@ -268,6 +512,32 @@ select decode(1, 2, c_money, c_interval) as result, pg_typeof(result) from tb_te select decode(1, 2, c_money, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_money, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to char +select decode(1, 1, c_char, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_float8) as result, pg_typeof(result) from tb_test; +select 
decode(1, 1, c_char, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_char, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_char, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_int2) as result, pg_typeof(result) from tb_test; @@ -281,6 +551,7 @@ select decode(1, 2, c_char, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_nvarchar2) as result, pg_typeof(result) from 
tb_test; select decode(1, 2, c_char, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_char, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_date) as result, pg_typeof(result) from tb_test; @@ -293,6 +564,32 @@ select decode(1, 2, c_char, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_char, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_char, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to bpchar +select decode(1, 1, c_bpchar, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 
1, c_bpchar, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_bpchar, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_bpchar, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_int2) as result, pg_typeof(result) from tb_test; @@ -306,6 +603,7 @@ select decode(1, 2, c_bpchar, c_char) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_bpchar, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, c_date) as result, pg_typeof(result) from tb_test; @@ -318,6 +616,32 @@ select decode(1, 2, c_bpchar, c_interval) as result, pg_typeof(result) from tb_t select decode(1, 2, c_bpchar, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_bpchar, 
c_abstime) as result, pg_typeof(result) from tb_test; -- convert to varchar2 +select decode(1, 1, c_varchar2, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select 
decode(1, 1, c_varchar2, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_varchar2, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_varchar2, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_int2) as result, pg_typeof(result) from tb_test; @@ -331,6 +655,7 @@ select decode(1, 2, c_varchar2, c_char) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_varchar2, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_varchar2, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_date) as result, pg_typeof(result) from tb_test; @@ -343,6 +668,32 @@ select decode(1, 2, c_varchar2, c_interval) as result, pg_typeof(result) from tb select decode(1, 2, c_varchar2, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_varchar2, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to nvarchar2 +select decode(1, 1, c_nvarchar2, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_float4) as result, 
pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_nvarchar2, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_nvarchar2, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_int2) as result, pg_typeof(result) from tb_test; @@ -356,6 +707,7 @@ select 
decode(1, 2, c_nvarchar2, c_char) as result, pg_typeof(result) from tb_te select decode(1, 2, c_nvarchar2, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_nvarchar2, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_date) as result, pg_typeof(result) from tb_test; @@ -368,6 +720,31 @@ select decode(1, 2, c_nvarchar2, c_interval) as result, pg_typeof(result) from t select decode(1, 2, c_nvarchar2, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_nvarchar2, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to text +select decode(1, 1, c_text, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_nvarchar2) 
as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_text, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_text, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_text, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_text, c_int2) as result, pg_typeof(result) from tb_test; @@ -393,6 +770,32 @@ select decode(1, 2, c_text, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_text, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_text, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to raw +select decode(1, 1, c_raw, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, 
c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_raw, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_raw, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_int2) as result, pg_typeof(result) from tb_test; @@ -407,6 +810,7 @@ select decode(1, 2, c_raw, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, 
c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_raw, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_date) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_time) as result, pg_typeof(result) from tb_test; @@ -418,6 +822,32 @@ select decode(1, 2, c_raw, c_interval) as result, pg_typeof(result) from tb_test select decode(1, 2, c_raw, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_raw, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to date +select decode(1, 1, c_date, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, 
c_date, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_date, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_date, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_int2) as result, pg_typeof(result) from tb_test; @@ -432,6 +862,7 @@ select decode(1, 2, c_date, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_date, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_time) as result, pg_typeof(result) from tb_test; @@ -443,6 +874,32 @@ select decode(1, 2, c_date, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_date, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_date, c_abstime) as result, 
pg_typeof(result) from tb_test; -- convert to time +select decode(1, 1, c_time, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_reltime) as 
result, pg_typeof(result) from tb_test; +select decode(1, 1, c_time, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_time, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_int2) as result, pg_typeof(result) from tb_test; @@ -457,6 +914,7 @@ select decode(1, 2, c_time, c_bpchar) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_time, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_date) as result, pg_typeof(result) from tb_test; @@ -468,6 +926,32 @@ select decode(1, 2, c_time, c_interval) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_time, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_time, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to timetz +select decode(1, 1, c_timetz, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_numeric) as result, pg_typeof(result) from tb_test; 
+select decode(1, 1, c_timetz, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timetz, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_timetz, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timetz, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timetz, c_int2) as result, pg_typeof(result) from tb_test; @@ -482,6 +966,7 @@ select decode(1, 2, c_timetz, c_bpchar) as result, pg_typeof(result) from tb_tes select decode(1, 2, c_timetz, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timetz, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, 
c_timetz, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_timetz, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timetz, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timetz, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timetz, c_date) as result, pg_typeof(result) from tb_test; @@ -493,6 +978,32 @@ select decode(1, 2, c_timetz, c_interval) as result, pg_typeof(result) from tb_t select decode(1, 2, c_timetz, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timetz, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to timestamp +select decode(1, 1, c_timestamp, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_blank_text) as result, pg_typeof(result) from tb_test; 
+select decode(1, 1, c_timestamp, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamp, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_timestamp, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_int2) as result, pg_typeof(result) from tb_test; @@ -507,6 +1018,7 @@ select decode(1, 2, c_timestamp, c_bpchar) as result, pg_typeof(result) from tb_ select decode(1, 2, c_timestamp, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_timestamp, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_date) as result, pg_typeof(result) from tb_test; @@ -518,6 +1030,32 @@ select decode(1, 2, c_timestamp, c_interval) as result, pg_typeof(result) from t select decode(1, 2, 
c_timestamp, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamp, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to timestamptz +select decode(1, 1, c_timestamptz, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, 
c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_timestamptz, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_timestamptz, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_int2) as result, pg_typeof(result) from tb_test; @@ -532,6 +1070,7 @@ select decode(1, 2, c_timestamptz, c_bpchar) as result, pg_typeof(result) from t select decode(1, 2, c_timestamptz, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_timestamptz, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_date) as result, pg_typeof(result) from tb_test; @@ -543,6 +1082,32 @@ select decode(1, 2, c_timestamptz, c_interval) as result, pg_typeof(result) from select decode(1, 2, c_timestamptz, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_timestamptz, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to smalldatetime +select decode(1, 1, c_smalldatetime, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_int2) as result, pg_typeof(result) from tb_test; 
+select decode(1, 1, c_smalldatetime, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_smalldatetime, c_abstime) as result, 
pg_typeof(result) from tb_test; + select decode(1, 2, c_smalldatetime, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_int2) as result, pg_typeof(result) from tb_test; @@ -557,6 +1122,7 @@ select decode(1, 2, c_smalldatetime, c_bpchar) as result, pg_typeof(result) from select decode(1, 2, c_smalldatetime, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_smalldatetime, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_date) as result, pg_typeof(result) from tb_test; @@ -568,6 +1134,32 @@ select decode(1, 2, c_smalldatetime, c_interval) as result, pg_typeof(result) fr select decode(1, 2, c_smalldatetime, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_smalldatetime, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to interval +select decode(1, 1, c_interval, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_numeric) as 
result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_reltime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_interval, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_interval, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_interval, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_interval, c_int2) as result, pg_typeof(result) from tb_test; @@ -582,6 +1174,7 @@ select decode(1, 2, c_interval, c_bpchar) as result, pg_typeof(result) from tb_t select decode(1, 2, c_interval, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, 
c_interval, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_interval, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_interval, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_interval, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_interval, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_interval, c_date) as result, pg_typeof(result) from tb_test; @@ -593,6 +1186,32 @@ select decode(1, 2, c_interval, c_smalldatetime) as result, pg_typeof(result) fr select decode(1, 2, c_interval, c_reltime) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_interval, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to reltime +select decode(1, 1, c_reltime, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 
1, c_reltime, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_timestamp) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_reltime, c_abstime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_reltime, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_int2) as result, pg_typeof(result) from tb_test; @@ -607,6 +1226,7 @@ select decode(1, 2, c_reltime, c_bpchar) as result, pg_typeof(result) from tb_te select decode(1, 2, c_reltime, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_reltime, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_date) as result, pg_typeof(result) from tb_test; @@ -618,6 +1238,32 @@ select decode(1, 2, c_reltime, c_smalldatetime) as result, pg_typeof(result) 
fro select decode(1, 2, c_reltime, c_interval) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_reltime, c_abstime) as result, pg_typeof(result) from tb_test; -- convert to abstime +select decode(1, 1, c_abstime, c_bool) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_int1) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_int2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_int4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_int8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_float4) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_float8) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_numeric) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_money) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_char) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_bpchar) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_varchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_nvarchar2) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_blank_text) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_char2number_success) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_raw) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_date) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_time) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_timetz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_timestamp) as result, pg_typeof(result) from tb_test; +select 
decode(1, 1, c_abstime, c_timestamptz) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_smalldatetime) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_interval) as result, pg_typeof(result) from tb_test; +select decode(1, 1, c_abstime, c_reltime) as result, pg_typeof(result) from tb_test; + select decode(1, 2, c_abstime, c_bool) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_abstime, c_int1) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_abstime, c_int2) as result, pg_typeof(result) from tb_test; @@ -632,6 +1278,7 @@ select decode(1, 2, c_abstime, c_bpchar) as result, pg_typeof(result) from tb_te select decode(1, 2, c_abstime, c_varchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_abstime, c_nvarchar2) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_abstime, c_text) as result, pg_typeof(result) from tb_test; +select decode(1, 2, c_abstime, c_blank_text) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_abstime, c_char2number_success) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_abstime, c_raw) as result, pg_typeof(result) from tb_test; select decode(1, 2, c_abstime, c_date) as result, pg_typeof(result) from tb_test; @@ -650,7 +1297,7 @@ select decode(1, 2, c_abstime, c_reltime) as result, pg_typeof(result) from tb_t -- number comparison delete from tb_test; insert into tb_test values( - 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, '1', '1', '1', '1', '1', '1', '1', + 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, '1', '1', '1', '1', '1', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -668,6 +1315,7 @@ select decode(c_bpchar, c_bool, 'Conversion successfully!', 'Conversion failed!' 
select decode(c_varchar2, c_bool, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_bool, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_bool, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_bool, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_bool, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -683,6 +1331,7 @@ select decode(c_bpchar, c_int1, 'Conversion successfully!', 'Conversion failed!' select decode(c_varchar2, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_int1, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -698,6 +1347,7 @@ select decode(c_bpchar, c_int2, 'Conversion successfully!', 'Conversion failed!' 
select decode(c_varchar2, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_int2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -713,6 +1363,7 @@ select decode(c_bpchar, c_int4, 'Conversion successfully!', 'Conversion failed!' select decode(c_varchar2, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_int4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -728,6 +1379,7 @@ select decode(c_bpchar, c_int8, 'Conversion successfully!', 'Conversion failed!' 
select decode(c_varchar2, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_int8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -743,6 +1395,7 @@ select decode(c_bpchar, c_float4, 'Conversion successfully!', 'Conversion failed select decode(c_varchar2, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_float4, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -758,6 +1411,7 @@ select decode(c_bpchar, c_float8, 'Conversion successfully!', 'Conversion failed select decode(c_varchar2, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_float8, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -773,6 +1427,7 @@ select 
decode(c_bpchar, c_numeric, 'Conversion successfully!', 'Conversion faile select decode(c_varchar2, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_numeric, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -788,6 +1443,7 @@ select decode(c_bpchar, c_money, 'Conversion successfully!', 'Conversion failed! select decode(c_varchar2, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -803,6 +1459,7 @@ select decode(c_bpchar, c_char, 'Conversion successfully!', 'Conversion failed!' 
select decode(c_varchar2, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -818,6 +1475,7 @@ select decode(c_char, c_bpchar, 'Conversion successfully!', 'Conversion failed!' select decode(c_varchar2, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -833,6 +1491,7 @@ select decode(c_char, c_varchar2, 'Conversion successfully!', 'Conversion failed select decode(c_bpchar, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -848,6 +1507,7 
@@ select decode(c_char, c_nvarchar2, 'Conversion successfully!', 'Conversion faile select decode(c_bpchar, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_varchar2, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_raw, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_bool, c_text, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -879,12 +1539,13 @@ select decode(c_bpchar, c_raw, 'Conversion successfully!', 'Conversion failed!') select decode(c_varchar2, c_raw, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_raw, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_raw, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_raw, 'Conversion successfully!', 'Conversion failed!') from tb_test; -- datetime comparison delete from tb_test; insert into tb_test values( 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, - '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', '1', '1', + '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', '12-10-2010', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -893,6 +1554,7 @@ select decode(c_bpchar, c_char, 'Conversion successfully!', 'Conversion failed!' 
select decode(c_varchar2, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_date, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_char, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -903,6 +1565,7 @@ select decode(c_char, c_bpchar, 'Conversion successfully!', 'Conversion failed!' select decode(c_varchar2, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_date, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_bpchar, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -913,6 +1576,7 @@ select decode(c_char, c_varchar2, 'Conversion successfully!', 'Conversion failed select decode(c_bpchar, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; 
select decode(c_date, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_varchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -923,6 +1587,7 @@ select decode(c_char, c_nvarchar2, 'Conversion successfully!', 'Conversion faile select decode(c_bpchar, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_varchar2, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_date, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_nvarchar2, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -944,6 +1609,7 @@ select decode(c_bpchar, c_date, 'Conversion successfully!', 'Conversion failed!' 
select decode(c_varchar2, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_smalldatetime, c_date, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -954,6 +1620,7 @@ select decode(c_bpchar, c_timestamp, 'Conversion successfully!', 'Conversion fai select decode(c_varchar2, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_date, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_smalldatetime, c_timestamp, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -964,6 +1631,7 @@ select decode(c_bpchar, c_timestamptz, 'Conversion successfully!', 'Conversion f select decode(c_varchar2, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_timestamptz, 'Conversion 
successfully!', 'Conversion failed!') from tb_test; select decode(c_date, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_smalldatetime, c_timestamptz, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -974,6 +1642,7 @@ select decode(c_bpchar, c_smalldatetime, 'Conversion successfully!', 'Conversion select decode(c_varchar2, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_date, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_smalldatetime, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -984,6 +1653,7 @@ select decode(c_bpchar, c_abstime, 'Conversion successfully!', 'Conversion faile select decode(c_varchar2, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_date, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamp, c_abstime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timestamptz, c_abstime, 'Conversion successfully!', 
'Conversion failed!') from tb_test; @@ -993,7 +1663,7 @@ select decode(c_smalldatetime, c_abstime, 'Conversion successfully!', 'Conversio delete from tb_test; insert into tb_test values( 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, - '21:21:21', '21:21:21', '21:21:21', '21:21:21', '21:21:21', '1', '1', + '21:21:21', '21:21:21', '21:21:21', '21:21:21', '21:21:21', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -1018,6 +1688,7 @@ select decode(c_bpchar, c_time, 'Conversion successfully!', 'Conversion failed!' select decode(c_varchar2, c_time, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_time, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_time, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_time, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_timetz, c_time, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_char, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -1025,13 +1696,14 @@ select decode(c_bpchar, c_timetz, 'Conversion successfully!', 'Conversion failed select decode(c_varchar2, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_time, c_timetz, 'Conversion successfully!', 'Conversion failed!') from tb_test; -- interval comparison delete from tb_test; insert into tb_test values( 1, 1, 1, 1, 1, 1.0, 1.0, 1.0, 1, - '2 year', '2 year', '2 year', '2 year', '2 year', '1', '1', + '2 year', '2 year', '2 
year', '2 year', '2 year', ' ', '1', '1', date '12-10-2010', '21:21:21', '21:21:21 pst', '2010-10-12', '2010-10-12 pst', '2010-10-12', interval '2' year, '2 year', abstime '2010-10-12' ); @@ -1056,6 +1728,7 @@ select decode(c_bpchar, c_interval, 'Conversion successfully!', 'Conversion fail select decode(c_varchar2, c_interval, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_interval, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_interval, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_interval, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_reltime, c_interval, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_char, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; @@ -1063,6 +1736,7 @@ select decode(c_bpchar, c_reltime, 'Conversion successfully!', 'Conversion faile select decode(c_varchar2, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_nvarchar2, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_text, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; +select decode(c_blank_text, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; select decode(c_interval, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test; ---- @@ -1099,6 +1773,9 @@ select decode('1.0'::text, 1, 'same', 'different'); select case 1 when '1.0' then 'same' else 'different' end; select case '1.0' when 1 then 'same' else 'different' end; +reset sql_beta_feature; +reset timezone; + \c regression clean connection to all force for database decode_compatibility; drop database decode_compatibility; -- Gitee From c1499a1a29c39577342b255f7259f2e8f289c4c8 Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 15 Aug 2024 19:27:09 +0800 
Subject: [PATCH 200/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9Amerge?= =?UTF-8?q?=20into=E6=BA=90=E4=B8=BA=E7=89=A9=E5=8C=96=E8=A7=86=E5=9B=BE?= =?UTF-8?q?=EF=BC=8C=E6=8A=A5=E9=94=99cannot=20lock=20rows=20in=20material?= =?UTF-8?q?ized=20view?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/runtime/executor/execMain.cpp | 12 ++++++---- .../regress/expected/merge_into_updated.out | 24 +++++++++++++++++++ src/test/regress/sql/merge_into_updated.sql | 24 +++++++++++++++++++ 3 files changed, 55 insertions(+), 5 deletions(-) diff --git a/src/gausskernel/runtime/executor/execMain.cpp b/src/gausskernel/runtime/executor/execMain.cpp index 2be1fd4655..d1efddd813 100755 --- a/src/gausskernel/runtime/executor/execMain.cpp +++ b/src/gausskernel/runtime/executor/execMain.cpp @@ -1790,11 +1790,13 @@ static void CheckValidRowMarkRel(Relation rel, RowMarkType markType) errmsg("cannot lock rows in contview \"%s\"", RelationGetRelationName(rel)))); break; case RELKIND_MATVIEW: - /* Should not get here */ - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot lock rows in materialized view \"%s\"", - RelationGetRelationName(rel)))); + /* Allow referencing a matview, but not actual locking clauses */ + if (markType != ROW_MARK_REFERENCE) { + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot lock rows in materialized view \"%s\"", + RelationGetRelationName(rel)))); + } break; case RELKIND_FOREIGN_TABLE: /* Should not get here; planner should have used ROW_MARK_COPY */ diff --git a/src/test/regress/expected/merge_into_updated.out b/src/test/regress/expected/merge_into_updated.out index 993e20df37..3f996778b4 100644 --- a/src/test/regress/expected/merge_into_updated.out +++ b/src/test/regress/expected/merge_into_updated.out @@ -134,6 +134,30 @@ select c1, c2, to_char(c3, 'yyyy-mm-dd') from t1 order by c1; 4 | e | 2023-09-19 (2 rows) +-- materialized view +drop table if exists t_a; 
+NOTICE: table "t_a" does not exist, skipping +drop table if exists t_b; +NOTICE: table "t_b" does not exist, skipping +create table t_a( +a_id int, +a_name varchar2(100) +); +create table t_b( +b_id int, +b_name varchar2(100) +); +DROP materialized VIEW if exists v_t_b; +NOTICE: materialized view "v_t_b" does not exist, skipping +create materialized view v_t_b as select * from t_b; +MERGE INTO t_a a +USING v_t_b vb +on (a.a_id=vb.b_id) +WHEN MATCHED THEN +update set a.a_name=vb.b_name; +DROP materialized VIEW v_t_b; +drop table t_a; +drop table t_b; drop schema merge_into_updated cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table t1 diff --git a/src/test/regress/sql/merge_into_updated.sql b/src/test/regress/sql/merge_into_updated.sql index 4d2b13af3a..46dcf4c81f 100644 --- a/src/test/regress/sql/merge_into_updated.sql +++ b/src/test/regress/sql/merge_into_updated.sql @@ -115,4 +115,28 @@ end; \parallel off select c1, c2, to_char(c3, 'yyyy-mm-dd') from t1 order by c1; +-- materialized view +drop table if exists t_a; +drop table if exists t_b; +create table t_a( +a_id int, +a_name varchar2(100) +); +create table t_b( +b_id int, +b_name varchar2(100) +); +DROP materialized VIEW if exists v_t_b; +create materialized view v_t_b as select * from t_b; + +MERGE INTO t_a a +USING v_t_b vb +on (a.a_id=vb.b_id) +WHEN MATCHED THEN +update set a.a_name=vb.b_name; + +DROP materialized VIEW v_t_b; +drop table t_a; +drop table t_b; + drop schema merge_into_updated cascade; -- Gitee From d49a4ec6b553007407cea0e55e2fb468ecf4f696 Mon Sep 17 00:00:00 2001 From: blig Date: Wed, 21 Aug 2024 14:58:59 +0800 Subject: [PATCH 201/347] =?UTF-8?q?=E3=80=90=E5=9B=9E=E5=90=886.0.0?= =?UTF-8?q?=E3=80=91mppdb=5Fdecoding=E9=80=BB=E8=BE=91=E5=A4=8D=E5=88=B6?= =?UTF-8?q?=E6=A7=BD=E8=A7=A3=E7=A0=81=E6=B7=BB=E5=8A=A0truncate=20table?= =?UTF-8?q?=E4=BA=8B=E5=8A=A1=E4=BF=A1=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
--- contrib/mppdb_decoding/mppdb_decoding.cpp | 52 ++++++++++++++++++++++- src/test/regress/output/ddl.source | 29 +++++++++++++ src/test/regress/sql/ddl.sql | 9 ++++ 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/contrib/mppdb_decoding/mppdb_decoding.cpp b/contrib/mppdb_decoding/mppdb_decoding.cpp index 4953f24e5c..60f22c7404 100644 --- a/contrib/mppdb_decoding/mppdb_decoding.cpp +++ b/contrib/mppdb_decoding/mppdb_decoding.cpp @@ -63,7 +63,8 @@ static void pg_output_begin( static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn); static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); - +static void pg_decode_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change); static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change); static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id); @@ -86,6 +87,7 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) cb->startup_cb = pg_decode_startup; cb->begin_cb = pg_decode_begin_txn; cb->change_cb = pg_decode_change; + cb->truncate_cb = pg_decode_truncate; cb->commit_cb = pg_decode_commit_txn; cb->abort_cb = pg_decode_abort_txn; cb->prepare_cb = pg_decode_prepare_txn; @@ -312,6 +314,54 @@ static void TupleToJsoninfo(Relation relation, cJSON* cols_name, cJSON* cols_typ } } +static void pg_decode_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + PluginTestDecodingData *data; + MemoryContext old; + int i; + + data = (PluginTestDecodingData*)ctx->output_plugin_private; + + /* output BEGIN if we haven't yet */ + if (data->skip_empty_xacts && !data->xact_wrote_changes) { + pg_output_begin(ctx, data, txn, 
false); + } + data->xact_wrote_changes = true; + + /* Avoid leaking memory by using and resetting our own context */ + old = MemoryContextSwitchTo(data->context); + + OutputPluginPrepareWrite(ctx, true); + + appendStringInfoString(ctx->out, "table "); + + for (i = 0; i < nrelations; i++) { + if (i > 0) + appendStringInfoString(ctx->out, ", "); + + appendStringInfoString(ctx->out, + quote_qualified_identifier(get_namespace_name(relations[i]->rd_rel->relnamespace), + NameStr(relations[i]->rd_rel->relname))); + } + + appendStringInfoString(ctx->out, ": TRUNCATE:"); + + if (change->data.truncate.restart_seqs + || change->data.truncate.cascade) { + if (change->data.truncate.restart_seqs) + appendStringInfo(ctx->out, " restart_seqs"); + if (change->data.truncate.cascade) + appendStringInfo(ctx->out, " cascade"); + } else + appendStringInfoString(ctx->out, " (no-flags)"); + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + + OutputPluginWrite(ctx, true); +} + /* * callback for individual changed tuples */ diff --git a/src/test/regress/output/ddl.source b/src/test/regress/output/ddl.source index 54758d6bb6..e362dc502d 100644 --- a/src/test/regress/output/ddl.source +++ b/src/test/regress/output/ddl.source @@ -361,3 +361,32 @@ drop table tr_sub; drop table table_without_key; drop table bmsql_order_line; drop sequence toasttable_rand_seq; +CREATE publication pub1 FOR ALL TABLES with (ddl = 'all'); +select 'init' from pg_create_logical_replication_slot('slot1', 'mppdb_decoding'); + ?column? 
+---------- + init +(1 row) + +create table test_truncate(id int); +truncate table test_truncate; +SELECT data FROM pg_logical_slot_get_changes('slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +--?.* +--?.* + BEGIN +--?.* +--?.* +--?.* + BEGIN + table public.test_truncate: TRUNCATE: (no-flags) +--?.* +(6 rows) + +SELECT pg_drop_replication_slot('slot1'); + pg_drop_replication_slot +-------------------------- + +(1 row) + +drop publication pub1; +--?.* diff --git a/src/test/regress/sql/ddl.sql b/src/test/regress/sql/ddl.sql index ea24afa42b..bc18b27c63 100644 --- a/src/test/regress/sql/ddl.sql +++ b/src/test/regress/sql/ddl.sql @@ -219,3 +219,12 @@ drop table tr_sub; drop table table_without_key; drop table bmsql_order_line; drop sequence toasttable_rand_seq; + +CREATE publication pub1 FOR ALL TABLES with (ddl = 'all'); +select 'init' from pg_create_logical_replication_slot('slot1', 'mppdb_decoding'); +create table test_truncate(id int); +truncate table test_truncate; +SELECT data FROM pg_logical_slot_get_changes('slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT pg_drop_replication_slot('slot1'); +drop publication pub1; +drop table test_truncate; \ No newline at end of file -- Gitee From 18411c1b4282b7b35cc7423091b4129bdd79ffe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Wed, 21 Aug 2024 10:06:25 +0800 Subject: [PATCH 202/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Durq=20meta=E6=A0=A1?= =?UTF-8?q?=E9=AA=8C=E8=AF=AF=E6=8A=A5error?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/ubtree/ubtrecycle.cpp | 26 ++++++++++++------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp index 25ac4091e7..bcec8572c4 100644 --- a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp +++ 
b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp @@ -1329,26 +1329,32 @@ static void UBTRecycleMetaDataVerify(UBTRecycleMeta metaData, Relation rel, Bloc BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - BlockNumber indexBlocks = (rel == NULL ? metaData->nblocksUpper : RelationGetNumberOfBlocks(rel)); - uint32 urqBlocks = MaxBlockNumber; - Oid oid = InvalidOid; - bool metaError = false; + BlockNumber urqBlocks = MaxBlockNumber; + BlockNumber indexBlocks = metaData->nblocksUpper; RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (rel != NULL) { RelationOpenSmgr(rel); urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); - oid = rel->rd_id; + indexBlocks = RelationGetNumberOfBlocks(rel); } - metaError = ((metaData->headBlkno == 1 - metaBlkno) || (metaData->tailBlkno == 1 - metaBlkno)) || - (metaData->headBlkno >= urqBlocks || metaData->tailBlkno >= urqBlocks) || (metaData->nblocksUpper > indexBlocks); - + bool metaError = (metaData->headBlkno == 1 - metaBlkno) || (metaData->tailBlkno == 1 - metaBlkno); + if (!metaError && rel != NULL) { + if (metaData->headBlkno >= urqBlocks || metaData->tailBlkno >= urqBlocks) { + urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); + metaError = metaData->headBlkno >= urqBlocks || metaData->tailBlkno >= urqBlocks; + } + if (!metaError && metaData->nblocksUpper > indexBlocks) { + indexBlocks = RelationGetNumberOfBlocks(rel); + metaError = metaData->nblocksUpper > indexBlocks; + } + } if (metaError) { ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( "[Verify URQ] urq meta is invalid : (meta info : headBlkno = %u, tailBlkno = %u, " "nblocksUpper = %u, nblocksLower = %u; urq_blocks = %u, index_blocks = %u), rnode[%u,%u,%u], block %u", - oid, metaBlkno, metaData->headBlkno, metaData->tailBlkno, metaData->nblocksUpper, - metaData->nblocksLower, urqBlocks, 
indexBlocks, rNode.spcNode, rNode.dbNode, rNode.relNode, metaBlkno))); + metaData->headBlkno, metaData->tailBlkno, metaData->nblocksUpper, metaData->nblocksLower, + urqBlocks, indexBlocks, rNode.spcNode, rNode.dbNode, rNode.relNode, metaBlkno))); } } \ No newline at end of file -- Gitee From ede1efa3237591b9f7e655b2adc85ec3c40c9a40 Mon Sep 17 00:00:00 2001 From: wang-mingxuanHedgehog <504013468@qq.com> Date: Thu, 22 Aug 2024 14:50:39 +0800 Subject: [PATCH 203/347] =?UTF-8?q?=E5=88=9B=E8=A1=A8=E6=97=B6=E6=8C=87?= =?UTF-8?q?=E5=AE=9A=E5=85=B6=E4=BB=96with=E5=8F=82=E6=95=B0=E6=97=B6?= =?UTF-8?q?=EF=BC=8C\d=E6=9F=A5=E8=AF=A2=E8=A1=A8=E4=BF=A1=E6=81=AF?= =?UTF-8?q?=E7=9A=84storage=5Ftype=E5=A4=A7=E5=B0=8F=E5=86=99=E4=B8=8D?= =?UTF-8?q?=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/tablecmds.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index e6a6e524fa..50b0cb48a2 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -1337,7 +1337,7 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt } if (g_instance.attr.attr_storage.enable_ustore && u_sess->attr.attr_sql.enable_default_ustore_table && relkind != RELKIND_MATVIEW && !IsSystemNamespace(relnamespace) && !assignedStorageType) { - DefElem *def2 = makeDefElem("storage_type", (Node *)makeString(TABLE_ACCESS_METHOD_USTORE)); + DefElem *def2 = makeDefElem("storage_type", (Node *)makeString(TABLE_ACCESS_METHOD_USTORE_LOWER)); res = lappend(options, def2); } } -- Gitee From c43386a5ff3af4ae5c4794699748a4b05af0e4ea Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Mon, 19 Aug 2024 10:33:13 +0800 Subject: [PATCH 204/347] =?UTF-8?q?gs=5Fdump=E5=AF=BC=E5=87=BA=E6=97=B6?= 
=?UTF-8?q?=E6=B7=BB=E5=8A=A0behavior=5Fcompat=5Foptions=E5=8F=82=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/pg_backup_archiver.cpp | 23 ---------- src/bin/pg_dump/pg_dump.cpp | 43 +++++++++++++++++++ src/test/regress/input/test_float_dump.source | 25 ++++++++++- .../regress/output/test_float_dump.source | 36 ++++++++++++++++ 4 files changed, 103 insertions(+), 24 deletions(-) diff --git a/src/bin/pg_dump/pg_backup_archiver.cpp b/src/bin/pg_dump/pg_backup_archiver.cpp index 7600bac2dd..dfd4491c94 100644 --- a/src/bin/pg_dump/pg_backup_archiver.cpp +++ b/src/bin/pg_dump/pg_backup_archiver.cpp @@ -203,7 +203,6 @@ static ParallelStateEntry* GetMyPSEntry(ParallelState* pstate); static void archive_close_connection(int code, void* arg); static void take_down_nsname_in_drop_stmt(const char *stmt, char *result, int len); static void get_role_password(RestoreOptions* opts); -static char* GetBehaviorCompatOptions(ArchiveHandle* fout); /* * Wrapper functions. 
@@ -2836,11 +2835,6 @@ static void _doSetFixedOutputState(ArchiveHandle* AH) if (findDBCompatibility(&AH->publicArc, PQdb(GetConnection(&AH->publicArc))) && hasSpecificExtension(&AH->publicArc, "dolphin")) (void)ahprintf(AH, "SET dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length';\n"); - /* set behavior_compat_options */ - char* compatOptions = GetBehaviorCompatOptions(AH); - (void)ahprintf(AH, "SET behavior_compat_options = '%s';\n", compatOptions); - free(compatOptions); - (void)ahprintf(AH, "\n"); } @@ -5298,21 +5292,4 @@ bool hasSpecificExtension(Archive* fout, const char* extensionName) PQclear(res); destroyPQExpBuffer(query); return ntups != 0; -} - -static char* GetBehaviorCompatOptions(ArchiveHandle* fout) -{ - char* val = NULL; - PGresult* res = PQexec(fout->connection, "show behavior_compat_options;"); - - if (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK) { - val = gs_strdup(PQgetvalue(res, 0, 0)); - } else { - val = gs_strdup(""); - } - - PQclear(res); - res = NULL; - - return val; } \ No newline at end of file diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 4cf78c39dd..85c6a50699 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -519,6 +519,7 @@ static void dumpColumnEncryptionKeys(Archive* fout, const Oid nspoid, const char #endif static void dumpEncoding(Archive* AH); static void dumpStdStrings(Archive* AH); +static void DumpBehaviorCompat(Archive* archive); static void binary_upgrade_set_type_oids_by_type_oid( Archive* Afout, PQExpBuffer upgrade_buffer, Oid pg_type_oid, bool error_table); static bool binary_upgrade_set_type_oids_by_rel_oid( @@ -1131,6 +1132,9 @@ int main(int argc, char** argv) pthread_t progressThreadDumpProgress; pthread_create(&progressThreadDumpProgress, NULL, ProgressReportDump, NULL); + /* Set special option: behavior_compat_options */ + DumpBehaviorCompat(fout); + /* Now the rearrangeable objects. 
*/ for (i = 0; i < numObjs; i++) { g_dumpObjNums++; @@ -4160,6 +4164,45 @@ static void dumpStdStrings(Archive* AH) destroyPQExpBuffer(qry); } +/* + * DumpBehaviorCompat: put the correct behavior_compat_options into the archive + */ +static void DumpBehaviorCompat(Archive* archive) +{ + ddr_Assert(archive != NULL); + PGconn* conn = GetConnection(archive); + PGresult* res; + PQExpBuffer qry = createPQExpBuffer(); + + res = PQexec(conn, "show behavior_compat_options;"); + if (res != NULL && PQresultStatus(res) == PGRES_TUPLES_OK) { + appendPQExpBuffer(qry, "SET behavior_compat_options = '%s';\n", PQgetvalue(res, 0, 0)); + } else { + appendPQExpBuffer(qry, "SET behavior_compat_options = '';\n"); + } + PQclear(res); + + ArchiveEntry(archive, + nilCatalogId, + createDumpId(), + "BEHAVIORCOMPAT", + NULL, + NULL, + "", + false, + "BEHAVIORCOMPAT", + SECTION_PRE_DATA, + qry->data, + "", + NULL, + NULL, + 0, + NULL, + NULL); + + destroyPQExpBuffer(qry); +} + /* * getBlobs: * Collect schema-level data about large objects diff --git a/src/test/regress/input/test_float_dump.source b/src/test/regress/input/test_float_dump.source index 48896516b0..2ff9412d93 100644 --- a/src/test/regress/input/test_float_dump.source +++ b/src/test/regress/input/test_float_dump.source @@ -26,5 +26,28 @@ select * from test_float order by c1; drop table test_float; +-- test dump custom +create user u_dmp password '1234@abcd'; +grant all privileges to u_dmp; + +create database db_restore; +ALTER DATABASE db_restore SET behavior_compat_options TO ''; + +\c test_float_dump +create table test_float (c1 int, c2 float(120)); +insert into test_float values (1, 3.14); +insert into test_float values (2, 1.79E+10); +insert into test_float values (3, -0.01); +select * from test_float order by c1; + +\! @abs_bindir@/gs_dump -p @portstring@ test_float_dump -f @abs_bindir@/t_dump -C -F c -w >/dev/null 2>&1; echo $? + +\! 
@abs_bindir@/gs_restore -h 127.0.0.1 -p @portstring@ -U u_dmp -W 1234@abcd -d db_restore -F c @abs_bindir@/t_dump + +\c db_restore +\d + \c postgres -drop database test_float_dump; \ No newline at end of file +drop database test_float_dump; +drop database db_restore; +drop user u_dmp cascade; \ No newline at end of file diff --git a/src/test/regress/output/test_float_dump.source b/src/test/regress/output/test_float_dump.source index bf4c26cd2e..4040337fe2 100644 --- a/src/test/regress/output/test_float_dump.source +++ b/src/test/regress/output/test_float_dump.source @@ -60,5 +60,41 @@ select * from test_float order by c1; (3 rows) drop table test_float; +-- test dump custom +create user u_dmp password '1234@abcd'; +grant all privileges to u_dmp; +create database db_restore; +ALTER DATABASE db_restore SET behavior_compat_options TO ''; +\c test_float_dump +create table test_float (c1 int, c2 float(120)); +insert into test_float values (1, 3.14); +insert into test_float values (2, 1.79E+10); +insert into test_float values (3, -0.01); +select * from test_float order by c1; + c1 | c2 +----+------------- + 1 | 3.14 + 2 | 17900000000 + 3 | -.01 +(3 rows) + +\! @abs_bindir@/gs_dump -p @portstring@ test_float_dump -f @abs_bindir@/t_dump -C -F c -w >/dev/null 2>&1; echo $? +--?.* +\! @abs_bindir@/gs_restore -h 127.0.0.1 -p @portstring@ -U u_dmp -W 1234@abcd -d db_restore -F c @abs_bindir@/t_dump +start restore operation ... +--?.* +end restore operation ... +restore operation successful +--?.* +\c db_restore +\d +--?.* +--?.* +--?.* +--?.* +(1 row) + \c postgres drop database test_float_dump; +drop database db_restore; +drop user u_dmp cascade; -- Gitee From 991f55d6fd1bcb7b6003e0b902f424bc2ba95ffe Mon Sep 17 00:00:00 2001 From: z00848344 Date: Fri, 23 Aug 2024 10:08:38 +0800 Subject: [PATCH 205/347] On branch pass_min_max Your branch is up to date with 'origin/pass_min_max'. 
Changes to be committed: modified: src/bin/gs_guc/cluster_guc.conf modified: src/common/backend/utils/misc/guc/guc_security.cpp --- src/bin/gs_guc/cluster_guc.conf | 1 + .../backend/utils/misc/guc/guc_security.cpp | 21 +++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index f37fc3102e..7270e459fd 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -319,6 +319,7 @@ sql_ignore_strategy|string|0,0|NULL|NULL| parctl_min_cost|int|-1,2147483647|NULL|NULL| io_control_unit|int|1000,1000000|NULL|NULL| io_limits|int|0,1073741823|NULL|NULL| +io_priority|enum|none,low,medium,high|NULL|NULL| gin_pending_list_limit|int|64,2147483647|kB|NULL| intervalstyle|enum|postgres,postgres_verbose,sql_standard,iso_8601,a|NULL|NULL| join_collapse_limit|int|1,2147483647|NULL|NULL| diff --git a/src/common/backend/utils/misc/guc/guc_security.cpp b/src/common/backend/utils/misc/guc/guc_security.cpp index 05628f159c..cb51b95c31 100755 --- a/src/common/backend/utils/misc/guc/guc_security.cpp +++ b/src/common/backend/utils/misc/guc/guc_security.cpp @@ -161,6 +161,8 @@ static bool check_ssl(bool* newval, void** extra, GucSource source); /* Database Security: Support password complexity */ static bool check_int_parameter(int* newval, void** extra, GucSource source); static bool check_ssl_ciphers(char** newval, void** extra, GucSource source); +static bool check_password_min_length(int* newval, void** extra, GucSource source); +static bool check_password_max_length(int* newval, void** extra, GucSource source); static void InitSecurityConfigureNamesBool(); static void InitSecurityConfigureNamesInt(); @@ -625,7 +627,7 @@ static void InitSecurityConfigureNamesInt() 8, 6, MAX_PASSWORD_LENGTH, - check_int_parameter, + check_password_min_length, NULL, NULL}, @@ -640,7 +642,7 @@ static void InitSecurityConfigureNamesInt() 32, 6, MAX_PASSWORD_LENGTH, - check_int_parameter, + 
check_password_max_length, NULL, NULL}, @@ -1447,3 +1449,18 @@ static bool check_ssl_ciphers(char** newval, void** extra, GucSource) return true; } +static bool check_password_min_length(int* newval, void** extra, GucSource source) +{ + if (*newval >= 0 && *newval <= u_sess->attr.attr_security.Password_max_length) { + return true; + } + return false; +} + +static bool check_password_max_length(int* newval, void** extra, GucSource source) +{ + if (*newval >= 0 && *newval >= u_sess->attr.attr_security.Password_min_length) { + return true; + } + return false; +} \ No newline at end of file -- Gitee From 327a0577af22e0d8cea6b3a1bd7fb4d547a31ceb Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Fri, 23 Aug 2024 11:26:33 +0800 Subject: [PATCH 206/347] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=EF=BC=9A=E4=BF=AE=E5=A4=8D=E4=BD=BF=E7=94=A8=E5=85=B3=E9=94=AE?= =?UTF-8?q?=E5=AD=97=E4=BD=9C=E4=B8=BA=E6=B8=B8=E6=A0=87=E5=90=8D=EF=BC=8C?= =?UTF-8?q?=E5=85=B3=E9=94=AE=E5=AD=97=E5=B8=A6=E5=8D=95=E5=BC=95=E5=8F=B7?= =?UTF-8?q?=E3=80=81=E5=8F=8D=E5=BC=95=E5=8F=B7=E6=8A=A5=E9=94=99=E4=BF=A1?= =?UTF-8?q?=E6=81=AF=E4=B8=8D=E5=90=88=E7=90=86=E7=9A=84=E9=97=AE=E9=A2=98?= =?UTF-8?q?=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parser.cpp | 55 +++++++++++-------- .../regress/expected/cursor_expression.out | 13 +++++ src/test/regress/sql/cursor_expression.sql | 8 +++ 3 files changed, 52 insertions(+), 24 deletions(-) diff --git a/src/common/backend/parser/parser.cpp b/src/common/backend/parser/parser.cpp index e4fe525536..f35542d0b0 100644 --- a/src/common/backend/parser/parser.cpp +++ b/src/common/backend/parser/parser.cpp @@ -741,30 +741,37 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) break; } break; - case CURSOR: - GET_NEXT_TOKEN(); - core_yystype_1 = cur_yylval; // the value of cursor - cur_yylloc_1 = cur_yylloc; // the 
lloc of cursor - next_token_1 = next_token; // the token after curosr - GET_NEXT_TOKEN(); - core_yystype_2 = cur_yylval; // the value after cursor - cur_yylloc_2 = cur_yylloc; // the lloc after cursor - next_token_2 = next_token; // the token after after curosr - - if (next_token_1 == '(' && (is_select_stmt_definitely(next_token))) { - PARSE_CURSOR_PARENTHESES_AS_EXPR(); - } else if (is_prefer_parse_cursor_parentheses_as_expr() && !is_cursor_function_exist()) { - PARSE_CURSOR_PARENTHESES_AS_EXPR(); - } else { - PARSE_CURSOR_PARENTHESES_AS_FUNCTION(); - } - - if (t_thrd.proc->workingVersionNum < CURSOR_EXPRESSION_VERSION_NUMBER && - cur_token == CURSOR_EXPR) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Unsupported feature: cursor expression during the upgrade"))); - } - break; + case CURSOR: + GET_NEXT_TOKEN(); + core_yystype_1 = cur_yylval; // the value of cursor + cur_yylloc_1 = cur_yylloc; // the lloc of cursor + next_token_1 = next_token; // the token after cursor + if (next_token_1 != '(') { + /* save the lookahead token for next time */ + SET_LOOKAHEAD_TOKEN(); + /* and back up the output info to cur_token */ + lvalp->core_yystype = cur_yylval; + *llocp = cur_yylloc; + } else { + GET_NEXT_TOKEN(); + core_yystype_2 = cur_yylval; // the value after cursor + cur_yylloc_2 = cur_yylloc; // the lloc after cursor + next_token_2 = next_token; // the token after the token after cursor + + if (next_token_1 == '(' && (is_select_stmt_definitely(next_token))) { + PARSE_CURSOR_PARENTHESES_AS_EXPR(); + } else if (is_prefer_parse_cursor_parentheses_as_expr() && !is_cursor_function_exist()) { + PARSE_CURSOR_PARENTHESES_AS_EXPR(); + } else { + PARSE_CURSOR_PARENTHESES_AS_FUNCTION(); + } + if (t_thrd.proc->workingVersionNum < CURSOR_EXPRESSION_VERSION_NUMBER && + cur_token == CURSOR_EXPR) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Unsupported feature: cursor expression during the upgrade"))); + } + } + break; default: break; } diff --git 
a/src/test/regress/expected/cursor_expression.out b/src/test/regress/expected/cursor_expression.out index a0b5eb6815..4d7bf1fcc8 100644 --- a/src/test/regress/expected/cursor_expression.out +++ b/src/test/regress/expected/cursor_expression.out @@ -978,7 +978,20 @@ NOTICE: CONTEXT: PL/pgSQL function inline_code_block line 10 at FETCH NOTICE: employee_name : zhangsan set enable_auto_explain = off; +create table abort_test(cid int,fid int); +-- expect error +start transaction; +cursor 'abort' for select * from abort_test order by 1; +ERROR: syntax error at or near "'abort'" +LINE 1: cursor 'abort' for select * from abort_test order by 1; + ^ +close 'abort'; +ERROR: syntax error at or near "'abort'" +LINE 1: close 'abort'; + ^ +commit; -- clean +drop table abort_test; drop table test_insert; drop procedure pro_cursor_0011_02; drop table t_cursor_0011_01; diff --git a/src/test/regress/sql/cursor_expression.sql b/src/test/regress/sql/cursor_expression.sql index 435c59fb3b..ac563df18b 100644 --- a/src/test/regress/sql/cursor_expression.sql +++ b/src/test/regress/sql/cursor_expression.sql @@ -550,7 +550,15 @@ END; / set enable_auto_explain = off; +create table abort_test(cid int,fid int); +-- expect error +start transaction; +cursor 'abort' for select * from abort_test order by 1; +close 'abort'; +commit; + -- clean +drop table abort_test; drop table test_insert; drop procedure pro_cursor_0011_02; drop table t_cursor_0011_01; -- Gitee From f7c7210363b4ad0703043e18ac07f62325ec9d16 Mon Sep 17 00:00:00 2001 From: lyannaa <1016943941@qq.com> Date: Thu, 22 Aug 2024 14:58:49 +0800 Subject: [PATCH 207/347] redo page code refactor --- src/common/backend/utils/misc/guc/guc_sql.cpp | 2 +- .../optimizer/commands/trigger.cpp | 6 +- src/gausskernel/optimizer/commands/vacuum.cpp | 8 +-- .../runtime/executor/execUtils.cpp | 10 --- .../runtime/executor/nodeModifyTable.cpp | 46 +++++++------- .../storage/access/redo/redo_heapam.cpp | 62 ++++++++++++------- 
.../storage/access/transam/double_write.cpp | 4 +- .../storage/access/transam/xlog.cpp | 4 +- .../access/ustore/knl_uextremeredo.cpp | 21 ++++--- .../storage/access/ustore/knl_uheap.cpp | 3 +- .../storage/access/ustore/knl_upage.cpp | 4 +- .../storage/access/ustore/knl_uredo.cpp | 19 +++--- .../storage/access/ustore/knl_utuple.cpp | 2 +- .../storage/access/ustore/knl_uvacuumlazy.cpp | 14 ++--- .../storage/access/ustore/knl_uvisibility.cpp | 1 - .../access/ustore/undo/knl_uundorecycle.cpp | 2 +- src/gausskernel/storage/lmgr/lwlock.cpp | 2 +- src/include/access/double_write_basic.h | 2 +- src/include/access/heapam.h | 12 ++++ src/include/access/ustore/knl_upage.h | 16 +++++ 20 files changed, 139 insertions(+), 101 deletions(-) diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp index 4e29c8b518..b25ee097bb 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -2974,7 +2974,7 @@ static void InitSqlConfigureNamesString() PGC_USERSET, NODE_ALL, CLIENT_CONN, - gettext_noop("Configure UStore optimizations."), + gettext_noop("Configure Ustore optimizations."), NULL, GUC_LIST_INPUT | GUC_LIST_QUOTE}, &u_sess->attr.attr_sql.ustore_attr, diff --git a/src/gausskernel/optimizer/commands/trigger.cpp b/src/gausskernel/optimizer/commands/trigger.cpp index beb856e8e0..fb27a15868 100644 --- a/src/gausskernel/optimizer/commands/trigger.cpp +++ b/src/gausskernel/optimizer/commands/trigger.cpp @@ -3153,7 +3153,7 @@ HeapTuple GetTupleForTrigger(EState* estate, EPQState* epqstate, ResultRelInfo* return NULL; /* keep compiler quiet */ } } else { - Page page; + Page page; buffer = ReadBuffer(RELATION_IS_PARTITIONED(relation) ? 
fakeRelation : relation, ItemPointerGetBlockNumber(tid)); @@ -3168,12 +3168,12 @@ HeapTuple GetTupleForTrigger(EState* estate, EPQState* epqstate, ResultRelInfo* LockBuffer(buffer, BUFFER_LOCK_SHARE); page = BufferGetPage(buffer); - RowPtr *rp = UPageGetRowPtr(page, ItemPointerGetOffsetNumber(tid)); + RowPtr *rp = UPageGetRowPtr(page, ItemPointerGetOffsetNumber(tid)); UHeapDiskTuple diskTuple = NULL; Assert(RowPtrIsUsed(rp)); - diskTuple = (UHeapDiskTuple) UPageGetRowData(page, rp); + diskTuple = (UHeapDiskTuple)UPageGetRowData(page, rp); uheaptupdata.disk_tuple_size = rp->len; errorNo = memcpy_s((char*)uheaptupdata.disk_tuple, rp->len, (char*)diskTuple, rp->len); securec_check(errorNo, "\0", "\0"); diff --git a/src/gausskernel/optimizer/commands/vacuum.cpp b/src/gausskernel/optimizer/commands/vacuum.cpp index df6f632ec7..95305f6729 100644 --- a/src/gausskernel/optimizer/commands/vacuum.cpp +++ b/src/gausskernel/optimizer/commands/vacuum.cpp @@ -1999,12 +1999,12 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast) } if (OidIsValid(relationid)) { - Relation parent_rel = try_relation_open(relationid, NoLock); - if (parent_rel != NULL) { - if (RelationIsUstoreFormat(parent_rel) && parent_rel->rd_rel->relhasindex) { + Relation parentRel = try_relation_open(relationid, NoLock); + if (parentRel != NULL) { + if (RelationIsUstoreFormat(parentRel) && parentRel->rd_rel->relhasindex) { lmodePartTable = ExclusiveLock; } - relation_close(parent_rel, NoLock); + relation_close(parentRel, NoLock); } } diff --git a/src/gausskernel/runtime/executor/execUtils.cpp b/src/gausskernel/runtime/executor/execUtils.cpp index ff6377c406..71b4a51f45 100644 --- a/src/gausskernel/runtime/executor/execUtils.cpp +++ b/src/gausskernel/runtime/executor/execUtils.cpp @@ -2152,16 +2152,6 @@ List* ExecInsertIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* e } } -#ifdef USE_ASSERT_CHECKING - if (ispartitionedtable && RelationIsGlobalIndex(indexRelation)) { - if 
(RelationIsUstoreFormat(heapRelation)) { - Assert(((UHeapTuple)slot->tts_tuple)->table_oid != InvalidOid); - } else { - Assert(((HeapTuple)slot->tts_tuple)->t_tableOid != InvalidOid); - } - } -#endif - /* * FormIndexDatum fills in its values and isnull parameters with the * appropriate values for the column(s) of the index. diff --git a/src/gausskernel/runtime/executor/nodeModifyTable.cpp b/src/gausskernel/runtime/executor/nodeModifyTable.cpp index c52624ffce..b3b1f65df0 100644 --- a/src/gausskernel/runtime/executor/nodeModifyTable.cpp +++ b/src/gausskernel/runtime/executor/nodeModifyTable.cpp @@ -860,35 +860,31 @@ static void ConstraintsForExecUpsert(Relation resultRelationDesc) } } -static void update_slot_tuple_info(TupleTableSlot* slot, Tuple tuple) +static void UpdateSlotTupleInfo(TupleTableSlot* slot, Tuple tuple) { - bool tuple_is_uheap = TUPLE_IS_UHEAP_TUPLE(tuple); - if (slot->tts_tupslotTableAm == TAM_USTORE && !tuple_is_uheap) { - UHeapTuple slot_tup = (UHeapTuple) slot->tts_tuple; - HeapTuple htup = (HeapTuple) tuple; - slot_tup->ctid = htup->t_self; - slot_tup->table_oid = htup->t_tableOid; - slot_tup->t_xid_base = htup->t_xid_base; - slot_tup->t_multi_base = htup->t_multi_base; - slot_tup->xmin = htup->xmin; - slot_tup->xmax = htup->xmax; - slot_tup->xc_node_id = htup->t_xc_node_id; - } else if (slot->tts_tupslotTableAm == TAM_HEAP && tuple_is_uheap) { - HeapTuple slot_tup = (HeapTuple) slot->tts_tuple; - UHeapTuple utup = (UHeapTuple) tuple; - slot_tup->t_self = utup->ctid; - slot_tup->t_tableOid = utup->table_oid; - slot_tup->t_xid_base = utup->t_xid_base; - slot_tup->t_multi_base = utup->t_multi_base; - slot_tup->xmin = utup->xmin; - slot_tup->xmax = utup->xmax; - slot_tup->t_xc_node_id = utup->xc_node_id; + bool isUheapTuple = TUPLE_IS_UHEAP_TUPLE(tuple); + if (!isUheapTuple && slot->tts_tupslotTableAm == TAM_USTORE) { + UHeapTuple tup = (UHeapTuple)slot->tts_tuple; + HeapTuple htup = (HeapTuple)tuple; + tup->ctid = htup->t_self; + tup->table_oid 
= htup->t_tableOid; + tup->t_xid_base = htup->t_xid_base; + tup->t_multi_base = htup->t_multi_base; + tup->xmin = htup->xmin; + tup->xmax = htup->xmax; } else { - Assert(false); + Assert(isUheapTuple && slot->tts_tupslotTableAm == TAM_HEAP); + HeapTuple tup = (HeapTuple)slot->tts_tuple; + UHeapTuple utup = (UHeapTuple)tuple; + tup->t_self = utup->ctid; + tup->t_tableOid = utup->table_oid; + tup->t_xid_base = utup->t_xid_base; + tup->t_multi_base = utup->t_multi_base; + tup->xmin = utup->xmin; + tup->xmax = utup->xmax; } } - static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableSlot* planSlot, EState* estate, bool canSetTag, Tuple tuple, TupleTableSlot** returning, bool* updated, Oid* targetPartOid, char* partExprKeyStr) { @@ -1026,7 +1022,7 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS newid = tableam_tuple_insert(targetrel, tuple, estate->es_output_cid, 0, NULL); if (slot->tts_tuple != tuple) { - update_slot_tuple_info(slot, tuple); + UpdateSlotTupleInfo(slot, tuple); } /* insert index entries for tuple */ diff --git a/src/gausskernel/storage/access/redo/redo_heapam.cpp b/src/gausskernel/storage/access/redo/redo_heapam.cpp index bd9df24908..6bd24a604a 100755 --- a/src/gausskernel/storage/access/redo/redo_heapam.cpp +++ b/src/gausskernel/storage/access/redo/redo_heapam.cpp @@ -281,10 +281,13 @@ void HeapXlogDeleteOperatorPage(RedoBufferInfo *buffer, void *recorddata, Transa ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum); OffsetNumber maxoff = PageGetMaxOffsetNumber(page); - if (maxoff >= xlrec->offnum) + if (maxoff >= xlrec->offnum) { lp = PageGetItemId(page, xlrec->offnum); + } else { + PagePrintErrorInfo(page, "The max offset number is invalid"); + } - if (maxoff < xlrec->offnum || !ItemIdIsNormal(lp)) + if (!ItemIdIsNormal(lp)) ereport(PANIC, (errmsg("heap_delete_redo: invalid lp"))); htup = (HeapTupleHeader)PageGetItem(page, lp); @@ -371,8 +374,9 @@ void 
HeapXlogInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, bool i OffsetNumber maxoff = PageGetMaxOffsetNumber(page); - if (maxoff + 1 < xlrec->offnum) - ereport(PANIC, (errmsg("heap_insert_redo: invalid max offset number"))); + if (maxoff + 1 < xlrec->offnum) { + PagePrintErrorInfo(page, "The max offset number is invalid"); + } newlen = datalen - SizeOfHeapHeader; Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize); @@ -394,8 +398,9 @@ void HeapXlogInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, bool i HeapTupleHeaderSetCmin(htup, FirstCommandId); htup->t_ctid = target_tid; - if (PageAddItem(page, (Item)htup, newlen, xlrec->offnum, true, true) == InvalidOffsetNumber) - ereport(PANIC, (errmsg("heap_insert_redo: failed to add tuple"))); + if (PageAddItem(page, (Item)htup, newlen, xlrec->offnum, true, true) == InvalidOffsetNumber) { + PagePrintErrorInfo(page, "heap_insert_redo: failed to add tuple"); + } if (freespace != NULL) { *freespace = PageGetHeapFreeSpace(page); @@ -473,8 +478,9 @@ void HeapXlogMultiInsertOperatorPage(RedoBufferInfo *buffer, const void *recored offnum = xlrec->offsets[i]; maxoff = PageGetMaxOffsetNumber(page); - if (maxoff + 1 < offnum) - ereport(PANIC, (errmsg("heap_multi_insert_redo: invalid max offset number"))); + if (maxoff + 1 < offnum) { + PagePrintErrorInfo(page, "heap_multi_insert_redo: The max offset number is invalid"); + } xlhdr = (xl_multi_insert_tuple *)SHORTALIGN(tupdata); tupdata = ((char *)xlhdr) + SizeOfMultiInsertTuple; @@ -499,11 +505,13 @@ void HeapXlogMultiInsertOperatorPage(RedoBufferInfo *buffer, const void *recored ItemPointerSetOffsetNumber(&htup->t_ctid, offnum); offnum = PageAddItem(page, (Item)htup, newlen, offnum, true, true); - if (offnum == InvalidOffsetNumber) - ereport(PANIC, (errmsg("heap_multi_insert_redo: failed to add tuple"))); + if (offnum == InvalidOffsetNumber) { + PagePrintErrorInfo(page, "heap_multi_insert_redo: failed to add tuple"); + } + } + if (tupdata != 
endptr) { + PagePrintErrorInfo(page, "heap_multi_insert_redo: total tuple length mismatch"); } - if (tupdata != endptr) - ereport(PANIC, (errmsg("heap_multi_insert_redo: total tuple length mismatch"))); if (freespace != NULL) { *freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */ } @@ -537,11 +545,15 @@ void HeapXlogUpdateOperatorOldpage(RedoBufferInfo *buffer, void *recoreddata, bo ItemPointerSet(&newtid, newblk, xlrec->new_offnum); OffsetNumber maxoff = PageGetMaxOffsetNumber(page); - if (maxoff >= xlrec->old_offnum) + if (maxoff >= xlrec->old_offnum) { lp = PageGetItemId(page, xlrec->old_offnum); + } else { + PagePrintErrorInfo(page, "The max offset number is invalid"); + } - if (maxoff < xlrec->old_offnum || !ItemIdIsNormal(lp)) - ereport(PANIC, (errmsg("heap_update_redo: invalid lp"))); + if (!ItemIdIsNormal(lp)) { + PagePrintErrorInfo(page, "heap_update_redo: invalid lp"); + } htup = (HeapTupleHeader)PageGetItem(page, lp); @@ -653,8 +665,9 @@ void HeapXlogUpdateOperatorNewpage(RedoBufferInfo *buffer, void *recorddata, boo newphdr->pd_special, xlrec->new_offnum, PageGetMaxOffsetNumber(page)))); } - if (maxoff + 1 < xlrec->new_offnum) - ereport(PANIC, (errmsg("heap_update_redo: invalid max offset number"))); + if (maxoff + 1 < xlrec->new_offnum) { + PagePrintErrorInfo(page, "heap_update_redo: invalid max offset number"); + } Assert(tuplen <= MaxHeapTupleSize); @@ -677,8 +690,9 @@ void HeapXlogUpdateOperatorNewpage(RedoBufferInfo *buffer, void *recorddata, boo /* Make sure there is no forward chain link in t_ctid */ htup->t_ctid = newtid; - if (PageAddItem(page, (Item)htup, newlen, xlrec->new_offnum, true, true) == InvalidOffsetNumber) - ereport(PANIC, (errmsg("heap_update_redo: failed to add tuple"))); + if (PageAddItem(page, (Item)htup, newlen, xlrec->new_offnum, true, true) == InvalidOffsetNumber) { + PagePrintErrorInfo(page, "heap_update_redo: failed to add tuple"); + } if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) 
PageClearAllVisible(page); @@ -755,11 +769,15 @@ void HeapXlogInplaceOperatorPage(RedoBufferInfo *buffer, void *recorddata, void ereport(PANIC, (errmsg("heap_inplace_redo: no tuple data"))); maxoff = PageGetMaxOffsetNumber(page); - if (maxoff >= xlrec->offnum) + if (maxoff >= xlrec->offnum) { lp = PageGetItemId(page, xlrec->offnum); + } else { + PagePrintErrorInfo(page, "The max offset number is invalid"); + } - if (maxoff < xlrec->offnum || !ItemIdIsNormal(lp)) - ereport(PANIC, (errmsg("heap_inplace_redo: invalid lp"))); + if (!ItemIdIsNormal(lp)) { + PagePrintErrorInfo(page, "heap_inplace_redo: invalid lp"); + } htup = (HeapTupleHeader)PageGetItem(page, lp); diff --git a/src/gausskernel/storage/access/transam/double_write.cpp b/src/gausskernel/storage/access/transam/double_write.cpp index 86fbc42281..f0c8496ccd 100644 --- a/src/gausskernel/storage/access/transam/double_write.cpp +++ b/src/gausskernel/storage/access/transam/double_write.cpp @@ -2486,8 +2486,8 @@ bool free_space_enough(int buf_id) BufferDesc *buf_desc = GetBufferDescriptor(buf_id); BufferTag* tag = &buf_desc->tag; - /* only the heap page's pd_lower and pd_upper space is valid to store the buftag */ - if (tag->forkNum != MAIN_FORKNUM || IS_UNDO_RECORD_BUFFER(tag->rnode) || IS_TRANS_SLOT_BUFFER(tag->rnode)) { + /* only the heap page's pd_lower and pd_upper space is valid to save buf_tag */ + if (tag->forkNum != MAIN_FORKNUM || IS_UNDO_RECORD_BUFFER(tag->rnode) || IS_UNDO_SLOT_BUFFER(tag->rnode)) { return false; } diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index ed746035e7..d1c1d6cbdb 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -17038,8 +17038,8 @@ void rm_redo_error_callback(void *arg) initStringInfo(&buf); RmgrTable[XLogRecGetRmid(record)].rm_desc(&buf, record); - errcontext("xlog redo at lsn %X/%X, %s", (uint32)(record->EndRecPtr >> XLOG_LSN_SWAP), - 
(uint32)record->EndRecPtr, buf.data); + errcontext("xlog redo lsn %X/%X, %s", + (uint32)(record->EndRecPtr >> XLOG_LSN_SWAP), (uint32)record->EndRecPtr, buf.data); pfree_ext(buf.data); } diff --git a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp index cfc9d5f83c..ec72f15af0 100644 --- a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp @@ -567,13 +567,15 @@ void UHeapXlogInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, bool ItemPointerSetOffsetNumber(&targetTid, xlrec->offnum); OffsetNumber maxoff = UHeapPageGetMaxOffsetNumber(page); - if (maxoff + 1 < xlrec->offnum) - ereport(PANIC, (errmsg("UHeapXlogInsertOperatorPage: invalid max offset number"))); + if (maxoff + 1 < xlrec->offnum) { + UPagePrintErrorInfo(page, "UHeapXlogInsertOperatorPage: invalid max offset number"); + } bufpage.buffer = buffer->buf; bufpage.page = NULL; - if (UPageAddItem(NULL, &bufpage, (Item)utup, newlen, xlrec->offnum, true) == InvalidOffsetNumber) - ereport(PANIC, (errmsg("UHeapXlogInsertOperatorPage: failed to add tuple"))); + if (UPageAddItem(NULL, &bufpage, (Item)utup, newlen, xlrec->offnum, true) == InvalidOffsetNumber) { + UPagePrintErrorInfo(page, "UHeapXlogInsertOperatorPage: failed to add tuple"); + } UHeapRecordPotentialFreeSpace(buffer->buf, -1 * SHORTALIGN(newlen)); @@ -631,7 +633,7 @@ void UHeapXlogDeleteOperatorPage(RedoBufferInfo *buffer, void *recorddata, Size if (maxoff >= xlrec->offnum) { rp = UPageGetRowPtr(page, xlrec->offnum); } else { - elog(PANIC, "UHeapXlogDeleteOperatorPage: xlog delete offset is greater than the max offset on page"); + UPagePrintErrorInfo(page, "UHeapXlogDeleteOperatorPage: xlog delete offset is greater than the max offset on page"); } /* increment the potential freespace of this page */ @@ -664,7 +666,7 @@ void UHeapXlogUpdateOperatorOldpage(UpdateRedoBuffers* buffers, void *recorddata if 
(UHeapPageGetMaxOffsetNumber(oldpage) >= xlrec->old_offnum) { rp = UPageGetRowPtr(oldpage, xlrec->old_offnum); } else { - elog(PANIC, "invalid rp"); + UPagePrintErrorInfo(oldpage, "UHeap Update Oldpage:The max offset number is invalid"); } ItemPointerData oldtid, newtid; @@ -917,7 +919,7 @@ Size UHeapXlogUpdateOperatorNewpage(UpdateRedoBuffers* buffers, void *recorddata } else { UHeapBufferPage bufpage = {newbuf, NULL}; if (UPageAddItem(NULL, &bufpage, (Item)newtup, newlen, xlrec->new_offnum, true) == InvalidOffsetNumber) { - elog(PANIC, "failed to add tuple"); + UPagePrintErrorInfo(newpage, "UPageAddItem failed"); } /* Update the page potential freespace */ @@ -1078,13 +1080,16 @@ void UHeapXlogMultiInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, /* max offset should be valid */ Assert(UHeapPageGetMaxOffsetNumber(page) + 1 >= offnum); + if (UHeapPageGetMaxOffsetNumber(page) + 1 < offnum) { + UPagePrintErrorInfo(page, "The max offset number is invalid"); + } UHeapDiskTuple uhtup = GetUHeapDiskTupleFromMultiInsertRedoData(&data, &newlen, tbuf); bufpage.buffer = buffer->buf; bufpage.page = NULL; if (UPageAddItem(NULL, &bufpage, (Item)uhtup, newlen, offnum, true) == InvalidOffsetNumber) { - elog(PANIC, "failed to add tuple"); + UPagePrintErrorInfo(page, "UPageAddItem failed"); } /* decrement the potential freespace of this page */ diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index ea4aa14f78..f46ebd5ac2 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -3064,7 +3064,8 @@ check_tup_satisfies_update: undoXorDeltaSize += sizeof(uint16); } - /* The first sizeof(uint8) is space for t_hoff and the second sizeof(uint8) is space for prefix and suffix flag */ + /* The first sizeof(uint8) is space for t_hoff and the second sizeof(uint8) is space for prefix and suffix flag + */ undoXorDeltaSize += sizeof(uint8) + 
oldtup.disk_tuple->t_hoff - OffsetTdId + sizeof(uint8); undoXorDeltaSize += oldlen - prefixlen - suffixlen; diff --git a/src/gausskernel/storage/access/ustore/knl_upage.cpp b/src/gausskernel/storage/access/ustore/knl_upage.cpp index ca8947dd08..f23451476e 100644 --- a/src/gausskernel/storage/access/ustore/knl_upage.cpp +++ b/src/gausskernel/storage/access/ustore/knl_upage.cpp @@ -522,8 +522,8 @@ LocateUsableItemIds(Page page, UHeapTuple *tuples, int ntuples, Size saveFreeSpa } if (!RowPtrIsUsed(rp) && !RowPtrHasStorage(rp)) { - UHeapTuple uheaptup = tuples[*nthispage]; - Size neededSpace; + UHeapTuple uheaptup = tuples[*nthispage]; + Size neededSpace; if (isFirstInsert) { neededSpace = *usedSpace + uheaptup->disk_tuple_size; isFirstInsert = false; diff --git a/src/gausskernel/storage/access/ustore/knl_uredo.cpp b/src/gausskernel/storage/access/ustore/knl_uredo.cpp index cdcda5d982..b275d4debf 100644 --- a/src/gausskernel/storage/access/ustore/knl_uredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uredo.cpp @@ -40,7 +40,6 @@ using namespace undo; static const int FREESPACE_FRACTION = 5; -static const int HIGH_BITS_LEN_OF_LSN = 32; union TupleBuffer { UHeapDiskTupleData hdr; @@ -227,7 +226,7 @@ static void PerformInsertRedoAction(XLogReaderState *record, const Buffer buf, c Size newlen = datalen - SizeOfUHeapHeader; Page page = BufferGetPage(buf); if (UHeapPageGetMaxOffsetNumber(page) + 1 < xlrec->offnum) { - elog(PANIC, "Invalid max offset number"); + UPagePrintErrorInfo(page, "The max offset number is invalid"); } /* @@ -243,7 +242,7 @@ static void PerformInsertRedoAction(XLogReaderState *record, const Buffer buf, c bufpage.page = NULL; FastVerifyUTuple(utup, InvalidBuffer); if (UPageAddItem(NULL, &bufpage, (Item)utup, newlen, xlrec->offnum, true) == InvalidOffsetNumber) { - elog(PANIC, "Failed to add tuple"); + UPagePrintErrorInfo(page, "UPageAddItem failed"); } /* decrement the potential freespace of this page */ @@ -421,7 +420,7 @@ static void 
PerformDeleteRedoAction(XLogReaderState *record, UHeapTupleData *utu if (UHeapPageGetMaxOffsetNumber(page) >= xlrec->offnum) { rp = UPageGetRowPtr(page, xlrec->offnum); } else { - elog(PANIC, "invalid rp"); + UPagePrintErrorInfo(page, "The max offset number is invalid"); } /* increment the potential freespace of this page */ @@ -1019,7 +1018,7 @@ static void PerformUpdateOldRedoAction(XLogReaderState *record, UHeapTupleData * if (UHeapPageGetMaxOffsetNumber(oldpage) >= xlrec->old_offnum) { rp = UPageGetRowPtr(oldpage, xlrec->old_offnum); } else { - elog(PANIC, "Invalid max offset number"); + UPagePrintErrorInfo(oldpage, "The max offset number is invalid"); } /* Ensure old tuple points to the tuple in page. */ @@ -1193,14 +1192,16 @@ static Size PerformUpdateNewRedoAction(XLogReaderState *record, UpdateRedoBuffer Page newpage = buffers->newbuffer.pageinfo.page; /* max offset number should be valid */ - Assert(UHeapPageGetMaxOffsetNumber(newpage) + 1 >= xlrec->new_offnum); + if (UHeapPageGetMaxOffsetNumber(newpage) + 1 < xlrec->new_offnum) { + UPagePrintErrorInfo(newpage, "The max offset number is invalid"); + } if (xlrec->flags & XLZ_NON_INPLACE_UPDATE) { UHeapBufferPage bufpage = {newbuf, NULL}; if (UPageAddItem(NULL, &bufpage, (Item)tuples->newtup, newlen, xlrec->new_offnum, true) == InvalidOffsetNumber) { - elog(PANIC, "Failed to add tuple"); + UPagePrintErrorInfo(newpage, "UPageAddItem failed"); } /* Update the page potential freespace */ @@ -1576,7 +1577,7 @@ static void PerformMultiInsertRedoAction(XLogReaderState *record, XlUHeapMultiIn /* max offset should be valid */ if (UHeapPageGetMaxOffsetNumber(page) + 1 < offnum) { - elog(PANIC, "Invalid max offset number"); + UPagePrintErrorInfo(page, "The max offset number is invalid"); } UHeapDiskTuple uhtup = GetUHeapDiskTupleFromMultiInsertRedoData(&tupdata, &newlen, tbuf); @@ -1584,7 +1585,7 @@ static void PerformMultiInsertRedoAction(XLogReaderState *record, XlUHeapMultiIn bufpage.buffer = buffer->buf; 
bufpage.page = NULL; if (UPageAddItem(NULL, &bufpage, (Item)uhtup, newlen, offnum, true) == InvalidOffsetNumber) { - elog(PANIC, "Failed to add tuple"); + UPagePrintErrorInfo(page, "UPageAddItem failed,"); } /* decrement the potential freespace of this page */ diff --git a/src/gausskernel/storage/access/ustore/knl_utuple.cpp b/src/gausskernel/storage/access/ustore/knl_utuple.cpp index 2e84ec64f3..67464b2f62 100644 --- a/src/gausskernel/storage/access/ustore/knl_utuple.cpp +++ b/src/gausskernel/storage/access/ustore/knl_utuple.cpp @@ -1136,7 +1136,7 @@ Datum UHeapNoCacheGetAttr(UHeapTuple tuple, uint32 attnum, TupleDesc tupleDesc) bits8 *bp = tup->data; /* ptr to null bitmap in tuple */ bool slow = false; /* do we have to walk attrs? */ int hoff = tup->t_hoff; /* header length on tuple data */ - int off = hoff; /* current offset within data */ + int off = hoff; /* current offset within data */ bool hasnulls = UHeapDiskTupHasNulls(tup); /* ---------------- diff --git a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp index 7a9e2a05c0..2dda233752 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp @@ -564,14 +564,14 @@ static void LazyScanURel(Relation onerel, LVRelStats *vacrelstats, VacuumStmt *v /* - * ForceVacuumUHeapRelBypass() -- deal with force AutoVacuum for UStore + * ForceVacuumUHeapRelBypass() -- deal with force AutoVacuum for Ustore * - * AutoVacuum will only apply to UStore by force vacuum for recycle clog + * AutoVacuum will only apply to Ustore by force vacuum for recycle clog * * we just need to get a FrozenXid for this relation that all clog of xid less than * that FrozenXid can be safely recycled. 
* - * Note: UStore with undo module can use oldestXidInUndo to ensure that every xid we + * Note: Ustore with undo module can use oldestXidInUndo to ensure that every xid we * found in heap page less than that value is already committed, otherwise we * won't find it (rollback is already performed). */ @@ -579,8 +579,8 @@ void ForceVacuumUHeapRelBypass(Relation onerel, VacuumStmt *vacstmt, BufferAcces { Assert(IsAutoVacuumWorkerProcess()); ereport(LOG, (errmodule(MOD_AUTOVAC), errcode(ERRCODE_LOG), - errmsg("start force vacuum index of table %s, oid %u, globalRecycleXid %lu," - "RecentGlobalDataXmin %lu, nextXid %lu", + errmsg("ForceVacuumUHeapRelBypass Begin: Table %s, oid %u" + "GlobalRecycleXid %lu, RecentGlobalDataXmin %lu, NextXid %lu", onerel ? RelationGetRelationName(onerel) : "unknown", onerel ? onerel->rd_id : InvalidOid, pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid), @@ -678,8 +678,8 @@ void ForceVacuumUHeapRelBypass(Relation onerel, VacuumStmt *vacstmt, BufferAcces pfree_ext(vacrelstats); ereport(LOG, (errmodule(MOD_AUTOVAC), errcode(ERRCODE_LOG), - errmsg("start force vacuum index of table %s, oid %u, globalRecycleXid %lu," - "RecentGlobalDataXmin %lu, nextXid %lu", + errmsg("ForceVacuumUHeapRelBypass End: table %s, oid %u" + "GlobalRecycleXid %lu," "RecentGlobalDataXmin %lu, NextXid %lu", onerel ? RelationGetRelationName(onerel) : "unknown", onerel ? 
onerel->rd_id : InvalidOid, pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid), diff --git a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp index 170277d225..7328acf0a5 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp @@ -1176,7 +1176,6 @@ TM_Result UHeapTupleSatisfiesUpdate(Relation rel, Snapshot snapshot, ItemPointer utuple->table_oid = RelationGetRelid(rel); utuple->ctid = *tid; - utuple->xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; *inplaceUpdated = false; if (ctid != NULL) { diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp index 530096fcc6..f62aef7fe4 100755 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp @@ -886,7 +886,7 @@ void UndoRecycleMain() "from %u to %u."), t_thrd.undorecycler_cxt.is_recovery_in_progress, is_in_progress))); ShutDownRecycle(recycleMaxXIDs); } - if ((!t_thrd.undorecycler_cxt.is_recovery_in_progress) && (!IS_EXRTO_STANDBY_READ)) { + if (!t_thrd.undorecycler_cxt.is_recovery_in_progress && !IS_EXRTO_STANDBY_READ) { TransactionId recycleXmin = InvalidTransactionId; TransactionId oldestXmin = GetOldestXminForUndo(&recycleXmin); if (!TransactionIdIsValid(recycleXmin) || diff --git a/src/gausskernel/storage/lmgr/lwlock.cpp b/src/gausskernel/storage/lmgr/lwlock.cpp index 80cc97addd..d84b43345d 100644 --- a/src/gausskernel/storage/lmgr/lwlock.cpp +++ b/src/gausskernel/storage/lmgr/lwlock.cpp @@ -853,7 +853,7 @@ static void LWThreadSuicide(PGPROC *proc, int extraWaits, LWLock *lock, LWLockMo } instr_stmt_report_lock(LWLOCK_WAIT_END); LWLockReportWaitFailed(lock); - /* reset victim flag, we may acquire lock again at fatal process */ + /* Reset victim flag, we may acquire lock again at fatal process 
*/ proc->lwIsVictim = false; ereport(FATAL, (errmsg("force thread %lu to exit because of lwlock deadlock", proc->pid), errdetail("Lock Info: (%s), mode %d", T_NAME(lock), mode))); diff --git a/src/include/access/double_write_basic.h b/src/include/access/double_write_basic.h index cd5d457cf2..a0502ad632 100644 --- a/src/include/access/double_write_basic.h +++ b/src/include/access/double_write_basic.h @@ -133,7 +133,7 @@ const static uint32 DW_VIEW_COL_NAME_LEN = 32; #define DW_PAGE_CHECKSUM(page) (DW_PAGE_TAIL(page)->checksum) #define IS_UNDO_RECORD_BUFFER(rnode) (((rnode).dbNode) == (UNDO_DB_OID)) -#define IS_TRANS_SLOT_BUFFER(rnode) (((rnode).dbNode) == (UNDO_SLOT_DB_OID)) +#define IS_UNDO_SLOT_BUFFER(rnode) (((rnode).dbNode) == (UNDO_SLOT_DB_OID)) #ifdef DW_DEBUG #define DW_LOG_LEVEL LOG diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index c42129994b..fe22661513 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -36,6 +36,7 @@ #define HEAP_INSERT_SPECULATIVE 0x0008 #define HEAP_INSERT_SKIP_ERROR 0x0010 #define HEAP_INSERT_SPLIT_PARTITION 0x0020 +#define XLOG_HEAP_LSN_HIGH_OFF 32 /* ---------------------------------------------------------------- * Scan State Information @@ -188,6 +189,17 @@ static const struct { #define ConditionalLockTupleTuplock(_rel, _tup, _mode) \ ConditionalLockTuple((_rel), (_tup), TupleLockExtraInfo[_mode].hwlock) +#define PagePrintErrorInfo(_page, _msg) \ + do { \ + PageHeader pageHeader = (PageHeader)page; \ + elog(PANIC, \ + "%s, PageHeaderInfo: pd_lsn:%X/%X, pd_checksum:%u, pd_flags:%u, " \ + "pd_lower:%u, pd_upper:%u, pd_special:%u, pd_pagesize_version:%u, pd_prune_xid:%u", \ + _msg, pageHeader->pd_lsn.xlogid, \ + pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF + pageHeader->pd_lsn.xrecoff, \ + pageHeader->pd_checksum, pageHeader->pd_flags, pageHeader->pd_lower, pageHeader->pd_upper, \ + pageHeader->pd_special, pageHeader->pd_pagesize_version, pageHeader->pd_prune_xid); \ + } 
while (0) /* * This table maps tuple lock strength values for each particular * MultiXactStatus value. diff --git a/src/include/access/ustore/knl_upage.h b/src/include/access/ustore/knl_upage.h index 2ef78734ab..fc492f8cf6 100644 --- a/src/include/access/ustore/knl_upage.h +++ b/src/include/access/ustore/knl_upage.h @@ -34,6 +34,7 @@ #define UHP_ALL_VISIBLE 0x0004 /* all tuples on page are visible to \ * everyone */ #define UHEAP_VALID_FLAG_BITS 0xFFFF /* OR of all valid flag bits */ +#define XLOG_UHEAP_LSN_HIGH_OFF 32 #define UPageHasFreeLinePointers(_page) (((UHeapPageHeaderData *)(_page))->pd_flags & UHEAP_HAS_FREE_LINES) #define UPageSetHasFreeLinePointers(_page) (((UHeapPageHeaderData *)(_page))->pd_flags |= UHEAP_HAS_FREE_LINES) @@ -214,6 +215,21 @@ #define UPageGetPruneXID(_page) ((UHeapPageHeaderData)(_page))->pd_prune_xid +#define UPagePrintErrorInfo(_page, _msg) \ + do { \ + UHeapPageHeaderData *pageHeader = (UHeapPageHeaderData *)_page; \ + elog(PANIC, \ + "%s, UPageHeaderInfo: pd_lsn:%X/%X, pd_checksum:%u, " \ + "pd_flags:%u, pd_lower:%u, " \ + "pd_upper:%u, pd_special:%u, pd_pagesize_version:%u, potential_freespace:%u, td_count:%u, " \ + "pd_prune_xid:%lu, pd_xid_base:%lu, pd_multi_base:%lu" _msg, \ + pageHeader->pd_lsn.xlogid, \ + pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF + pageHeader->pd_lsn.xrecoff, \ + pageHeader->pd_checksum, pageHeader->pd_flags, pageHeader->pd_lower, pageHeader->pd_upper, \ + pageHeader->pd_special, pageHeader->pd_pagesize_version, pageHeader->potential_freespace, \ + pageHeader->td_count, pageHeader->pd_prune_xid, pageHeader->pd_xid_base, pageHeader->pd_multi_base); \ + } while (0) + const uint8 UHEAP_DEFAULT_TOAST_TD_COUNT = 4; const uint8 UHEAP_MAX_ATTR_PAD = 3; /* -- Gitee From 29481a478b1e466a8a78e6be7d38f253272f9790 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Sat, 24 Aug 2024 08:39:00 +0800 Subject: [PATCH 208/347] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= 
=?UTF-8?q?=EF=BC=9A=E4=BF=AE=E5=A4=8D=E6=89=A7=E8=A1=8Cregexp=5Fcount?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E6=98=AFctr+c=E6=97=A0=E6=B3=95=E6=8E=90?= =?UTF-8?q?=E6=96=AD=E7=9A=84=E9=97=AE=E9=A2=98=E7=9A=84=E9=97=AE=E9=A2=98?= =?UTF-8?q?=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/regexp.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/common/backend/utils/adt/regexp.cpp b/src/common/backend/utils/adt/regexp.cpp index c9798130d3..2857195a33 100644 --- a/src/common/backend/utils/adt/regexp.cpp +++ b/src/common/backend/utils/adt/regexp.cpp @@ -1222,6 +1222,7 @@ static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, /* search for the pattern, perhaps repeatedly */ prev_match_end = 0; while (RE_wchar_execute(cpattern, wide_str, wide_len, start_search, pmatch_len, pmatch)) { + CHECK_FOR_INTERRUPTS(); /* * If requested, ignore degenerate matches, which are zero-length * matches occurring at the start or end of a string or just after a -- Gitee From d1cf22ed9133386e12be4c40ff92513c0d49613c Mon Sep 17 00:00:00 2001 From: zhang_xubo <2578876417@qq.com> Date: Sat, 24 Aug 2024 11:08:08 +0800 Subject: [PATCH 209/347] add pg_archivecleanup tool --- build/script/aarch64_finance_list | 1 + build/script/aarch64_opengauss_list | 1 + build/script/utils/make_compile.sh | 7 +++++++ build/script/x86_64_finance_list | 1 + build/script/x86_64_opengauss_list | 1 + 5 files changed, 11 insertions(+) diff --git a/build/script/aarch64_finance_list b/build/script/aarch64_finance_list index 608f59ec6a..9873ce2595 100644 --- a/build/script/aarch64_finance_list +++ b/build/script/aarch64_finance_list @@ -47,6 +47,7 @@ ./bin/gs_plan_simulator.sh ./bin/pg_xlogdump ./bin/pagehack +./bin/pg_archivecleanup ./bin/gs_assessment ./bin/gs_retrieve ./etc/kerberos/kadm5.acl diff --git a/build/script/aarch64_opengauss_list 
b/build/script/aarch64_opengauss_list index c21a558966..7b7e88d826 100644 --- a/build/script/aarch64_opengauss_list +++ b/build/script/aarch64_opengauss_list @@ -48,6 +48,7 @@ ./bin/gs_plan_simulator.sh ./bin/pg_xlogdump ./bin/pagehack +./bin/pg_archivecleanup ./bin/gs_assessment ./bin/gs_retrieve ./etc/kerberos/kadm5.acl diff --git a/build/script/utils/make_compile.sh b/build/script/utils/make_compile.sh index 9dc4a7b970..bcbdebdf9f 100644 --- a/build/script/utils/make_compile.sh +++ b/build/script/utils/make_compile.sh @@ -30,6 +30,7 @@ ROACH_DIR="${ROOT_DIR}/distribute/bin/roach" MPPDB_DECODING_DIR="${ROOT_DIR}/contrib/mppdb_decoding" XLOG_DUMP_DIR="${ROOT_DIR}/contrib/pg_xlogdump" PAGE_HACK_DIR="${ROOT_DIR}/contrib/pagehack" +ARCH_CLEAN_DIR="${ROOT_DIR}/contrib/pg_archivecleanup" ################################### @@ -326,6 +327,12 @@ function install_gaussdb() make install -sj >> "$LOG_FILE" 2>&1 echo "End make install pagehack" >> "$LOG_FILE" 2>&1 + cd "$ARCH_CLEAN_DIR" + make clean >> "$LOG_FILE" 2>&1 + make -sj >> "$LOG_FILE" 2>&1 + make install -sj >> "$LOG_FILE" 2>&1 + echo "End make install archivecleanup" >> "$LOG_FILE" 2>&1 + chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1 get_kernel_commitid diff --git a/build/script/x86_64_finance_list b/build/script/x86_64_finance_list index 899b439da2..36843f4055 100644 --- a/build/script/x86_64_finance_list +++ b/build/script/x86_64_finance_list @@ -47,6 +47,7 @@ ./bin/gs_plan_simulator.sh ./bin/pg_xlogdump ./bin/pagehack +./bin/pg_archivecleanup ./bin/gs_assessment ./bin/gs_retrieve ./etc/kerberos/kadm5.acl diff --git a/build/script/x86_64_opengauss_list b/build/script/x86_64_opengauss_list index 1273cd99c6..5d462ccd82 100644 --- a/build/script/x86_64_opengauss_list +++ b/build/script/x86_64_opengauss_list @@ -48,6 +48,7 @@ ./bin/gs_plan_simulator.sh ./bin/pg_xlogdump ./bin/pagehack +./bin/pg_archivecleanup ./bin/gs_assessment ./bin/gs_retrieve 
./etc/kerberos/kadm5.acl -- Gitee From 5eaf0ef473ed56bb0c7c57d2ac0c3d7eaf062c92 Mon Sep 17 00:00:00 2001 From: lyannaa <1016943941@qq.com> Date: Fri, 23 Aug 2024 20:04:35 +0800 Subject: [PATCH 210/347] =?UTF-8?q?1.toast=E8=A1=A8=E5=9C=BA=E6=99=AF?= =?UTF-8?q?=E4=B8=8B=E8=8E=B7=E5=8F=96oldestXmin=202.autovacuum=E6=B5=81?= =?UTF-8?q?=E7=A8=8B=E4=BC=98=E5=8C=96=203.=E5=88=AA=E9=99=A4=E5=86=97?= =?UTF-8?q?=E9=A4=98=E4=BB=A3=E7=A2=BC,clean=20code?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../backend/utils/adt/pgundostatfuncs.cpp | 6 +- src/common/backend/utils/error/be_module.cpp | 2 +- .../optimizer/commands/tablecmds.cpp | 18 +-- src/gausskernel/optimizer/commands/vacuum.cpp | 32 +---- src/gausskernel/optimizer/commands/verify.cpp | 3 - .../process/postmaster/autovacuum.cpp | 28 +--- .../process/postmaster/globalstats.cpp | 1 - src/gausskernel/storage/access/heap/hio.cpp | 9 +- .../storage/access/transam/xact.cpp | 2 +- .../storage/access/ubtree/ubtdump.cpp | 13 +- .../storage/access/ubtree/ubtinsert.cpp | 20 +-- .../storage/access/ubtree/ubtpage.cpp | 133 +++++++++--------- .../storage/access/ubtree/ubtrecycle.cpp | 51 ++----- .../storage/access/ubtree/ubtree.cpp | 2 +- .../storage/access/ustore/knl_uheap.cpp | 7 +- .../storage/access/ustore/knl_undoaction.cpp | 7 +- .../storage/access/ustore/knl_uscan.cpp | 2 +- .../storage/access/ustore/knl_utuptoaster.cpp | 6 +- .../storage/access/ustore/knl_uvacuumlazy.cpp | 10 +- .../storage/access/ustore/knl_uvisibility.cpp | 4 +- .../access/ustore/undo/knl_uundoapi.cpp | 12 +- .../access/ustore/undo/knl_uundorecycle.cpp | 109 +++++++------- .../access/ustore/undo/knl_uundospace.cpp | 8 +- .../access/ustore/undo/knl_uundotxn.cpp | 12 +- .../access/ustore/undo/knl_uundozone.cpp | 13 +- src/gausskernel/storage/tcap/tcap_drop.cpp | 2 +- src/include/access/hio.h | 3 +- src/include/access/ubtree.h | 57 ++++---- src/include/access/ustore/knl_undorequest.h | 2 +- 
.../access/ustore/undo/knl_uundotype.h | 8 +- src/include/utils/be_module.h | 2 +- .../expected/test_ustore_partition.out | 59 ++++---- .../regress/sql/test_ustore_partition.sql | 26 ++-- 33 files changed, 293 insertions(+), 376 deletions(-) diff --git a/src/common/backend/utils/adt/pgundostatfuncs.cpp b/src/common/backend/utils/adt/pgundostatfuncs.cpp index 3b5e22aadd..1d5787de38 100644 --- a/src/common/backend/utils/adt/pgundostatfuncs.cpp +++ b/src/common/backend/utils/adt/pgundostatfuncs.cpp @@ -672,7 +672,7 @@ static bool ParseUndoRecord(UndoRecPtr urp, Tuplestorestate *tupstore, TupleDesc break; } } while (true); - + return true; } @@ -829,7 +829,7 @@ static void PutTranslotInfoToTuple(int zoneId, uint32 offset, TransactionSlot *s rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, offset); securec_check_ss(rc, "\0", "\0"); values[ARR_4] = CStringGetTextDatum(textBuffer); - if (UHeapTransactionIdDidCommit((uint64)slot->XactId())) { + if (TransactionIdDidCommit((uint64)slot->XactId())) { values[ARR_5] = COMMITED_STATUS; } else if (TransactionIdIsInProgress((uint64)slot->XactId())) { values[ARR_5] = INPROCESS_STATUS; @@ -997,7 +997,7 @@ static void ReadTranslotFromMemory(int startIdx, int endIdx, UNDO_PTR_GET_OFFSET(slotPtr)); securec_check_ss(rc, "\0", "\0"); values[ARR_4] = CStringGetTextDatum(textBuffer); - if (UHeapTransactionIdDidCommit((uint64)slot->XactId())) { + if (TransactionIdDidCommit((uint64)slot->XactId())) { values[ARR_5] = COMMITED_STATUS; } else if (TransactionIdIsInProgress((uint64)slot->XactId())) { values[ARR_5] = INPROCESS_STATUS; diff --git a/src/common/backend/utils/error/be_module.cpp b/src/common/backend/utils/error/be_module.cpp index 2058d0e822..cefd7d4620 100755 --- a/src/common/backend/utils/error/be_module.cpp +++ b/src/common/backend/utils/error/be_module.cpp @@ -136,7 +136,7 @@ const module_data module_map[] = {{MOD_ALL, "ALL"}, {MOD_DSS, "DSS_API"}, {MOD_GPI, "GPI"}, {MOD_PARTITION, 
"PARTITION"}, - {MOD_UBT_NEWPAGE, "UBT_NEWPAGE"}, + {MOD_UBTREE, "UBTREE"}, {MOD_SRF, "SRF"}, {MOD_SS_TXNSTATUS, "SS_TXNSTATUS"}, diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 4a4e57a978..f5f8269bae 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -20664,7 +20664,7 @@ static void copy_relation_data(Relation rel, SMgrRelation* dstptr, ForkNumber fo UnlockReleaseBuffer(buf); } else { if (RelationIsUstoreFormat(rel)) { - if (ExecuteUndoActionsPageForPartition(rel, dst, forkNum, blkno, blkno, ROLLBACK_OP_FOR_MOVE_TBLSPC)) { + if (ExecuteUndoActionsForPartition(rel, dst, forkNum, blkno, blkno, ROLLBACK_OP_FOR_MOVE_TBLSPC)) { *dstptr = dst = smgropen(newFileNode, backendId); src = rel->rd_smgr; } @@ -20913,7 +20913,7 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char pToastInfo.destToastRelOid = dest->rd_id; pToastInfo.chunkIdHashTable = chunkIdHashTable; } - if (ExecuteUndoActionsPageForPartition(src, dest->rd_smgr, forkNum, src_blkno, dest_blkno, + if (ExecuteUndoActionsForPartition(src, dest->rd_smgr, forkNum, src_blkno, dest_blkno, ROLLBACK_OP_FOR_MERGE_PARTITION, &pToastInfo)) { RelationOpenSmgr(dest); } @@ -27970,7 +27970,7 @@ static bool checkChunkIdRepeat(List* srcPartToastRels, int selfIndex, Oid chunkI return false; } -static void ExecUndoActionsPageForRelation(Relation rel) +static void ExecuteUndoActionsForRelation(Relation rel) { Assert(RelationIsUstoreFormat(rel)); @@ -27983,25 +27983,25 @@ static void ExecUndoActionsPageForRelation(Relation rel) } for (BlockNumber blkno = 0; blkno < srcHeapBlocks; blkno ++) { - ExecuteUndoActionsPageForPartition(rel, rel->rd_smgr, MAIN_FORKNUM, blkno, blkno, + ExecuteUndoActionsForPartition(rel, rel->rd_smgr, MAIN_FORKNUM, blkno, blkno, ROLLBACK_OP_FOR_EXCHANGE_PARTITION); } RelationCloseSmgr(rel); } -static void 
ExecUndoActionsPageForExchangePartition(Relation partTableRel, Oid partOid, Relation ordTableRel) +static void ExecuteUndoActionsForExchangePartition(Relation partTableRel, Oid partOid, Relation ordTableRel) { Partition part = NULL; Relation partRel = NULL; part = partitionOpen(partTableRel, partOid, NoLock); partRel = partitionGetRelation(partTableRel, part); - ExecUndoActionsPageForRelation(partRel); + ExecuteUndoActionsForRelation(partRel); releaseDummyRelation(&partRel); partitionClose(partTableRel, part, NoLock); - ExecUndoActionsPageForRelation(ordTableRel); + ExecuteUndoActionsForRelation(ordTableRel); } // Description : Execute exchange @@ -28086,7 +28086,7 @@ static void ATExecExchangePartition(Relation partTableRel, AlterTableCmd* cmd) checkIndexForExchange(partTableRel, partOid, ordTableRel, &partIndexList, &ordIndexList); if (RelationIsUstoreFormat(partTableRel)) { - ExecUndoActionsPageForExchangePartition(partTableRel, partOid, ordTableRel); + ExecuteUndoActionsForExchangePartition(partTableRel, partOid, ordTableRel); } // Check if the tables are colstore @@ -33171,7 +33171,7 @@ void ExecutePurge(PurgeStmt *stmt) } case PURGE_RECYCLEBIN: { Oid userId = GetUserId(); - /* + /* * Superusers bypass all permission checking. * Database Security: Support seperation of privilege. 
*/ diff --git a/src/gausskernel/optimizer/commands/vacuum.cpp b/src/gausskernel/optimizer/commands/vacuum.cpp index 95305f6729..3d7bffbff9 100644 --- a/src/gausskernel/optimizer/commands/vacuum.cpp +++ b/src/gausskernel/optimizer/commands/vacuum.cpp @@ -117,7 +117,6 @@ typedef struct { } VacStates; extern void do_delta_merge(List* infos, VacuumStmt* stmt); -extern void ForceVacuumUHeapRelBypass(Relation onerel, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy); /* release all memory in infos */ static void free_merge_info(List* infos); @@ -135,7 +134,6 @@ static void GPIVacuumMainPartition( static void CBIVacuumMainPartition( Relation onerel, const VacuumStmt* vacstmt, LOCKMODE lockmode, BufferAccessStrategy bstrategy); -static void UstoreVacuum(Relation real, VacuumStmt *vacstmt, LOCKMODE lockmode, BufferAccessStrategy vacstrategy); #define TryOpenCStoreInternalRelation(r, lmode, r1, r2) \ do { \ @@ -1859,10 +1857,10 @@ static inline void proc_snapshot_and_transaction() } static inline void -TableRelationVacuum(Relation rel, VacuumStmt *vacstmt, LOCKMODE lockmode, BufferAccessStrategy vacStrategy) +TableRelationVacuum(Relation rel, VacuumStmt *vacstmt, BufferAccessStrategy vacStrategy) { if (RelationIsUstoreFormat(rel)) { - UstoreVacuum(rel, vacstmt, lockmode, vacStrategy); + LazyVacuumUHeapRel(rel, vacstmt, vacStrategy); } else { lazy_vacuum_rel(rel, vacstmt, vacStrategy); } @@ -2766,7 +2764,7 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast) if (vacuumMainPartition((uint32)(vacstmt->flags))) { pgstat_report_waitstatus_relname(STATE_VACUUM, get_nsp_relname(relid)); if (RelationIsUstoreFormat(onerel)) { - UstoreVacuum(onerel, vacstmt, lmode, vac_strategy); + UstoreVacuumMainPartitionGPIs(onerel, vacstmt, lmode, vac_strategy); } else { GPIVacuumMainPartition(onerel, vacstmt, lmode, vac_strategy); CBIVacuumMainPartition(onerel, vacstmt, lmode, vac_strategy); @@ -2774,7 +2772,7 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool 
do_toast) pgstat_report_vacuum(relid, InvalidOid, false, 0); } else { pgstat_report_waitstatus_relname(STATE_VACUUM, get_nsp_relname(relid)); - TableRelationVacuum(onerel, vacstmt, lmode, vac_strategy); + TableRelationVacuum(onerel, vacstmt, vac_strategy); } } (void)pgstat_report_waitstatus(oldStatus); @@ -4339,25 +4337,3 @@ int GetVacuumLogLevel(void) #endif } -static void UstoreVacuum(Relation rel, VacuumStmt *vacstmt, LOCKMODE lockmode, BufferAccessStrategy vacstrategy) -{ - bool partTable = RelationIsPartitioned(rel); - - /* Only GPI cleanup is performed for partitioned tables regardless of autovacuum or vacuum*/ - if (partTable) { - UstoreVacuumMainPartitionGPIs(rel, vacstmt, lockmode, vacstrategy); - return; - } - - if (IsAutoVacuumWorkerProcess()) { - /* In the autovacuum process, build fsm tree for common tables, toast tables, or partitions. */ - FreeSpaceMapVacuum(rel); - if (vacstmt->needFreeze) { - /* Force vacuum for recycle clog. */ - ForceVacuumUHeapRelBypass(rel, vacstmt, vacstrategy); - } - } else { - /*In vacuum process, the Uheap page and indexes will be cleared for tables, toast, and partitions. 
*/ - LazyVacuumUHeapRel(rel, vacstmt, vacstrategy); - } -} diff --git a/src/gausskernel/optimizer/commands/verify.cpp b/src/gausskernel/optimizer/commands/verify.cpp index 44b068a546..ee7923dd61 100644 --- a/src/gausskernel/optimizer/commands/verify.cpp +++ b/src/gausskernel/optimizer/commands/verify.cpp @@ -1267,9 +1267,7 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc, ForkNumber f char* buf = (char*)palloc(BLCKSZ); BlockNumber nblocks; BlockNumber blkno; - bool isValidRelationPage = true; - char* namespace_name = get_namespace_name(RelationGetNamespace(rel)); RelationOpenSmgr(rel); @@ -1900,7 +1898,6 @@ static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, ForkNum } else { UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel, NULL, blkno); } - } PG_CATCH(); { diff --git a/src/gausskernel/process/postmaster/autovacuum.cpp b/src/gausskernel/process/postmaster/autovacuum.cpp index 8168937413..e357a44cd7 100755 --- a/src/gausskernel/process/postmaster/autovacuum.cpp +++ b/src/gausskernel/process/postmaster/autovacuum.cpp @@ -181,7 +181,7 @@ static void partition_needs_vacanalyze(Oid partid, AutoVacOpts* relopts, Form_pg HeapTuple partTuple, at_partitioned_table* ap_entry, PgStat_StatTabEntry* tabentry, bool is_recheck, bool* dovacuum, bool* doanalyze, bool* need_freeze); static autovac_table* partition_recheck_autovac( - vacuum_object* vacObj, HTAB* table_relopt_map, HTAB* partitioned_tables_map, TupleDesc partTupDesc); + vacuum_object* vacObj, HTAB* table_relopt_map, HTAB* partitioned_tables_map, TupleDesc pg_class_desc); extern void DoVacuumMppTable(VacuumStmt* stmt, const char* queryString, bool isTopLevel, bool sentToRemote); /* * Called when the AutoVacuum is ending. 
@@ -3026,7 +3026,8 @@ static autovac_table* table_recheck_autovac( return NULL; classForm = (Form_pg_class)GETSTRUCT(classTup); bytea *rawRelopts = extractRelOptions(classTup, pg_class_desc, InvalidOid); - + bool isUstorePartitionTable = (rawRelopts != NULL && RelationIsTableAccessMethodUStoreType(rawRelopts) && + isPartitionedRelation(classForm)); /* * Get the applicable reloptions. If it is a TOAST table, try to get the * main table reloptions if the toast table itself doesn't have. @@ -3067,12 +3068,12 @@ static autovac_table* table_recheck_autovac( } /* OK, it needs something done */ - if (doanalyze || dovacuum || dovacuum_toast) { + if (doanalyze || dovacuum || dovacuum_toast || isUstorePartitionTable) { tab = calculate_vacuum_cost_and_freezeages(avopts, doanalyze, need_freeze); if (tab != NULL) { tab->at_relid = relid; tab->at_sharedrel = classForm->relisshared; - tab->at_dovacuum = dovacuum || dovacuum_toast; + tab->at_dovacuum = isUstorePartitionTable ? true : (dovacuum || dovacuum_toast); tab->at_gpivacuumed = vacObj->gpi_vacuumed; } } @@ -3179,7 +3180,6 @@ static void relation_needs_vacanalyze(Oid relid, AutoVacOpts* relopts, bytea* ra bool delta_vacuum = false; bool av_enabled = false; bool userEnabled = true; - bool ustoreTable = false; /* pg_class.reltuples */ float4 reltuples; @@ -3298,16 +3298,6 @@ static void relation_needs_vacanalyze(Oid relid, AutoVacOpts* relopts, bytea* ra *dovacuum = *dovacuum && userEnabled; - /* The AutoVacuum function is temporarily enabled for the ustore table. 
*/ - ustoreTable = (rawRelopts != NULL && RelationIsTableAccessMethodUStoreType(rawRelopts)); - if (ustoreTable && !(*dovacuum)) { - Relation tmprel = heap_open(relid, NoLock); - if(RelationIsPartitioned(tmprel)) { - *dovacuum = true; - } - heap_close(tmprel, NoLock); - } - if (*dovacuum || *doanalyze) { AUTOVAC_LOG(DEBUG2, "vac \"%s\": recheck = %s need_freeze = %s dovacuum = %s (dead tuples %ld " "vacuum threshold %.0f) doanalyze = %s (changed tuples %ld analyze threshold %.0f)", @@ -3780,12 +3770,6 @@ static void partition_needs_vacanalyze(Oid partid, AutoVacOpts* relopts, Form_pg *doanalyze = (anltuples > anlthresh) && false; } - ustorePartTbl = (partoptions != NULL && RelationIsTableAccessMethodUStoreType(partoptions)); - if (ustorePartTbl && av_enabled && ap_entry->at_allowvacuum) { - *dovacuum = true; - } - pfree_ext(partoptions); - if (!is_recheck && (*dovacuum || *doanalyze)) { AUTOVAC_LOG(DEBUG2, "vac table \"%s\" partition(\"%s\"): recheck = %s need_freeze = %s " "dovacuum = %s (dead tuples %ld vacuum threshold %.0f)", @@ -3800,7 +3784,7 @@ static void partition_needs_vacanalyze(Oid partid, AutoVacOpts* relopts, Form_pg } static autovac_table* partition_recheck_autovac( - vacuum_object* vacObj, HTAB* table_relopt_map, HTAB* partitioned_tables_map, TupleDesc partTupDesc) + vacuum_object* vacObj, HTAB* table_relopt_map, HTAB* partitioned_tables_map, TupleDesc pg_class_desc) { Oid partid = vacObj->tab_oid; bool dovacuum = false; diff --git a/src/gausskernel/process/postmaster/globalstats.cpp b/src/gausskernel/process/postmaster/globalstats.cpp index 0f8cbc3ca6..8e07348fb6 100644 --- a/src/gausskernel/process/postmaster/globalstats.cpp +++ b/src/gausskernel/process/postmaster/globalstats.cpp @@ -444,7 +444,6 @@ void StartBlockHashTableRemove(PgStat_StartBlockTableKey *tabkey) } LWLock *lock = LockStartBlockHashTablePartition(tabkey, LW_EXCLUSIVE); - (void)hash_search(g_instance.stat_cxt.tableStat->blocks_map, tabkey, HASH_REMOVE, NULL); LWLockRelease(lock); 
} diff --git a/src/gausskernel/storage/access/heap/hio.cpp b/src/gausskernel/storage/access/heap/hio.cpp index c559613c96..fb71d5a7e3 100644 --- a/src/gausskernel/storage/access/heap/hio.cpp +++ b/src/gausskernel/storage/access/heap/hio.cpp @@ -129,7 +129,7 @@ void CheckRelation(const Relation relation, int* extraBlocks, int lockWaiters) } } -static void UBtreeAddExtraBlocks(Relation relation, BulkInsertState bistate, NewPageState* npstate) +static void UBtreeAddExtraBlocks(Relation relation, BulkInsertState bistate) { int extraBlocks = 0; int lockWaiters = RelationExtensionLockWaiterCount(relation); @@ -137,9 +137,6 @@ static void UBtreeAddExtraBlocks(Relation relation, BulkInsertState bistate, New return; } CheckRelation(relation, &extraBlocks, lockWaiters); - if (npstate != NULL) { - npstate->extendBlocks += (uint32)extraBlocks; - } while (extraBlocks-- >= 0) { /* Ouch - an unnecessary lseek() each time through the loop! */ Buffer buffer = ReadBufferBI(relation, P_NEW, RBM_NORMAL, bistate); @@ -148,7 +145,7 @@ static void UBtreeAddExtraBlocks(Relation relation, BulkInsertState bistate, New } } -void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate, NewPageState* npstate) +void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) { BlockNumber block_num = InvalidBlockNumber; BlockNumber first_block = InvalidBlockNumber; @@ -158,7 +155,7 @@ void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate, NewPageS if (RelationIsUstoreIndex(relation)) { /* ubtree, use another bypass */ - UBtreeAddExtraBlocks(relation, bistate, npstate); + UBtreeAddExtraBlocks(relation, bistate); return; } diff --git a/src/gausskernel/storage/access/transam/xact.cpp b/src/gausskernel/storage/access/transam/xact.cpp index b91deff822..24e4ad2a3b 100755 --- a/src/gausskernel/storage/access/transam/xact.cpp +++ b/src/gausskernel/storage/access/transam/xact.cpp @@ -8312,7 +8312,7 @@ UndoRecPtr GetCurrentTransactionUndoRecPtr(UndoPersistence 
upersistence) void TryExecuteUndoActions(TransactionState s, UndoPersistence pLevel) { if (!u_sess->attr.attr_storage.enable_ustore_sync_rollback && - !(IsSubTransaction() || pLevel == UNDO_TEMP || pLevel == UNDO_UNLOGGED)) { + !(IsSubTransaction() || pLevel == UNDO_TEMP)) { return; } diff --git a/src/gausskernel/storage/access/ubtree/ubtdump.cpp b/src/gausskernel/storage/access/ubtree/ubtdump.cpp index 896d33fa97..9aeda50b59 100644 --- a/src/gausskernel/storage/access/ubtree/ubtdump.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtdump.cpp @@ -26,7 +26,7 @@ #include "access/transam.h" #include "access/ubtree.h" #include "utils/builtins.h" -#include "storage/procarray.h" +#include "storage/procarray.h" static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, @@ -419,7 +419,6 @@ static bool UBTreeVerifyTupleTransactionStatus(Relation rel, BlockNumber blkno, case XID_ABORTED: tranStatusError = (xminStatus == XID_ABORTED && xmaxStatus != XID_ABORTED); break; - default: break; } @@ -435,7 +434,7 @@ static bool UBTreeVerifyTupleTransactionStatus(Relation rel, BlockNumber blkno, } return true; } - + static int ItemCompare(const void *item1, const void *item2) { return ((ItemIdSort)item1)->start - ((ItemIdSort)item2)->start; @@ -444,7 +443,7 @@ static int ItemCompare(const void *item1, const void *item2) void UBTreeVerifyHikey(Relation rel, Page page, BlockNumber blkno) { CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); if (P_RIGHTMOST(opaque)) @@ -538,9 +537,11 @@ static void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockN TransactionId minCommittedXmax = MaxTransactionId; TransactionId pruneXid = ShortTransactionIdToNormal(xidBase, ((PageHeader)page)->pd_prune_xid); OffsetNumber maxoff = PageGetMaxOffsetNumber(page); - TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - + TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; + if (rel && RelationGetNamespace(rel) == PG_TOAST_NAMESPACE) { + GetOldestXminForUndo(&oldestXmin); + } for (OffsetNumber offnum = startoffset; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { ItemId itemid = PageGetItemId(page, offnum); IndexTuple tuple = (IndexTuple)PageGetItem(page, itemid); diff --git a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp index 400a7a4577..5f2efeb9a6 100644 --- a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp @@ -1372,15 +1372,7 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi * before we release the Exclusive lock. */ UBTRecycleQueueAddress addr; - NewPageState *npstate = NULL; - if (module_logging_is_on(MOD_UBT_NEWPAGE)) { - npstate = (NewPageState *)palloc0(sizeof(NewPageState)); - } - rbuf = UBTreeGetNewPage(rel, &addr, npstate); - if (npstate != NULL) { - UBTreePrintNewPageState(npstate); - pfree(npstate); - } + rbuf = UBTreeGetNewPage(rel, &addr); /* * origpage is the original page to be split. leftpage is a temporary @@ -2446,15 +2438,7 @@ static Buffer UBTreeNewRoot(Relation rel, Buffer lbuf, Buffer rbuf) * before we release the Exclusive lock. 
*/ UBTRecycleQueueAddress addr; - NewPageState *npstate = NULL; - if (module_logging_is_on(MOD_UBT_NEWPAGE)) { - npstate = (NewPageState *)palloc0(sizeof(NewPageState)); - } - rootbuf = UBTreeGetNewPage(rel, &addr, npstate); - if (npstate != NULL) { - UBTreePrintNewPageState(npstate); - pfree(npstate); - } + rootbuf = UBTreeGetNewPage(rel, &addr); rootpage = BufferGetPage(rootbuf); rootblknum = BufferGetBlockNumber(rootbuf); diff --git a/src/gausskernel/storage/access/ubtree/ubtpage.cpp b/src/gausskernel/storage/access/ubtree/ubtpage.cpp index 43b596fc32..35ee49897f 100644 --- a/src/gausskernel/storage/access/ubtree/ubtpage.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtpage.cpp @@ -89,25 +89,22 @@ void UBTreeInitMetaPage(Page page, BlockNumber rootbknum, uint32 level) ((PageHeader)page)->pd_lower = (uint16)(((char *)metad + sizeof(BTMetaPageData)) - (char *)page); } -void UBTreePrintNewPageState(NewPageState* npstate) +static void UBTreeLogAndFreeNewPageStats(UBTreeGetNewPageStats* stats) { - ereport(LOG, (errmodule(MOD_UBT_NEWPAGE), (errmsg( - "NewPageState: first_get_available_page_time:%ld, count:%u; second_get_available_page_time:%ld, count:%u; " - "extend_blocks_time:%ld, count:%u, blocks:%u; extend_one_time:%ld, count:%u; " - "get_head_time:%ld; get_available_page_on_page_time:%ld, get_available_page_on_page_time_max:%ld; " - "buffer_invalid_count:%u; need_lock_count:%u; queue_count:%u, items_count:%u, items_valid_count:%u; " - "conditional_lock_count:%u; get_available_page_on_page_count:%u; goto_restart_count first:%u, second:%u;" - "new_create_pages_count check:%u, get:%u; avg_travel_queue pages:%.2f, items:%.2f.", - npstate->firstGetAvailablePageTime, npstate->firstGetAvailablePageCount, - npstate->secondGetAvailablePageTime, npstate->secondGetAvailablePageCount, - npstate->extendBlocksTime, npstate->extendBlocksCount, npstate->extendBlocks, - npstate->extendOneTime, npstate->extendOneCount, npstate->getHeadTime, - npstate->getAvailablePageOnPageTime, 
npstate->getAvailablePageOnPageTimeMax, - npstate->bufferInvalidCount, npstate->needLockCount, npstate->queueCount, npstate->itemsCount, - npstate->itemsValidCount, npstate->itemsValidConditionalLockCount, - npstate->getAvailablePageOnPageCount, npstate->firstGotoRestartCount, - npstate->secondGotoRestartCount, npstate->checkNewCreatePagesCount, - npstate->getFromNewCreatePagesCount, npstate->avgTravelQueuePages, npstate->avgTravelQueueItems)))); + if (stats) { + ereport(LOG, (errmodule(MOD_UBTREE), (errmsg( + "UBTreeGetNewPageStats: rnode=[%u, %u, %u], getAvailablePage[total, count, max]=[%ld, %u, %ld], " + "addExtraBlocks[total, count, max]=[%ld, %u, %ld], extendOne[total, count, max]=[%ld, %u, %ld], " + "getOnUrqPage[total, count, max]=[%ld, %u, %ld]," + "urqItemsCount:%u; restartCount first:%u, checkNonTrackedPagesCount:%u;", + stats->spcnode, stats->dbnode, stats->relnode, + stats->getAvailablePageTime, stats->getAvailablePageCount, stats->getAvailablePageTimeMax, + stats->addExtraBlocksTime, stats->addExtraBlocksCount, stats->addExtraBlocksTimeMax, + stats->extendOneTime, stats->extendOneCount, stats->extendOneTimeMax, + stats->getOnUrqPageTime, stats->getOnUrqPageCount, stats->getOnUrqPageTimeMax, + stats->urqItemsCount, stats->restartCount, stats->checkNonTrackedPagesCount)))); + pfree(stats); + } } /* @@ -277,15 +274,7 @@ Buffer UBTreeGetRoot(Relation rel, int access) * before we release the Exclusive lock. 
*/ UBTRecycleQueueAddress addr; - NewPageState *npstate = NULL; - if (module_logging_is_on(MOD_UBT_NEWPAGE)) { - npstate = (NewPageState *)palloc0(sizeof(NewPageState)); - } - rootbuf = UBTreeGetNewPage(rel, &addr, npstate); - if (npstate != NULL) { - UBTreePrintNewPageState(npstate); - pfree(npstate); - } + rootbuf = UBTreeGetNewPage(rel, &addr); rootblkno = BufferGetBlockNumber(rootbuf); rootpage = BufferGetPage(rootbuf); rootopaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(rootpage); @@ -1461,7 +1450,7 @@ static bool UBTreeUnlinkHalfDeadPage(Relation rel, Buffer leafbuf, bool *rightsi _bt_relbuf(rel, buf); } if (del_blknos != NULL) { - BTStack del_blkno = (BTStack) palloc0(sizeof(BTStackData)); + BTStack del_blkno = (BTStack)palloc0(sizeof(BTStackData)); del_blkno->bts_blkno = target; del_blknos->bts_parent = del_blkno; } @@ -1483,19 +1472,24 @@ static bool UBTreeUnlinkHalfDeadPage(Relation rel, Buffer leafbuf, bool *rightsi * page in the Recycle Queue, and we need to call UBTreeRecordUsedPage() * with this addr when the returned page is used correctly. 
*/ -Buffer UBTreeGetNewPage(Relation rel, UBTRecycleQueueAddress* addr, NewPageState* npstate) +Buffer UBTreeGetNewPage(Relation rel, UBTRecycleQueueAddress* addr) { WHITEBOX_TEST_STUB("UBTreeGetNewPage-begin", WhiteboxDefaultErrorEmit); TimestampTz startTime = 0; + UBTreeGetNewPageStats* stats = NULL; + if (module_logging_is_on(MOD_UBTREE)) { + stats = (UBTreeGetNewPageStats*)palloc0(sizeof(UBTreeGetNewPageStats)); + stats->spcnode = rel->rd_node.spcNode; + stats->dbnode = rel->rd_node.dbNode; + stats->relnode = rel->rd_node.relNode; + } restart: - if (npstate != NULL) { + if (stats) { + stats->restartCount++; startTime = GetCurrentTimestamp(); } - Buffer buf = UBTreeGetAvailablePage(rel, RECYCLE_FREED_FORK, addr, npstate); - if (npstate != NULL) { - npstate->firstGetAvailablePageTime += GetCurrentTimestamp() - startTime; - npstate->firstGetAvailablePageCount++; - } + Buffer buf = UBTreeGetAvailablePage(rel, RECYCLE_FREED_FORK, addr, stats); + UBTreeRecordGetNewPageCost(stats, GET_PAGE, startTime); if (buf == InvalidBuffer) { /* * No free page left, need to extend the relation @@ -1507,51 +1501,36 @@ restart: * page. We can skip locking for new or temp relations, however, * since no one else could be accessing them. */ - if (npstate != NULL) { - npstate->bufferInvalidCount++; - } bool needLock = !RELATION_IS_LOCAL(rel); if (needLock) { - if (npstate != NULL) { - npstate->needLockCount++; - } if (!ConditionalLockRelationForExtension(rel, ExclusiveLock)) { /* couldn't get the lock immediately; wait for it. 
*/ LockRelationForExtension(rel, ExclusiveLock); - if (npstate != NULL) { + if (stats) { startTime = GetCurrentTimestamp(); } /* check again, relation may extended by other backends */ - buf = UBTreeGetAvailablePage(rel, RECYCLE_FREED_FORK, addr, npstate); - if (npstate != NULL) { - npstate->secondGetAvailablePageTime += GetCurrentTimestamp() - startTime; - npstate->secondGetAvailablePageCount++; - } + buf = UBTreeGetAvailablePage(rel, RECYCLE_FREED_FORK, addr, stats); + UBTreeRecordGetNewPageCost(stats, GET_PAGE, startTime); if (buf != InvalidBuffer) { UnlockRelationForExtension(rel, ExclusiveLock); goto out; } - if (npstate != NULL) { + if (stats) { startTime = GetCurrentTimestamp(); } /* Time to bulk-extend. */ - RelationAddExtraBlocks(rel, NULL, npstate); - if (npstate != NULL) { - npstate->extendBlocksTime += GetCurrentTimestamp() - startTime; - npstate->extendBlocksCount++; - } + RelationAddExtraBlocks(rel, NULL); + UBTreeRecordGetNewPageCost(stats, ADD_BLOCKS, startTime); WHITEBOX_TEST_STUB("UBTreeGetNewPage-bulk-extend", WhiteboxDefaultErrorEmit); } } - if (npstate != NULL) { + if (stats) { startTime = GetCurrentTimestamp(); } /* extend by one page */ buf = ReadBuffer(rel, P_NEW); - if (npstate != NULL) { - npstate->extendOneTime += GetCurrentTimestamp() - startTime; - npstate->extendOneCount++; - } + UBTreeRecordGetNewPageCost(stats, EXTEND_ONE, startTime); WHITEBOX_TEST_STUB("UBTreeGetNewPage-extend", WhiteboxDefaultErrorEmit); if (!ConditionalLockBuffer(buf)) { /* lock failed. 
To avoid dead lock, we need to retry */ @@ -1559,9 +1538,6 @@ restart: UnlockRelationForExtension(rel, ExclusiveLock); } ReleaseBuffer(buf); - if (npstate != NULL) { - npstate->firstGotoRestartCount++; - } goto restart; } /* @@ -1586,9 +1562,6 @@ out: ReleaseBuffer(addr->queueBuf); addr->queueBuf = InvalidBuffer; } - if (npstate != NULL) { - npstate->secondGotoRestartCount++; - } goto restart; } @@ -1604,6 +1577,7 @@ out: } } UBTreePageInit(page, BufferGetPageSize(buf)); + UBTreeLogAndFreeNewPageStats(stats); return buf; } @@ -1634,3 +1608,34 @@ static void UBTreeLogReusePage(Relation rel, BlockNumber blkno, TransactionId la (void)XLogInsert(RM_UBTREE_ID, XLOG_UBTREE_REUSE_PAGE, rel->rd_node.bucketNode); } + +void UBTreeRecordGetNewPageCost(UBTreeGetNewPageStats* stats, NewPageCostType type, TimestampTz start) +{ + if (stats) { + TimestampTz cost = GetCurrentTimestamp() - start; + switch (type) { + case GET_PAGE: + stats->getAvailablePageTime += cost; + stats->getAvailablePageCount++; + stats->getAvailablePageTimeMax = Max(stats->getAvailablePageTimeMax, cost); + break; + case ADD_BLOCKS: + stats->addExtraBlocksTime += cost; + stats->addExtraBlocksCount++; + stats->addExtraBlocksTimeMax = Max(stats->addExtraBlocksTimeMax, cost); + break; + case EXTEND_ONE: + stats->extendOneTime += cost; + stats->extendOneCount++; + stats->extendOneTimeMax = Max(stats->extendOneTimeMax, cost); + break; + case URQ_GET_PAGE: + stats->getOnUrqPageTime += cost; + stats->getOnUrqPageCount++; + stats->getOnUrqPageTimeMax = Max(stats->getOnUrqPageTimeMax, cost); + break; + default: + break; + } + } +} \ No newline at end of file diff --git a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp index bcec8572c4..74db5dc428 100644 --- a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp @@ -38,7 +38,7 @@ static void UBTreeRecycleQueueAddPage(Relation rel, UBTRecycleForkNumber 
forkNum BlockNumber blkno, TransactionId xid); static Buffer StepNextPage(Relation rel, Buffer buf); static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumber forkNumber, Buffer buf, - TransactionId waterLevelXid, UBTRecycleQueueAddress *addr, bool *continueScan, NewPageState* npstate = NULL); + TransactionId waterLevelXid, UBTRecycleQueueAddress *addr, bool *continueScan, UBTreeGetNewPageStats* stats = NULL); static Buffer MoveToEndpointPage(Relation rel, Buffer buf, bool needHead, int access); static uint16 PageAllocateItem(Buffer buf); static void RecycleQueueLinkNewPage(Relation rel, Buffer leftBuf, Buffer newBuf); @@ -293,8 +293,7 @@ void UBTreeInitializeRecycleQueue(Relation rel) static bool UBTreeTryRecycleEmptyPageInternal(Relation rel) { UBTRecycleQueueAddress addr; - NewPageState *npstate = NULL; - Buffer buf = UBTreeGetAvailablePage(rel, RECYCLE_EMPTY_FORK, &addr, npstate); + Buffer buf = UBTreeGetAvailablePage(rel, RECYCLE_EMPTY_FORK, &addr, NULL); if (!BufferIsValid(buf)) { return false; /* no available page to recycle */ } @@ -365,15 +364,15 @@ static Buffer StepNextPage(Relation rel, Buffer buf) } static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumber forkNumber, Buffer buf, - TransactionId WaterLevelXid, UBTRecycleQueueAddress *addr, bool *continueScan, NewPageState* npstate) + TransactionId WaterLevelXid, UBTRecycleQueueAddress *addr, bool *continueScan, UBTreeGetNewPageStats* stats) { Page page = BufferGetPage(buf); UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); uint16 curOffset = header->head; while (IsNormalOffset(curOffset)) { - if (npstate != NULL) { - npstate->itemsCount++; + if (stats) { + stats->urqItemsCount++; } UBTRecycleQueueItem item = HeaderGetItem(header, curOffset); if (TransactionIdFollowsOrEquals(item->xid, WaterLevelXid)) { @@ -384,9 +383,6 @@ static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumber forkNumb curOffset = item->next; continue; } - 
if (npstate != NULL) { - npstate->itemsValidCount++; - } Buffer targetBuf = ReadBuffer(rel, item->blkno); _bt_checkbuffer_valid(rel, targetBuf); if (ConditionalLockBuffer(targetBuf)) { @@ -394,9 +390,6 @@ static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumber forkNumb bool pageUsable = true; if (forkNumber == RECYCLE_FREED_FORK) { pageUsable = UBTreePageRecyclable(BufferGetPage(targetBuf)); - if (npstate != NULL) { - npstate->itemsValidConditionalLockCount++; - } } else if (forkNumber == RECYCLE_EMPTY_FORK) { /* make sure that it's not half-dead or the deletion is not reserved yet */ Page indexPage = BufferGetPage(targetBuf); @@ -434,11 +427,9 @@ static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumber forkNumb } Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBTRecycleQueueAddress *addr, - NewPageState *npstate) + UBTreeGetNewPageStats *stats) { TimestampTz startTime = 0; - TimestampTz elapsedTime = 0; - uint32 getAvailablePageCount = 0; TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; if (RelationGetNamespace(rel) == PG_TOAST_NAMESPACE) { TransactionId frozenXid = g_instance.undo_cxt.globalFrozenXid; @@ -446,29 +437,15 @@ Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBT TransactionId waterLevelXid = ((forkNumber == RECYCLE_EMPTY_FORK) ? 
recycleXid : frozenXid); oldestXmin = Min(oldestXmin, waterLevelXid); } - if (npstate != NULL) { - getAvailablePageCount = npstate->firstGetAvailablePageCount + npstate->secondGetAvailablePageCount; - startTime = GetCurrentTimestamp(); - } Buffer queueBuf = RecycleQueueGetEndpointPage(rel, forkNumber, true, BT_READ); - if (npstate != NULL) { - npstate->getHeadTime += GetCurrentTimestamp() - startTime; - } Buffer indexBuf = InvalidBuffer; bool continueScan = false; - for (BlockNumber bufCount = 0; bufCount < URQ_MAX_GET_PAGE_TIMES; bufCount++) { - if (npstate != NULL) { - npstate->getAvailablePageOnPageCount++; - npstate->avgTravelQueuePages = (npstate->getAvailablePageOnPageCount * 1.0) / getAvailablePageCount; + for (BlockNumber bufCount = 0; bufCount < URQ_GET_PAGE_MAX_RETRY_TIMES; bufCount++) { + if (stats) { startTime = GetCurrentTimestamp(); } - indexBuf = GetAvailablePageOnPage(rel, forkNumber, queueBuf, oldestXmin, addr, &continueScan, npstate); - if (npstate != NULL) { - elapsedTime = GetCurrentTimestamp() - startTime; - npstate->getAvailablePageOnPageTimeMax = Max(npstate->getAvailablePageOnPageTimeMax, elapsedTime); - npstate->getAvailablePageOnPageTime += elapsedTime; - npstate->avgTravelQueueItems = (npstate->itemsCount * 1.0) / npstate->getAvailablePageOnPageCount; - } + indexBuf = GetAvailablePageOnPage(rel, forkNumber, queueBuf, oldestXmin, addr, &continueScan, stats); + UBTreeRecordGetNewPageCost(stats, URQ_GET_PAGE, startTime); if (!continueScan) { break; } @@ -488,8 +465,8 @@ Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBT return InvalidBuffer; } - if (npstate != NULL) { - npstate->checkNewCreatePagesCount++; + if (stats) { + stats->checkNonTrackedPagesCount++; } /* no available page found, but we can check new created pages */ @@ -514,9 +491,6 @@ Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBT indexBuf = ReadBuffer(rel, curBlkno); if (ConditionalLockBuffer(indexBuf)) { if 
(PageIsNew(BufferGetPage(indexBuf))) { - if (npstate != NULL) { - npstate->getFromNewCreatePagesCount++; - } break; } LockBuffer(indexBuf, BUFFER_LOCK_UNLOCK); @@ -1252,7 +1226,6 @@ static void UBTRecycleQueueVerifyAllItems(UBTRecycleQueueHeader header, Relation uint16 itemMaxNum = BlockGetMaxItems(blkno); uint16 currOffset = header->head; uint16 prevOffset = InvalidOffset; - UBTRecycleQueueItem item = NULL; RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; diff --git a/src/gausskernel/storage/access/ubtree/ubtree.cpp b/src/gausskernel/storage/access/ubtree/ubtree.cpp index daae414351..404576b15c 100644 --- a/src/gausskernel/storage/access/ubtree/ubtree.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtree.cpp @@ -917,7 +917,7 @@ restart: /* Run pagedel in a temp context to avoid memory leakage */ MemoryContextReset(vstate->pagedelcontext); MemoryContext oldcontext = MemoryContextSwitchTo(vstate->pagedelcontext); - BTStack dummy_del_blknos = (BTStack) palloc0(sizeof(BTStackData)); + BTStack dummy_del_blknos = (BTStack)palloc0(sizeof(BTStackData)); int ndel = UBTreePageDel(rel, buf, dummy_del_blknos); if (ndel) { diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index f46ebd5ac2..553a2d041a 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -2774,9 +2774,9 @@ check_tup_satisfies_update: useInplaceUpdate = false; useLinkUpdate = false; } else if (!useInplaceUpdate) { - bool pruned = UHeapPagePruneOpt(relation, buffer, oldOffnum, newtupsize - oldtupsize); lp = UPageGetRowPtr(page, oldOffnum); - if (pruned && (RowPtrGetOffset(lp) + newtupsize <= BLCKSZ)) { + if (UHeapPagePruneOpt(relation, buffer, oldOffnum, newtupsize - oldtupsize) + && (RowPtrGetOffset(lp) + newtupsize <= BLCKSZ)) { useInplaceUpdate = true; } /* The page might have been modified, so refresh disk_tuple */ @@ -5682,7 
+5682,8 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) /* Apply undo action for an INSERT */ if (urec->Blkno() != blkno) { ereport(PANIC, (errmodule(MOD_USTORE), errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Blkno %u of undorecord is different from buffer %u.", urec->Blkno(), blkno))); + errmsg("UHeapAbortSpeculative error: UndoRecord's blkno %u is not same with buffer %u.", + urec->Blkno(), blkno))); } ExecuteUndoForInsert(relation, buffer, urec->Offset(), urec->Xid()); diff --git a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp index 57defd5233..c0ddd21453 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp @@ -417,7 +417,8 @@ int UHeapUndoActions(URecVector *urecvec, int startIdx, int endIdx, TransactionI uint8 undotype = undorecord->Utype(); if (undorecord->Blkno() != blkno) { ereport(PANIC, (errmodule(MOD_USTORE), errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Blkno %u of undorecord is different from buffer %u.", undorecord->Blkno(), blkno))); + errmsg("UHeapUndoActions error: UndoRecord's blkno %u is not same with buffer %u.", + undorecord->Blkno(), blkno))); } /* * If the current UndoRecPtr on the slot is less than the @@ -1076,7 +1077,7 @@ bool UHeapUndoActionsFindRelidByRelfilenode(RelFileNode *relfilenode, Oid *reloi return true; } -bool ExecuteUndoActionsPageForPartition(Relation src, SMgrRelation dest, ForkNumber forkNum, BlockNumber srcBlkno, +bool ExecuteUndoActionsForPartition(Relation src, SMgrRelation dest, ForkNumber forkNum, BlockNumber srcBlkno, BlockNumber destBlkno, RollBackTypeForAlterTable opType, PartitionToastInfo *toastInfo) { RelationOpenSmgr(src); @@ -1170,7 +1171,6 @@ bool ExecuteUndoActionsPageForPartition(Relation src, SMgrRelation dest, ForkNum hashkey.oldChunkId = toastPointer->va_valueid; mapping = (OldToNewChunkIdMapping)hash_search(toastInfo->chunkIdHashTable, 
&hashkey, HASH_FIND, NULL); - if (PointerIsValid(mapping)) { toastPointer->va_valueid = mapping->newChunkId; } @@ -1193,7 +1193,6 @@ bool ExecuteUndoActionsPageForPartition(Relation src, SMgrRelation dest, ForkNum hashkey.oldChunkId = toastPointer->va_valueid; mapping = (OldToNewChunkIdMapping)hash_search(toastInfo->chunkIdHashTable, &hashkey, HASH_FIND, NULL); - if (PointerIsValid(mapping)) { toastPointer->va_valueid = mapping->newChunkId; } diff --git a/src/gausskernel/storage/access/ustore/knl_uscan.cpp b/src/gausskernel/storage/access/ustore/knl_uscan.cpp index 7a02e18e07..8bf8bfb494 100644 --- a/src/gausskernel/storage/access/ustore/knl_uscan.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uscan.cpp @@ -1696,7 +1696,7 @@ static bool VerifyUHeapGetTup(UHeapScanDesc scan, ScanDirection dir) tuple = scan->rs_visutuples[lineOff]; scan->rs_base.rs_cindex = lineOff; scan->rs_cutup = tuple; - return tuple; + return true; } /* diff --git a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp index c6a2892aa4..130da8f175 100644 --- a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp +++ b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp @@ -932,11 +932,9 @@ static void UHeapToastDeleteDatum(Relation rel, Datum value, int options) IndexInfo *indexInfo = BuildIndexInfo(toastidx); EState *estate = NULL; - bool estateIsNotNull = false; if (indexInfo->ii_Expressions != NIL || indexInfo->ii_ExclusionOps != NULL) { estate = CreateExecutorState(); - estateIsNotNull = true; } /* The toast table of ustore table should also be of ustore type */ @@ -973,11 +971,11 @@ static void UHeapToastDeleteDatum(Relation rel, Datum value, int options) Datum values[INDEX_MAX_KEYS]; bool isnulls[INDEX_MAX_KEYS]; - if (estateIsNotNull && estate != NULL) { + if (estate != NULL) { ExprContext *econtext = GetPerTupleExprContext(estate); econtext->ecxt_scantuple = slot; } - FormIndexDatum(indexInfo, slot, 
estateIsNotNull ? estate : NULL, values, isnulls); + FormIndexDatum(indexInfo, slot, estate, values, isnulls); index_delete(toastidx, values, isnulls, &toasttup->ctid, false); } diff --git a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp index 2dda233752..96df2f80fd 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp @@ -707,7 +707,15 @@ void LazyVacuumUHeapRel(Relation onerel, VacuumStmt *vacstmt, BufferAccessStrate double newLiveTuples; Relation *indexrel = NULL; Partition *indexpart = NULL; - + if (IsAutoVacuumWorkerProcess()) { + /* In the autovacuum process, build fsm tree for common tables, toast tables, or partitions. */ + FreeSpaceMapVacuum(onerel); + if (vacstmt->needFreeze) { + /* Force vacuum for recycle clog. */ + ForceVacuumUHeapRelBypass(onerel, vacstmt, bstrategy); + } + return; + } /* the statFlag is used in PgStat_StatTabEntry, seen in pgstat_report_vacuum and pgstat_recv_vacuum */ uint32 statFlag = InvalidOid; if (RelationIsSubPartitionOfSubPartitionTable(onerel)) { diff --git a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp index 7328acf0a5..242c5d85e6 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp @@ -384,7 +384,7 @@ static UVersionSelector UHeapSelectVersionMVCC(UTupleTidOp op, TransactionId xid * snapshot belongs to an older CID, then we need the CID for this * tuple to make a final visibility decision. 
*/ - if (GetCurrentCommandIdUsed() || GetCurrentCommandId(false) >= snapshot->curcid) + if (GetCurrentCommandIdUsed() || GetCurrentCommandId(false) != snapshot->curcid) return UVERSION_CHECK_CID; if (op == UTUPLETID_GONE) { return UVERSION_NONE; @@ -394,8 +394,6 @@ static UVersionSelector UHeapSelectVersionMVCC(UTupleTidOp op, TransactionId xid } return UVERSION_CURRENT; } - /* Nothing has changed since our scan started. */ - return ((op == UTUPLETID_GONE) ? UVERSION_NONE : UVERSION_CURRENT); } if (!XidVisibleInSnapshot(xid, snapshot, &hintstatus, (RecoveryInProgress() ? buffer : InvalidBuffer), NULL)) { /* diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp index 971c1cfcb8..77e55b0629 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp @@ -65,13 +65,13 @@ bool CheckNeedSwitch(UndoPersistence upersistence) void RollbackIfUndoExceeds(TransactionId xid, uint64 size) { t_thrd.undo_cxt.transUndoSize += size; - uint64 transUndoThresholdSize = UNDO_SPACE_THRESHOLD_PER_TRANS * BLCKSZ; + uint64 transUndoThresholdSize = GET_UNDO_LIMIT_SIZE_PER_XACT * BLCKSZ; if ((!t_thrd.xlog_cxt.InRecovery) && (t_thrd.undo_cxt.transUndoSize > transUndoThresholdSize)) { - ereport(ERROR, (errmsg(UNDOFORMAT("xid %lu, the undo size %lu of the transaction exceeds the threshold %lu." - "trans_undo_threshold_size %lu, undo_space_limit_size %lu."), - xid, t_thrd.undo_cxt.transUndoSize, transUndoThresholdSize, + ereport(ERROR, (errmsg(UNDOFORMAT("The undo size %lu of the transaction exceeds the threshold %lu." + "undo_limit_size_trans = %lu, undo_space_limit_size = %lu. 
Current xid = %lu."), + t_thrd.undo_cxt.transUndoSize, transUndoThresholdSize, (uint64)u_sess->attr.attr_storage.undo_limit_size_transaction, - (uint64)u_sess->attr.attr_storage.undo_space_limit_size))); + (uint64)u_sess->attr.attr_storage.undo_space_limit_size, xid))); } return; } @@ -724,7 +724,7 @@ void OnUndoProcExit(int code, Datum arg) ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("on undo exit, thrd: %d"), t_thrd.myLogicTid))); for (auto i = 0; i < UNDO_PERSISTENCE_LEVELS; i++) { UndoPersistence upersistence = static_cast(i); - if (upersistence == UNDO_TEMP || upersistence == UNDO_UNLOGGED) { + if (upersistence == UNDO_TEMP) { TransactionId topXid = GetTopTransactionIdIfAny(); undo::TransactionSlot *slot = static_cast(t_thrd.undo_cxt.slots[upersistence]); UndoSlotPtr slotPtr = t_thrd.undo_cxt.slotPtr[upersistence]; diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp index f62aef7fe4..0f524d0213 100755 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp @@ -51,6 +51,22 @@ #define ALLOCSET_UNDO_RECYCLE_MAXSIZE ALLOCSET_DEFAULT_MAXSIZE * 4 +#define CheckUndoZoneMeta(zone) \ + do { \ + if (UNDO_PTR_GET_OFFSET(allocatePtr) == UNDO_LOG_BLOCK_HEADER_SIZE && \ + UNDO_PTR_GET_OFFSET(recyclePtr) != UNDO_LOG_BLOCK_HEADER_SIZE) { \ + allocatePtr += ((uint64)1L << 32); \ + if (recyclePtr < allocatePtr) { \ + ereport(LOG, (errmodule(MOD_UNDO), \ + errmsg(UNDOFORMAT( \ + "zone meta corrupted: zid %d, old allocatePtr %lu," \ + "new allocatePtr %lu, recyclePtr %lu, frozenSlotPtr %lu."), \ + zone->GetZoneId(), zone->GetAllocateTSlotPtr(), \ + allocatePtr, recyclePtr, frozenSlotPtr))); \ + } \ + } \ + } while(0) + typedef struct UndoXmins { TransactionId oldestXmin; TransactionId recycleXmin; @@ -187,26 +203,19 @@ void AdvanceFrozenXid(UndoZone *zone, TransactionId *oldestFozenXid, { 
TransactionSlot *slot = NULL; UndoSlotPtr frozenSlotPtr = zone->GetFrozenSlotPtr(); - UndoSlotPtr recycle = zone->GetRecycleTSlotPtr(); - UndoSlotPtr allocate = zone->GetAllocateTSlotPtr(); - UndoSlotPtr currentSlotPtr = frozenSlotPtr > recycle ? frozenSlotPtr : recycle; + UndoSlotPtr recyclePtr = zone->GetRecycleTSlotPtr(); + UndoSlotPtr allocatePtr = zone->GetAllocateTSlotPtr(); + UndoSlotPtr currentSlotPtr = frozenSlotPtr > recyclePtr ? frozenSlotPtr : recyclePtr; UndoSlotPtr start = INVALID_UNDO_SLOT_PTR; - if (UNDO_PTR_GET_OFFSET(allocate) == UNDO_LOG_BLOCK_HEADER_SIZE && - UNDO_PTR_GET_OFFSET(recycle) != UNDO_LOG_BLOCK_HEADER_SIZE) { - allocate += ((uint64)1L << 32); - if (recycle < allocate) { - ereport(LOG, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT( - "zone meta corrupted: zid %d, old allocate %lu, new allocate %lu, recycle %lu, frozenSlotPtr %lu."), - zone->GetZoneId(), zone->GetAllocateTSlotPtr(), allocate, recycle, frozenSlotPtr))); - } - } - while (currentSlotPtr < allocate) { + + CheckUndoZoneMeta(zone); + + while (currentSlotPtr < allocatePtr) { UndoSlotBuffer& slotBuf = g_slotBufferCache->FetchTransactionBuffer(currentSlotPtr); slotBuf.PrepareTransactionSlot(currentSlotPtr); start = currentSlotPtr; bool finishAdvanceXid = false; - while (slotBuf.BufBlock() == UNDO_PTR_GET_BLOCK_NUM(currentSlotPtr) && (currentSlotPtr < allocate)) { + while (slotBuf.BufBlock() == UNDO_PTR_GET_BLOCK_NUM(currentSlotPtr) && (currentSlotPtr < allocatePtr)) { slot = slotBuf.FetchTransactionSlot(currentSlotPtr); WHITEBOX_TEST_STUB(UNDO_RECYCL_ESPACE_FAILED, WhiteboxDefaultErrorEmit); pg_read_barrier(); @@ -273,17 +282,10 @@ void AdvanceFrozenXid(UndoZone *zone, TransactionId *oldestFozenXid, bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId frozenXid, TransactionId *oldestRecycleXid, TransactionId forceRecycleXid, TransactionId oldestXmin) { - UndoSlotPtr recycle = zone->GetRecycleTSlotPtr(); - UndoSlotPtr allocate = zone->GetAllocateTSlotPtr(); 
- if (UNDO_PTR_GET_OFFSET(allocate) == UNDO_LOG_BLOCK_HEADER_SIZE && - UNDO_PTR_GET_OFFSET(recycle) != UNDO_LOG_BLOCK_HEADER_SIZE) { - allocate += ((uint64)1L << 32); - if (recycle < allocate) { - ereport(LOG, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( - "zone meta corrupted, zid %d, old allocate %lu, new allocate %lu, recycle %lu, frozenSlotPtr %lu."), - zone->GetZoneId(), zone->GetAllocateTSlotPtr(), allocate, recycle, zone->GetFrozenSlotPtr()))); - } - } + UndoSlotPtr frozenSlotPtr = zone->GetFrozenSlotPtr(); + UndoSlotPtr recyclePtr = zone->GetRecycleTSlotPtr(); + UndoSlotPtr allocatePtr = zone->GetAllocateTSlotPtr(); + CheckUndoZoneMeta(zone); TransactionSlot *slot = NULL; UndoRecPtr endUndoPtr = INVALID_UNDO_REC_PTR; UndoRecPtr oldestEndUndoPtr = INVALID_UNDO_REC_PTR; @@ -299,27 +301,27 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f if (zone->GetPersitentLevel() == UNDO_PERMANENT) { needWal = true; } - while (recycle < allocate) { - UndoSlotBuffer& slotBuf = g_slotBufferCache->FetchTransactionBuffer(recycle); + while (recyclePtr < allocatePtr) { + UndoSlotBuffer& slotBuf = g_slotBufferCache->FetchTransactionBuffer(recyclePtr); UndoRecPtr startUndoPtr = INVALID_UNDO_REC_PTR; - start = recycle; - slotBuf.PrepareTransactionSlot(recycle); + start = recyclePtr; + slotBuf.PrepareTransactionSlot(recyclePtr); undoRecycled = false; - Assert(slotBuf.BufBlock() == UNDO_PTR_GET_BLOCK_NUM(recycle)); - while (slotBuf.BufBlock() == UNDO_PTR_GET_BLOCK_NUM(recycle) && (recycle < allocate)) { - slot = slotBuf.FetchTransactionSlot(recycle); + Assert(slotBuf.BufBlock() == UNDO_PTR_GET_BLOCK_NUM(recyclePtr)); + while (slotBuf.BufBlock() == UNDO_PTR_GET_BLOCK_NUM(recyclePtr) && (recyclePtr < allocatePtr)) { + slot = slotBuf.FetchTransactionSlot(recyclePtr); WHITEBOX_TEST_STUB(UNDO_RECYCL_ESPACE_FAILED, WhiteboxDefaultErrorEmit); pg_read_barrier(); if (!TransactionIdIsValid(slot->XactId())) { - RecheckUndoRecycleXid(zone, slot, recycle); + 
RecheckUndoRecycleXid(zone, slot, recyclePtr); break; } if (slot->StartUndoPtr() == INVALID_UNDO_REC_PTR) { break; } if (TransactionIdPrecedes(slot->XactId(), globalRecycleXid)) { - RecheckUndoRecycleXid(zone, slot, recycle); + RecheckUndoRecycleXid(zone, slot, recyclePtr); } #ifdef DEBUG_UHEAP @@ -335,7 +337,7 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f errmsg(UNDOFORMAT( "Recycle visibility check wrong: zone %d " "transaction slot %lu xid %lu slot->XactId() %lu, globalRecycleXid %lu."), - zone->GetZoneId(), recycle, recycleXid, slot->XactId(), globalRecycleXid))); + zone->GetZoneId(), recyclePtr, recycleXid, slot->XactId(), globalRecycleXid))); } } #endif @@ -356,22 +358,21 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f bool slotTranactionStateCheck = (!UHeapTransactionIdDidCommit(slot->XactId()) && !isInProgress && slot->NeedRollback()); if (slotTranactionStateCheck) { - AsyncRollback(zone, recycle, slot); + AsyncRollback(zone, recyclePtr, slot); break; } - ereport(LOG, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("ForceRecycle: slot=%lu, slotxid=%lu, " - "recyclexid=%lu, recycleXmin=%lu, startptr=%lu, endptr=%lu, " - "glovalRecycleXid=%lu, globalFrozenXid=%lu, oldestXmin=%lu, " - "undoTotalSize=%u, undo_space_limit_size=%d, " - "metasize=%ld, g_force_recycle_size=%d."), recycle, slot->XactId(), - forceRecycleXid, recycleXmin, UNDO_PTR_GET_OFFSET(slot->StartUndoPtr()), - UNDO_PTR_GET_OFFSET(slot->EndUndoPtr()), + ereport(LOG, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("ForceRecycle: slot = %lu, slotXid = %lu, " + "startPtr = %lu, endPtr = %lu, recycleXmin = %lu, oldestXmin = %lu, forceRecycleXid = %lu, " + "glovalRecycleXid = %lu, globalFrozenXid = %lu, " + "undoTotalSzie = %u, undoMetaSize = %ld, undoSpaceLimitSize = %d, g_forceRecycleSize = %d."), + recyclePtr, slot->XactId(), + UNDO_PTR_GET_OFFSET(slot->StartUndoPtr()), UNDO_PTR_GET_OFFSET(slot->EndUndoPtr()), + recycleXmin, oldestXmin, 
forceRecycleXid, pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid), pg_atomic_read_u64(&g_instance.undo_cxt.globalFrozenXid), - oldestXmin, pg_atomic_read_u32(&g_instance.undo_cxt.undoTotalSize), - u_sess->attr.attr_storage.undo_space_limit_size, - (int64)g_instance.undo_cxt.undoMetaSize, g_forceRecycleSize))); + (int64)g_instance.undo_cxt.undoMetaSize, + u_sess->attr.attr_storage.undo_space_limit_size, g_forceRecycleSize))); forceRecycle = true; } #ifdef DEBUG_UHEAP @@ -379,7 +380,7 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f #endif ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("recycle zone %d, transaction slot %lu xid %lu start ptr %lu end ptr %lu."), - zone->GetZoneId(), recycle, slot->XactId(), + zone->GetZoneId(), recyclePtr, slot->XactId(), slot->StartUndoPtr(), slot->EndUndoPtr()))); if (!startUndoPtr) { startUndoPtr = slot->StartUndoPtr(); @@ -390,8 +391,8 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f endUndoPtr = slot->EndUndoPtr(); recycleXid = slot->XactId(); undoRecycled = true; - recycle = GetNextSlotPtr(recycle); - if (slotBuf.BufBlock() != UNDO_PTR_GET_BLOCK_NUM(recycle)) { + recyclePtr = GetNextSlotPtr(recyclePtr); + if (slotBuf.BufBlock() != UNDO_PTR_GET_BLOCK_NUM(recyclePtr)) { g_slotBufferCache->RemoveSlotBuffer(start); slotBuf.Release(); } @@ -416,14 +417,14 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f zone->SetRecycleXid(recycleXid); *oldestRecycleXid = recycleXid; zone->SetForceDiscardURecPtr(endUndoPtr); - zone->SetRecycleTSlotPtr(recycle); + zone->SetRecycleTSlotPtr(recyclePtr); result = true; XLogRecPtr lsn = InvalidXLogRecPtr; if (needWal) { zone->MarkDirty(); XlogUndoDiscard xlrec; - xlrec.endSlot = recycle; + xlrec.endSlot = recyclePtr; xlrec.startSlot = start; xlrec.recycleLoops = g_recycleLoops; xlrec.recycledXid = recycleXid; @@ -434,7 +435,7 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId 
recycleXmin, TransactionId f ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("zone %d recycle slot start %lu end %lu from slot %lu " "to slot %lu lsn %lu xid %lu loops %lu oldestXmin %lu."), - zone->GetZoneId(), startUndoPtr, endUndoPtr, start, recycle, + zone->GetZoneId(), startUndoPtr, endUndoPtr, start, recyclePtr, lsn, recycleXid, g_recycleLoops, recycleXmin))); } END_CRIT_SECTION(); @@ -444,7 +445,7 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId f zone->UnlockUndoZone(); zone->ReleaseSpace(startUndoPtr, endUndoPtr, &g_forceRecycleSize); - zone->ReleaseSlotSpace(start, recycle, &g_forceRecycleSize); + zone->ReleaseSlotSpace(start, recyclePtr, &g_forceRecycleSize); } else { /* zone has nothing to recycle. */ break; diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp index 4b9f08e9fd..333a6d1a36 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp @@ -150,7 +150,7 @@ void UndoSpace::UnlinkUndoLog(int zid, UndoLogOffset offset, uint32 dbId) } smgrclose(reln); ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( - "unlink undo log, total blocks=%u, zid=%d, dbid=%u, head=%lu, old_head:%lu."), + "unlink undo log, total blocks=%u, zoneid=%d, dbid=%u, head=%lu, old_head:%lu."), g_instance.undo_cxt.undoTotalSize, zid, dbId, offset, old_head))); return; } @@ -171,7 +171,7 @@ void UndoSpace::unlink_residual_log(int zid, UndoLogOffset start, UndoLogOffset BlockNumber block = (BlockNumber)(start / BLCKSZ); smgrdounlink(reln, t_thrd.xlog_cxt.InRecovery, block); ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( - "unlink_residual_log, zid=%d, dbid=%u, start=%lu, end=%lu, segId:%lu, endSegId:%lu."), + "unlink_residual_log, zoneid=%d, dbid=%u, start=%lu, end=%lu, segId:%lu, endSegId:%lu."), zid, db_id, start, end, start/seg_size, end/seg_size))); 
start += seg_size; } @@ -196,7 +196,7 @@ void UndoSpace::CreateNonExistsUndoFile(int zid, uint32 dbId) if (!smgrexists(reln, MAIN_FORKNUM, blockno)) { smgrextend(reln, MAIN_FORKNUM, blockno, NULL, false); ereport(DEBUG1, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("undo file not exists: zid %d, blockno %u, dbid %u."), + errmsg(UNDOFORMAT("undo file not exists: zoneid %d, blockno %u, dbid %u."), zid, blockno, dbId))); pg_atomic_fetch_add_u32(&g_instance.undo_cxt.undoTotalSize, segBlocks); } @@ -456,7 +456,7 @@ void UndoSpace::RecoveryUndoSpace(int fd, UndoSpaceType type) } pg_atomic_fetch_add_u32(&g_instance.undo_cxt.undoTotalSize, usp->Used(zoneId)); UndoZoneVerify(uzone); - uint64 transUndoThresholdSize = UNDO_SPACE_THRESHOLD_PER_TRANS * BLCKSZ; + uint64 transUndoThresholdSize = GET_UNDO_LIMIT_SIZE_PER_XACT * BLCKSZ; const uint64 MAX_OFFSET = (UNDO_LOG_MAX_SIZE - transUndoThresholdSize) - segSize; if (usp->Tail() < usp->Head() || usp->Tail() > MAX_OFFSET) { g_instance.undo_cxt.uZoneBitmap[UNDO_PERMANENT] = diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp index 18154077ca..913be07b89 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp @@ -274,15 +274,9 @@ void UndoSlotBufferCache::RemoveSlotBuffer(UndoSlotPtr slotPtr) } size_--; if (size_ != 0) { - UndoSlotPtr headTag = INVALID_UNDO_SLOT_PTR; - UndoSlotPtr tailTag = INVALID_UNDO_SLOT_PTR; - if (head_ != NULL) { - headTag = head_->tag_; - } - if (tail_ != NULL) { - tailTag = tail_->tag_; - } - ereport(DEBUG1, (errmsg(UNDOFORMAT("release entry %lu head_ %lu tail_ %lu size %d."), + UndoSlotPtr headTag = (head_ != NULL) ? head_->tag_ : INVALID_UNDO_SLOT_PTR; + UndoSlotPtr tailTag = (tail_ != NULL) ? 
tail_->tag_ : INVALID_UNDO_SLOT_PTR; + ereport(DEBUG1, (errmsg(UNDOFORMAT("release entry tag %lu head %lu tail %lu size %d."), entry->tag_, headTag, tailTag, size_))); } else { ereport(DEBUG1, (errmsg(UNDOFORMAT("release entry %lu.SlotBuffer is empty"), diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp index 5d6c6859c7..4fe7df7266 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp @@ -85,7 +85,7 @@ bool UndoZone::CheckNeedSwitch(void) slotSpace_.Tail()))); return true; } - uint64 transUndoThresholdSize = UNDO_SPACE_THRESHOLD_PER_TRANS * BLCKSZ; + uint64 transUndoThresholdSize = GET_UNDO_LIMIT_SIZE_PER_XACT * BLCKSZ; UndoLogOffset newInsert = UNDO_LOG_OFFSET_PLUS_USABLE_BYTES(insertURecPtr_, transUndoThresholdSize); if (unlikely(newInsert + UNDO_LOG_SEGMENT_SIZE > UNDO_LOG_MAX_SIZE || undoSpace_.Tail() + UNDO_LOG_SEGMENT_SIZE > UNDO_LOG_MAX_SIZE || @@ -798,13 +798,12 @@ static int ReleaseUndoZoneId(int zid, UndoPersistence upersistence) return tempZid; } -static UndoZone *getUnusedZone(UndoPersistence upersistence, int *retZid, int oldZid) +static UndoZone *GetUnusedZone(UndoPersistence upersistence, int *retZid, int oldZid) { int zid = -1; UndoZone *newUzone = NULL; if (upersistence >= UNDO_PERSISTENT_BUTT || upersistence < UNDO_PERMANENT) { - ereport(ERROR, (errmsg("getUnusedZone upersistence out of range [%d]", - upersistence))); + ereport(ERROR, (errmsg("GetUnusedZone upersistence out of range [%d]", upersistence))); } int basecount = (int)upersistence * PERSIST_ZONE_COUNT; for (int i = PERSIST_ZONE_COUNT - 1; i >= 0; i--) { @@ -812,7 +811,7 @@ static UndoZone *getUnusedZone(UndoPersistence upersistence, int *retZid, int ol newUzone = UndoZoneGroup::GetUndoZone(zid, true); if (newUzone == NULL) { zid = -1; - ereport(WARNING, (errmsg(UNDOFORMAT("can not palloc undo zone memory for zone 
%d"), zid))); + ereport(WARNING, (errmsg(UNDOFORMAT("Can not palloc undo zone memory for zone %d"), zid))); continue; } if (newUzone->Attached() || newUzone->GetPersitentLevel() != upersistence || @@ -838,7 +837,7 @@ static UndoZone *getUnusedZone(UndoPersistence upersistence, int *retZid, int ol ereport(ERROR, (errmsg("SwitchZone: zone id is invalid, there're too many working threads."))); } *retZid = zid; - g_instance.undo_cxt.uZoneBitmap[upersistence] = + g_instance.undo_cxt.uZoneBitmap[upersistence] = bms_del_member(g_instance.undo_cxt.uZoneBitmap[upersistence], (zid - basecount)); return newUzone; } @@ -886,7 +885,7 @@ UndoZone *UndoZoneGroup::SwitchZone(int zid, UndoPersistence upersistence) uzone->PrepareSwitch(); LWLockAcquire(UndoZoneLock, LW_EXCLUSIVE); uzone->ReleaseSlotBuffer(); - UndoZone *newUzone = getUnusedZone(upersistence, &retZid, zid); + UndoZone *newUzone = GetUnusedZone(upersistence, &retZid, zid); WHITEBOX_TEST_STUB(UNDO_SWITCH_ZONE_FAILED, WhiteboxDefaultErrorEmit); newUzone->Attach(); LWLockRelease(UndoZoneLock); diff --git a/src/gausskernel/storage/tcap/tcap_drop.cpp b/src/gausskernel/storage/tcap/tcap_drop.cpp index 5cf3bd79a5..a97223c3e9 100644 --- a/src/gausskernel/storage/tcap/tcap_drop.cpp +++ b/src/gausskernel/storage/tcap/tcap_drop.cpp @@ -1112,7 +1112,7 @@ static bool TrNeedPhyDelete(Relation depRel, ObjectAddresses *targetObjects, Obj /* Step 1: tag refobjs of thisobj, return directly if ALL refobjs not need physical drop. */ for (int i = 0; i < refobjs->numrefs; i++) { item = refobjs->refs + i; - /* Skip the judgemnt on the obj in the pg_proc table and perform logical delection + /* Skip the judgemnt on the obj in the pg_proc table and perform logical delection * by default. 
*/ if (item->classId != ProcedureRelationId && !TrObjIsInList(targetObjects, item)) { diff --git a/src/include/access/hio.h b/src/include/access/hio.h index c6ea2275ab..432eec40d0 100644 --- a/src/include/access/hio.h +++ b/src/include/access/hio.h @@ -20,7 +20,6 @@ #include "utils/relcache.h" #include "storage/buf/buf.h" #include "storage/buf/bufmgr.h" -#include "ubtree.h" /* * state for bulk inserts --- private to heapam.c and hio.c @@ -40,6 +39,6 @@ extern Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer othe BulkInsertState bistate, Buffer* vmbuffer, Buffer* vmbuffer_other, BlockNumber end_rel_block); extern Buffer RelationGetNewBufferForBulkInsert(Relation relation, Size len, Size dictSize, BulkInsertState bistate); extern Buffer ReadBufferBI(Relation relation, BlockNumber targetBlock, ReadBufferMode mode, BulkInsertState bistate); -extern void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate, NewPageState* npState = NULL); +extern void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate); #endif /* HIO_H */ diff --git a/src/include/access/ubtree.h b/src/include/access/ubtree.h index 33c9e68499..7a6a67d8e8 100644 --- a/src/include/access/ubtree.h +++ b/src/include/access/ubtree.h @@ -103,8 +103,6 @@ typedef UBTRecycleQueueHeaderData* UBTRecycleQueueHeader; #define URQ_HEAD_PAGE (1 << 0) #define URQ_TAIL_PAGE (1 << 1) -#define URQ_MAX_GET_PAGE_TIMES 5 - #define UBTRecycleMaxItems \ ((BLCKSZ - sizeof(PageHeaderData) - offsetof(UBTRecycleQueueHeaderData, items)) / sizeof(UBTRecycleQueueItemData)) @@ -170,6 +168,7 @@ typedef UBTRecycleQueueHeaderData* UBTRecycleQueueHeader; #define UBTREE_VERIFY_OUTPUT_PARAM_CNT 3 #define UBTREE_RECYCLE_OUTPUT_PARAM_CNT 6 #define UBTREE_RECYCLE_OUTPUT_XID_STR_LEN 32 +#define URQ_GET_PAGE_MAX_RETRY_TIMES 5 enum { UBTREE_MARK_DELETE_BLOCK_NUM, @@ -478,32 +477,25 @@ typedef struct { } IndexPruneState; typedef struct { - TimestampTz firstGetAvailablePageTime; - TimestampTz 
secondGetAvailablePageTime; - TimestampTz extendBlocksTime; + TimestampTz getAvailablePageTime; + TimestampTz getAvailablePageTimeMax; + TimestampTz addExtraBlocksTime; + TimestampTz addExtraBlocksTimeMax; TimestampTz extendOneTime; - TimestampTz getHeadTime; - TimestampTz getAvailablePageOnPageTime; - TimestampTz getAvailablePageOnPageTimeMax; - uint32 firstGetAvailablePageCount; - uint32 secondGetAvailablePageCount; - uint32 bufferInvalidCount; - uint32 needLockCount; - uint32 extendBlocksCount; - uint32 extendBlocks; + TimestampTz extendOneTimeMax; + TimestampTz getOnUrqPageTime; + TimestampTz getOnUrqPageTimeMax; + uint32 getAvailablePageCount; + uint32 addExtraBlocksCount; uint32 extendOneCount; - uint32 queueCount; - uint32 itemsCount; - uint32 itemsValidCount; - uint32 itemsValidConditionalLockCount; - uint32 getAvailablePageOnPageCount; - uint32 firstGotoRestartCount; - uint32 secondGotoRestartCount; - uint32 checkNewCreatePagesCount; - uint32 getFromNewCreatePagesCount; - double avgTravelQueuePages; - double avgTravelQueueItems; -} NewPageState; + uint32 getOnUrqPageCount; + uint32 urqItemsCount; + uint32 restartCount; + uint32 checkNonTrackedPagesCount; + Oid spcnode; + Oid dbnode; + Oid relnode; +} UBTreeGetNewPageStats; typedef RpSort ItemIdSort; typedef RpSortData ItemIdSortData; @@ -580,8 +572,7 @@ extern OffsetNumber UBTreeFindsplitloc(Relation rel, Buffer buf, OffsetNumber ne extern OffsetNumber UBTreeFindsplitlocInsertpt(Relation rel, Buffer buf, OffsetNumber newitemoff, Size newitemsz, bool *newitemonleft, IndexTuple newitem); -extern Buffer UBTreeGetNewPage(Relation rel, UBTRecycleQueueAddress* addr, NewPageState* npState = NULL); -extern void UBTreePrintNewPageState(NewPageState* npstate); +extern Buffer UBTreeGetNewPage(Relation rel, UBTRecycleQueueAddress* addr); /* * prototypes for functions in ubtxlog.cpp */ @@ -649,6 +640,13 @@ typedef enum IndexTraceLevel { TRACE_ALL } IndexTraceLevel; +typedef enum NewPageCostType { + GET_PAGE, + 
ADD_BLOCKS, + EXTEND_ONE, + URQ_GET_PAGE +} NewPageCostType; + /* * prototypes for functions in ubtrecycle.cpp */ @@ -661,7 +659,7 @@ extern void UBTreeRecordFreePage(Relation rel, BlockNumber blkno, TransactionId extern void UBTreeRecordEmptyPage(Relation rel, BlockNumber blkno, TransactionId xid); extern void UBTreeRecordUsedPage(Relation rel, UBTRecycleQueueAddress addr); extern Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBTRecycleQueueAddress* addr, - NewPageState* npState = NULL); + UBTreeGetNewPageStats* stats = NULL); extern void UBTreeRecycleQueueInitPage(Relation rel, Page page, BlockNumber blkno, BlockNumber prevBlkno, BlockNumber nextBlkno); extern void UBtreeRecycleQueueChangeChain(Buffer buf, BlockNumber newBlkno, bool setNext); @@ -682,4 +680,5 @@ void UBTRecycleQueueVerifyPageOffline(Relation rel, Page page, BlockNumber blkno void UBTreeVerifyPage(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); void UBTreeVerifyAll(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); +void UBTreeRecordGetNewPageCost(UBTreeGetNewPageStats* stats, NewPageCostType type, TimestampTz start); #endif /* UBTREE_H */ diff --git a/src/include/access/ustore/knl_undorequest.h b/src/include/access/ustore/knl_undorequest.h index 9e740465f6..4402cee32a 100644 --- a/src/include/access/ustore/knl_undorequest.h +++ b/src/include/access/ustore/knl_undorequest.h @@ -94,7 +94,7 @@ void ExecuteUndoForInsertRecovery(Buffer buffer, OffsetNumber off, TransactionId bool UHeapUndoActionsOpenRelation(Oid reloid, Oid partitionoid, UndoRelationData *relationData); void UHeapUndoActionsCloseRelation(UndoRelationData *relationData); bool UHeapUndoActionsFindRelidByRelfilenode(RelFileNode *relfilenode, Oid *reloid, Oid *partitionoid); -bool ExecuteUndoActionsPageForPartition(Relation src, SMgrRelation dest, ForkNumber forkNum, BlockNumber srcBlkno, +bool ExecuteUndoActionsForPartition(Relation src, 
SMgrRelation dest, ForkNumber forkNum, BlockNumber srcBlkno, BlockNumber destBlkno, RollBackTypeForAlterTable opType, PartitionToastInfo *toastInfo = NULL); #endif diff --git a/src/include/access/ustore/undo/knl_uundotype.h b/src/include/access/ustore/undo/knl_uundotype.h index 276c87c650..260e7410b0 100644 --- a/src/include/access/ustore/undo/knl_uundotype.h +++ b/src/include/access/ustore/undo/knl_uundotype.h @@ -221,11 +221,11 @@ typedef struct MiniSlot { nodeNo = 0; \ } -#define UNDO_SPACE_THRESHOLD_PER_TRANS \ +#define GET_UNDO_LIMIT_SIZE_PER_XACT \ (uint64)((u_sess->attr.attr_storage.undo_limit_size_transaction) > \ - (u_sess->attr.attr_storage.undo_space_limit_size) \ - ? (u_sess->attr.attr_storage.undo_space_limit_size) \ - : (u_sess->attr.attr_storage.undo_limit_size_transaction)) + (u_sess->attr.attr_storage.undo_space_limit_size) \ + ? (u_sess->attr.attr_storage.undo_space_limit_size) \ + : (u_sess->attr.attr_storage.undo_limit_size_transaction)) #define PERSIST_ZONE_COUNT (UNDO_ZONE_COUNT / UNDO_PERSISTENCE_LEVELS) #define ZONE_COUNT_PER_LEVEL_NODE(nodeCount) (UNDO_ZONE_COUNT / UNDO_PERSISTENCE_LEVELS / nodeCount) diff --git a/src/include/utils/be_module.h b/src/include/utils/be_module.h index d7f5af85ca..7a57eacfc5 100755 --- a/src/include/utils/be_module.h +++ b/src/include/utils/be_module.h @@ -150,7 +150,7 @@ enum ModuleId { MOD_GPI, /* debug info for global partition index */ MOD_PARTITION, - MOD_UBT_NEWPAGE, /* debug info for UBTreeGetNewPage */ + MOD_UBTREE, /* debug info for UBTree */ MOD_SRF, /* debug info for SRF */ MOD_SS_TXNSTATUS, /* debug info for SS TXNSTATUS */ diff --git a/src/test/regress/expected/test_ustore_partition.out b/src/test/regress/expected/test_ustore_partition.out index d8a32e5aff..57449e7ec1 100644 --- a/src/test/regress/expected/test_ustore_partition.out +++ b/src/test/regress/expected/test_ustore_partition.out @@ -211,37 +211,42 @@ INSERT INTO ustore_part values(1, 10) on duplicate key update c3 = 10; INSERT INTO 
ustore_part values(1, 11) on duplicate key update c3 = 10; drop table ustore_part; create table ustore_part( - tc1 timetz default now(), a int, - id int, b text -) with (storage_type=USTORE) -partition by range(id) +) with (storage_type=USTORE) +partition by range(a) ( -partition ustore_part_p1 values less than(4), -partition ustore_part_p2 values less than(7), -partition ustore_part_p3 values less than(10) +partition part1 values less than(10), +partition part2 values less than(20), +partition part3 values less than(30), +partition part4 values less than(40), +partition part5 values less than(50) ); -INSERT INTO ustore_part values(now(), 1, generate_series(1,3), 'p1'); -INSERT INTO ustore_part values(now(), 2, generate_series(4,6), 'p2'); -INSERT INTO ustore_part values(now(), 3, generate_series(7,9), 'p3'); -INSERT INTO ustore_part values(now(), NULL, generate_series(3,4), 'pn'); -select ctid, a, id ,b from ustore_part; - ctid | a | id | b --------+---+----+---- - (0,1) | 1 | 1 | p1 - (0,2) | 1 | 2 | p1 - (0,3) | 1 | 3 | p1 - (0,4) | | 3 | pn - (0,1) | 2 | 4 | p2 - (0,2) | 2 | 5 | p2 - (0,3) | 2 | 6 | p2 - (0,4) | | 4 | pn - (0,1) | 3 | 7 | p3 - (0,2) | 3 | 8 | p3 - (0,3) | 3 | 9 | p3 -(11 rows) +INSERT INTO ustore_part values(generate_series(0, 2), 'partition_a_lt_10'); +INSERT INTO ustore_part values(generate_series(10, 12), 'partition_a_lt_20'); +INSERT INTO ustore_part values(generate_series(20, 22), 'partition_a_lt_30'); +INSERT INTO ustore_part values(generate_series(30, 32), 'partition_a_lt_40'); +INSERT INTO ustore_part values(generate_series(40, 42), 'partition_a_lt_50'); +select * from ustore_part; + a | b +----+------------------- + 0 | partition_a_lt_10 + 1 | partition_a_lt_10 + 2 | partition_a_lt_10 + 10 | partition_a_lt_20 + 11 | partition_a_lt_20 + 12 | partition_a_lt_20 + 20 | partition_a_lt_30 + 21 | partition_a_lt_30 + 22 | partition_a_lt_30 + 30 | partition_a_lt_40 + 31 | partition_a_lt_40 + 32 | partition_a_lt_40 + 40 | partition_a_lt_50 + 41 
| partition_a_lt_50 + 42 | partition_a_lt_50 +(15 rows) drop table ustore_part; reset search_path; -drop schema test_ustore_part cascade; +drop schema test_ustore_part cascade; \ No newline at end of file diff --git a/src/test/regress/sql/test_ustore_partition.sql b/src/test/regress/sql/test_ustore_partition.sql index 42754bae54..b72d41a95a 100644 --- a/src/test/regress/sql/test_ustore_partition.sql +++ b/src/test/regress/sql/test_ustore_partition.sql @@ -157,23 +157,23 @@ INSERT INTO ustore_part values(1, 11) on duplicate key update c3 = 10; drop table ustore_part; create table ustore_part( - tc1 timetz default now(), a int, - id int, b text -) with (storage_type=USTORE) -partition by range(id) +) with (storage_type=USTORE) +partition by range(a) ( -partition ustore_part_p1 values less than(4), -partition ustore_part_p2 values less than(7), -partition ustore_part_p3 values less than(10) +partition part1 values less than(10), +partition part2 values less than(20), +partition part3 values less than(30), +partition part4 values less than(40), +partition part5 values less than(50) ); -INSERT INTO ustore_part values(now(), 1, generate_series(1,3), 'p1'); -INSERT INTO ustore_part values(now(), 2, generate_series(4,6), 'p2'); -INSERT INTO ustore_part values(now(), 3, generate_series(7,9), 'p3'); -INSERT INTO ustore_part values(now(), NULL, generate_series(3,4), 'pn'); -select ctid, a, id ,b from ustore_part; +INSERT INTO ustore_part values(generate_series(0, 2), 'partition_a_lt_10'); +INSERT INTO ustore_part values(generate_series(10, 12), 'partition_a_lt_20'); +INSERT INTO ustore_part values(generate_series(20, 22), 'partition_a_lt_30'); +INSERT INTO ustore_part values(generate_series(30, 32), 'partition_a_lt_40'); +INSERT INTO ustore_part values(generate_series(40, 42), 'partition_a_lt_50'); +select * from ustore_part; drop table ustore_part; - reset search_path; drop schema test_ustore_part cascade; \ No newline at end of file -- Gitee From 
a1643f3d401a2acfec0f380724138d10ad8ae105 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Sat, 10 Aug 2024 11:44:45 +0800 Subject: [PATCH 211/347] =?UTF-8?q?=E5=8F=8C=E9=9B=86=E7=BE=A4=E8=B7=B3?= =?UTF-8?q?=E8=BF=87CheckForRestartPoint=20=EF=BC=88cherry=20picked=20comm?= =?UTF-8?q?it=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/transam/xlog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index d1c1d6cbdb..91658b8fb6 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -8580,7 +8580,7 @@ inline void PrintCkpXctlControlFile(XLogRecPtr oldCkpLoc, CheckPoint *oldCkp, XL void CheckForRestartPoint() { - if (SS_IN_ONDEMAND_RECOVERY) { + if (SS_IN_ONDEMAND_RECOVERY || SS_DISASTER_CLUSTER) { return; } -- Gitee From 9f68af5fe0b04583484648a6eb4a96798426358f Mon Sep 17 00:00:00 2001 From: zzh Date: Tue, 27 Aug 2024 10:10:10 +0800 Subject: [PATCH 212/347] =?UTF-8?q?=E5=A2=9E=E5=8A=A0gs=5Fctl=20init?= =?UTF-8?q?=E5=B7=A5=E5=85=B7--pwfile=E5=8F=82=E6=95=B0=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/initdb/initdb.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/bin/initdb/initdb.cpp b/src/bin/initdb/initdb.cpp index 35aa599634..737e7b7c99 100644 --- a/src/bin/initdb/initdb.cpp +++ b/src/bin/initdb/initdb.cpp @@ -1960,6 +1960,9 @@ static void get_set_pwd(void) pwdbuf[--i] = '\0'; pwd1 = xstrdup(pwdbuf); + if (!CheckInitialPasswd(username, pwd1)) { + exit_nicely(); + } } else if (pwprompt) { /* else get password from readline */ -- Gitee From a88bcd7233bf1326156c7b4657d8dfdbce423f34 Mon Sep 17 00:00:00 2001 From: cchen676 Date: Tue, 27 Aug 2024 17:00:23 +0800 Subject: [PATCH 213/347] 
=?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=BC=A0=E7=BB=9F?= =?UTF-8?q?=E9=9B=86=E7=BE=A4=E7=9A=84=E6=85=A2sql=E6=9F=A5=E8=AF=A2?= =?UTF-8?q?=E6=9F=A5=E5=87=BA=E6=9D=A5dms=E7=AD=89=E5=BE=85=E4=BA=8B?= =?UTF-8?q?=E4=BB=B6=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/cbb/instruments/statement/instr_statement.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp index 6479ac1e27..a536b55a81 100755 --- a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp @@ -2414,11 +2414,11 @@ static int32 get_wait_events_idx_in_bms(uint32 class_id, uint32 event_id) break; case PG_WAIT_DMS: ereport(DEBUG4, (errmodule(MOD_INSTR), errmsg("[Statement] tracked event - DMS"))); - event_idx = event_id; + event_idx = event_id + wait_event_io_event_max_index; break; case PG_WAIT_LOCK: ereport(DEBUG4, (errmodule(MOD_INSTR), errmsg("[Statement] tracked event - LOCK"))); - event_idx = event_id + wait_event_io_event_max_index; + event_idx = event_id + wait_event_dms_event_max_index; break; case PG_WAIT_LWLOCK: ereport(DEBUG4, (errmodule(MOD_INSTR), errmsg("[Statement] tracked event - LWLOCK"))); -- Gitee From c1af2e6b39a3975dd00089662461e95b9a9c9639 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Thu, 22 Aug 2024 18:07:17 +0800 Subject: [PATCH 214/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=8F=8C=E9=9B=86?= =?UTF-8?q?=E7=BE=A4swicthover=20pageredo=E7=BA=BF=E7=A8=8Bclean=20up?= =?UTF-8?q?=E9=94=81=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/lmgr/proc.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/lmgr/proc.cpp 
b/src/gausskernel/storage/lmgr/proc.cpp index 60aecb07bf..2b431196b3 100755 --- a/src/gausskernel/storage/lmgr/proc.cpp +++ b/src/gausskernel/storage/lmgr/proc.cpp @@ -54,6 +54,7 @@ #include "postmaster/cfs_shrinker.h" #include "postmaster/rbcleaner.h" #include "replication/slot.h" +#include "replication/ss_disaster_cluster.h" #ifdef PGXC #include "pgxc/pgxc.h" #include "pgxc/poolmgr.h" @@ -2491,7 +2492,7 @@ void ProcSendSignal(ThreadId pid) { PGPROC* proc = NULL; - if (RecoveryInProgress() || SS_IN_ONDEMAND_RECOVERY) { + if (RecoveryInProgress() || SS_IN_ONDEMAND_RECOVERY || SS_DISASTER_STANDBY_CLUSTER) { ProcBaseLockAccquire(&g_instance.proc_base_mutex_lock); /* -- Gitee From 3797839c9c4d7899510945cf841f6e29f418ca78 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Wed, 14 Aug 2024 19:11:54 +0800 Subject: [PATCH 215/347] =?UTF-8?q?=E5=8F=8C=E9=9B=86=E7=BE=A4=E5=A4=87?= =?UTF-8?q?=E9=9B=86=E7=BE=A4=E8=B7=B3=E8=BF=87=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/smgr/segment/segbuffer.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/storage/smgr/segment/segbuffer.cpp b/src/gausskernel/storage/smgr/segment/segbuffer.cpp index 144f661b0c..2b9def81bc 100644 --- a/src/gausskernel/storage/smgr/segment/segbuffer.cpp +++ b/src/gausskernel/storage/smgr/segment/segbuffer.cpp @@ -323,7 +323,8 @@ void SegFlushCheckDiskLSN(SegSpace *spc, RelFileNode rNode, ForkNumber forknum, BufferDesc *buf_desc, char *buf) { #ifndef USE_ASSERT_CHECKING - if (!IsInitdb && !RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && !SS_DISASTER_STANDBY_CLUSTER) { + if (!IsInitdb && !RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && + !SS_DISASTER_STANDBY_CLUSTER && !g_instance.dms_cxt.SSRecoveryInfo.disaster_cluster_promoting) { dms_buf_ctrl_t *buf_ctrl = GetDmsBufCtrl(buf_desc->buf_id); XLogRecPtr lsn_on_mem = 
PageGetLSN(buf); /* latest page must satisfy condition: page lsn_on_disk bigger than transfered page which is latest page */ @@ -335,7 +336,8 @@ void SegFlushCheckDiskLSN(SegSpace *spc, RelFileNode rNode, ForkNumber forknum, } } #else - if (!RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && ENABLE_VERIFY_PAGE_VERSION && !SS_DISASTER_STANDBY_CLUSTER) { + if (!RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && ENABLE_VERIFY_PAGE_VERSION && + !SS_DISASTER_STANDBY_CLUSTER && !g_instance.dms_cxt.SSRecoveryInfo.disaster_cluster_promoting) { char *origin_buf = (char *)palloc(BLCKSZ + ALIGNOF_BUFFER); char *temp_buf = (char *)BUFFERALIGN(origin_buf); seg_physical_read(spc, rNode, forknum, blocknum, temp_buf); -- Gitee From 4997c8f4a6aaa45a3bdeadfcf87a088d1b08d158 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Tue, 20 Aug 2024 14:52:53 +0800 Subject: [PATCH 216/347] =?UTF-8?q?=E7=BD=91=E7=BB=9C=E5=8F=8C=E9=9B=86?= =?UTF-8?q?=E7=BE=A4=E8=B7=B3=E8=BF=87walrec=20term=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/replication/libpqwalreceiver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/storage/replication/libpqwalreceiver.cpp b/src/gausskernel/storage/replication/libpqwalreceiver.cpp index 32efa053e4..10b1bf7f0d 100755 --- a/src/gausskernel/storage/replication/libpqwalreceiver.cpp +++ b/src/gausskernel/storage/replication/libpqwalreceiver.cpp @@ -577,7 +577,7 @@ static int32 IdentifyRemoteVersion() (errcode(ERRCODE_INVALID_STATUS), errmsg("could not get the local protocal version, make sure the PG_PROTOCOL_VERSION is defined"))); } - if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE && !SS_DORADO_MAIN_STANDBY_NODE) { + if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE && !SS_DISASTER_MAIN_STANDBY_NODE) { if (walrcv->conn_target != REPCONNTARGET_DUMMYSTANDBY && (localTerm == 0 || localTerm > 
remoteTerm) && !AM_HADR_WAL_RECEIVER) { PQclear(res); -- Gitee From 920adcf85103e361778dbac911bd6c6cbe072fb1 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Tue, 20 Aug 2024 16:49:22 +0800 Subject: [PATCH 217/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=8F=8C=E9=9B=86?= =?UTF-8?q?=E7=BE=A4build=E5=AE=8C=E6=88=90=E5=88=87=E4=B8=BB=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/transam/xlog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index b03251dc5b..d709b86fb5 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -20441,7 +20441,7 @@ retry: /* In ss dorado replication, we don't start walrecwrite thread, so t_thrd.xlog_cxt.receivedUpto = 0 */ if (t_thrd.xlog_cxt.readFile < 0 || (t_thrd.xlog_cxt.readSource == XLOG_FROM_STREAM && XLByteLT(t_thrd.xlog_cxt.receivedUpto, RecPtr))) { - if (t_thrd.xlog_cxt.StandbyMode && t_thrd.xlog_cxt.startup_processing) { + if (t_thrd.xlog_cxt.startup_processing) { for (;;) { /* * Need to check here also for the case where consistency level is -- Gitee From 32c40d4d3a7bc56b55cb763e77acd0306b3b7a9d Mon Sep 17 00:00:00 2001 From: muyulinzhong Date: Tue, 13 Aug 2024 17:43:26 +0800 Subject: [PATCH 218/347] =?UTF-8?q?[bugFix]=E4=BF=AE=E6=94=B9fd=20=3D=200?= =?UTF-8?q?=E7=9A=84dss=E5=BC=82=E5=B8=B8=E5=9C=BA=E6=99=AF=E6=83=85?= =?UTF-8?q?=E5=86=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/smgr/segment/data_file.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/gausskernel/storage/smgr/segment/data_file.cpp b/src/gausskernel/storage/smgr/segment/data_file.cpp index 16253c6399..e345a98c42 100644 --- 
a/src/gausskernel/storage/smgr/segment/data_file.cpp +++ b/src/gausskernel/storage/smgr/segment/data_file.cpp @@ -36,6 +36,8 @@ #include "postmaster/pagerepair.h" #include "ddes/dms/ss_common_attr.h" +#define DSS_FD_CHECK(fd) (ENABLE_DSS) && (fd < DSS_HANDLE_BASE) + static const mode_t SEGMENT_FILE_MODE = S_IWUSR | S_IRUSR; static int dv_open_file(char *filename, int flags, int mode); @@ -492,6 +494,13 @@ void df_open_files(SegLogicFile *sf) void df_extend_internal(SegLogicFile *sf) { int fd = sf->segfiles[sf->file_num - 1].fd; + + if (DSS_FD_CHECK(fd)) { + /* If fd = 0 under the DSS environment, we should open again. */ + char *filename = slice_filename(sf->filename, sf->file_num - 1); + fd = dv_open_file(filename, O_RDWR | PG_BINARY, (int)SEGMENT_FILE_MODE); + sf->segfiles[sf->file_num - 1].fd = fd; + } off_t last_file_size = lseek(fd, 0L, SEEK_END); SegmentCheck(last_file_size <= DF_FILE_SLICE_SIZE); -- Gitee From a40b6aa307432619ebdc4ae07bb903c6bb19d10a Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Tue, 27 Aug 2024 17:46:50 +0800 Subject: [PATCH 219/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E6=8C=89=E9=9C=80?= =?UTF-8?q?=E5=9B=9E=E6=94=BE=E5=AE=9E=E6=97=B6=E6=9E=84=E5=BB=BA=E4=B8=8B?= =?UTF-8?q?=EF=BC=8C=E4=B8=BB=E6=9C=BAredo=E7=82=B9=E4=BD=8D=E5=8F=91?= =?UTF-8?q?=E9=80=81=E9=94=99=E8=AF=AF=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms_callback.cpp | 2 +- .../transam/ondemand_extreme_rto/redo_utils.cpp | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 20ff559cb9..a0009f049e 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -2188,7 +2188,7 @@ int CBDoCheckpointImmediately(unsigned long long *ckpt_lsn) Assert(SS_PRIMARY_MODE); 
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT); - *ckpt_lsn = (unsigned long long)t_thrd.shemem_ptr_cxt.ControlFile->checkPoint; + *ckpt_lsn = (unsigned long long)t_thrd.shemem_ptr_cxt.ControlFile->checkPointCopy.redo; return GS_SUCCESS; } diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp index b2b639caf3..b066c3bec6 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp @@ -657,9 +657,19 @@ void OnDemandWaitRealtimeBuildShutDown() ondemand_extreme_rto::WaitRealtimeBuildShutdown(); } +// only used in ondemand realtime build, for update xlog redo loc in failover void OnDemandUpdateRealtimeBuildPrunePtr() { - ondemand_extreme_rto::UpdateCheckpointRedoPtrForPrune(t_thrd.shemem_ptr_cxt.ControlFile->checkPointCopy.redo); + XLogRecPtr primaryRedoLsn = t_thrd.shemem_ptr_cxt.ControlFile->checkPointCopy.redo; +#ifdef USE_ASSERT_CHECKING + if (XLByteLT(primaryRedoLsn, ondemand_extreme_rto::g_dispatcher->ckptRedoPtr)) { + ereport(PANIC, (errmodule(MOD_DMS), errmsg("[SS][On-demand] redo loc %X/%X in primary node %d is less than " + "realtime build node %d, prune loc %X/%X", (uint32)(primaryRedoLsn >> 32), (uint32)primaryRedoLsn, + SS_PRIMARY_ID, SS_MY_INST_ID, (uint32)(ondemand_extreme_rto::g_dispatcher->ckptRedoPtr >> 32), + (uint32)ondemand_extreme_rto::g_dispatcher->ckptRedoPtr))); + } +#endif + ondemand_extreme_rto::UpdateCheckpointRedoPtrForPrune(primaryRedoLsn); } void OnDemandBackupControlFile(ControlFileData* controlFile) { -- Gitee From fbfd47253c0da2c7712da930341d9746a8175f12 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Wed, 28 Aug 2024 10:00:00 +0800 Subject: [PATCH 220/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=B5=81=E6=B0=B4?= =?UTF-8?q?=E7=BA=BF=E7=BC=96=E8=AF=91TSDB?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- GNUmakefile.in | 2 +- build/script/utils/make_compile.sh | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/GNUmakefile.in b/GNUmakefile.in index 2e95bfab4d..cc5a7b9b0e 100644 --- a/GNUmakefile.in +++ b/GNUmakefile.in @@ -102,7 +102,7 @@ install: @if test -d contrib/gms_stats; then $(MAKE) -C contrib/gms_stats $@; fi @if test -d contrib/gms_profiler; then $(MAKE) -C contrib/gms_profiler $@; fi @if test -d contrib/gms_output; then $(MAKE) -C contrib/gms_output $@; fi - @if test -d contrib/timescaledb; then export CC=${GCC_PATH}/gcc/bin/gcc; export CXX=${GCC_PATH}/gcc/bin/g++; (cd contrib/timescaledb && ./run_to_build.sh && cd build && $(MAKE) $@); fi + @if test -d contrib/timescaledb; then (./contrib/timescaledb/run_to_build.sh && $(MAKE) -C contrib/timescaledb/build $@); fi +@echo "openGauss installation complete." endif endif diff --git a/build/script/utils/make_compile.sh b/build/script/utils/make_compile.sh index bcbdebdf9f..e4a14611dd 100644 --- a/build/script/utils/make_compile.sh +++ b/build/script/utils/make_compile.sh @@ -247,11 +247,6 @@ function install_gaussdb() fi fi fi - -#tsdb prepare - if [ -d "$CODE_BASE/contrib/timescaledb" ]; then - cp $CODE_BASE/contrib/timescaledb/og-timescaledb1.7.4.sql ${GAUSSHOME}/share/postgresql/extension/timescaledb--1.7.4.sql - fi cd "$ROOT_DIR/contrib/pg_upgrade_support" make clean >> "$LOG_FILE" 2>&1 -- Gitee From a8e1cb9eb510cf8d939b5a2ebaee6cfb4aa78c3c Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 22 Aug 2024 19:23:37 +0800 Subject: [PATCH 221/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E8=87=AA=E5=A2=9E=E5=88=97=E4=B8=BB=E9=94=AE=E6=8A=A5=E9=94=99?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=20=EF=BC=88cherry=20picked=20commit?= =?UTF-8?q?=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/tablecmds.cpp | 3 ++- 1 
file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index f5f8269bae..721eb8b5d1 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -33431,7 +33431,8 @@ void CheckRelAutoIncrementIndex(Oid relid, LOCKMODE lockmode) Relation idxrel = index_open(lfirst_oid(l), AccessShareLock); Form_pg_index index = idxrel->rd_index; - if (IndexIsValid(index) && (index->indisunique || index->indisprimary) && + /* auto_increment column in dolphin support non-unique/primary index */ + if (IndexIsValid(index) && (u_sess->attr.attr_sql.dolphin || index->indisunique || index->indisprimary) && index->indkey.values[0] == autoinc_attnum) { found = true; index_close(idxrel, AccessShareLock); -- Gitee From e96434592733c9097f8df0254230d739373b5069 Mon Sep 17 00:00:00 2001 From: yelingzhi Date: Fri, 2 Aug 2024 06:16:34 +0000 Subject: [PATCH 222/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=B5=8C=E5=A5=97?= =?UTF-8?q?=E6=95=B0=E7=BB=84=E7=BB=99=E5=B5=8C=E5=A5=97=E6=95=B0=E7=BB=84?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E8=B5=8B=E5=80=BC=E6=9C=AA=E7=94=9F=E6=95=88?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/gram.y | 6 ++++++ .../expected/plpgsql_nested_array_and_record.out | 12 ++++++++++++ .../regress/sql/plpgsql_nested_array_and_record.sql | 12 ++++++++++++ 3 files changed, 30 insertions(+) diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 9a72b649c9..02bf841374 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -8807,13 +8807,18 @@ read_sql_construct6(int until, ds_changed = true; break; case T_VARRAY_VAR: + { idents = yylval.wdatum.idents; + PLpgSQL_var* var = (PLpgSQL_var*)(yylval.wdatum.datum); if (idents == NIL) { 
AddNamespaceIfPkgVar(yylval.wdatum.ident, save_IdentifierLookup); } tok = yylex(); if (tok == '(' || tok == '[') { push_array_parse_stack(&context, parenlevel, ARRAY_ACCESS); + } else if (OidIsValid(var->datatype->tableOfIndexType) && + (',' == tok || ')' == tok || ';' == tok)) { + is_have_tableof_index_var = true; } curloc = yylloc; plpgsql_push_back_token(tok); @@ -8826,6 +8831,7 @@ read_sql_construct6(int until, ds_changed = true; break; } + } case T_ARRAY_FIRST: { Oid indexType = get_table_index_type(yylval.wdatum.datum, &tableof_func_dno); diff --git a/src/test/regress/expected/plpgsql_nested_array_and_record.out b/src/test/regress/expected/plpgsql_nested_array_and_record.out index 53c40b5ba2..8be9a7a36d 100644 --- a/src/test/regress/expected/plpgsql_nested_array_and_record.out +++ b/src/test/regress/expected/plpgsql_nested_array_and_record.out @@ -178,6 +178,18 @@ drop package pac_PLArray_Case0021; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to function plpgsql_nested_array_and_record.p_plarray_1() drop cascades to function plpgsql_nested_array_and_record.p_plarray_2(undefined) +declare + type typ_PLArray_1 is varray(3) of varchar(50); + type typ_PLArray_2 is varray(3) of typ_PLArray_1; + nstarr1 typ_PLArray_2; + nstarr2 typ_PLArray_2; +begin + nstarr1(1)(1):='第一行第一列'; + nstarr2:=nstarr1; + raise notice '二维数组nstarr(1)(1): %',nstarr2(1)(1); +end; +/ +NOTICE: 二维数组nstarr(1)(1): 第一行第一列 DROP SCHEMA plpgsql_nested_array_and_record CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to function test_nested_array() diff --git a/src/test/regress/sql/plpgsql_nested_array_and_record.sql b/src/test/regress/sql/plpgsql_nested_array_and_record.sql index be31c8b8a3..39ef82f936 100644 --- a/src/test/regress/sql/plpgsql_nested_array_and_record.sql +++ b/src/test/regress/sql/plpgsql_nested_array_and_record.sql @@ -132,4 +132,16 @@ end pac_PLArray_Case0021; drop package pac_PLArray_Case0021; +declare + type typ_PLArray_1 is varray(3) of 
varchar(50); + type typ_PLArray_2 is varray(3) of typ_PLArray_1; + nstarr1 typ_PLArray_2; + nstarr2 typ_PLArray_2; +begin + nstarr1(1)(1):='第一行第一列'; + nstarr2:=nstarr1; + raise notice '二维数组nstarr(1)(1): %',nstarr2(1)(1); +end; +/ + DROP SCHEMA plpgsql_nested_array_and_record CASCADE; -- Gitee From ae8519169b41aa6026cdfd5a56f85f70e6ce0398 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Wed, 28 Aug 2024 10:43:37 +0800 Subject: [PATCH 223/347] =?UTF-8?q?DSS=20600=20827=E6=8E=A8=E7=82=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 61d908823f..c4ef2e8f87 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ dms_commit_id=8b64ce46c8cfa9a978604b346b0d32b264c8ee6c -dss_commit_id=083e52af8c7f965856f319554d6332b14f6b2c99 -cbb_commit_id=49b0b0c664346a690c9fe9b537f264306dbdc46d +dss_commit_id=5d3e2aefdb4b51430a576bfdde057c1c08383afb +cbb_commit_id=7878a1919d2c0304a19f398aeddf0093dc147a37 -- Gitee From 98ff0047b92208f328a0b19a5fecfc004ce7206c Mon Sep 17 00:00:00 2001 From: cchen676 Date: Fri, 23 Aug 2024 11:34:20 +0800 Subject: [PATCH 224/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=AE=B5=E9=A1=B5?= =?UTF-8?q?=E5=BC=8F=E7=9A=84fsm=E9=A1=B5=E9=9D=A2=E4=B8=8D=E5=9B=9E?= =?UTF-8?q?=E6=94=BE=E5=AF=BC=E8=87=B4=E6=95=85=E9=9A=9C=E6=81=A2=E5=A4=8D?= =?UTF-8?q?=E5=90=8Efsm=E4=B8=AD=E7=9A=84=E9=A1=B5=E9=9D=A2=E5=8F=B7?= =?UTF-8?q?=E5=8F=AF=E8=83=BD=E5=A4=A7=E4=BA=8E=E5=AE=9E=E9=99=85=E7=9A=84?= =?UTF-8?q?=E7=89=A9=E7=90=86=E9=A1=B5=E9=9D=A2=E5=8F=B7=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/heap/heapam.cpp | 20 +++--- .../storage/access/redo/redo_xlogutils.cpp | 4 ++ 
src/gausskernel/storage/buffer/bufmgr.cpp | 8 +++ .../storage/freespace/freespace.cpp | 65 ++++++++++++++----- src/gausskernel/storage/freespace/fsmpage.cpp | 8 ++- src/gausskernel/storage/smgr/segstore.cpp | 21 +++--- src/include/storage/buf/bufpage.h | 9 +++ src/include/storage/freespace.h | 4 +- src/include/storage/smgr/segment.h | 3 +- 9 files changed, 106 insertions(+), 36 deletions(-) diff --git a/src/gausskernel/storage/access/heap/heapam.cpp b/src/gausskernel/storage/access/heap/heapam.cpp index 02385ea1d9..18f5f46e52 100755 --- a/src/gausskernel/storage/access/heap/heapam.cpp +++ b/src/gausskernel/storage/access/heap/heapam.cpp @@ -8803,13 +8803,14 @@ static void heap_xlog_clean(XLogReaderState* record) XLogRedoAction action; RelFileNode rnode; BlockNumber blkno; + XLogPhyBlock pblk; bool repairFragmentation = true; if ((XLogRecGetInfo(record) & XLOG_HEAP2_NO_REPAIR_PAGE) != 0) { repairFragmentation = false; } - XLogRecGetBlockTag(record, HEAP_CLEAN_ORIG_BLOCK_NUM, &rnode, NULL, &blkno); + XLogRecGetBlockTag(record, HEAP_CLEAN_ORIG_BLOCK_NUM, &rnode, NULL, &blkno, &pblk); /* * We're about to remove tuples. In Hot Standby mode, ensure that there's @@ -8851,7 +8852,7 @@ static void heap_xlog_clean(XLogReaderState* record) * totally accurate anyway. 
*/ if (action == BLK_NEEDS_REDO) { - XLogRecordPageWithFreeSpace(rnode, blkno, freespace); + XLogRecordPageWithFreeSpace(rnode, blkno, freespace, &pblk); } } @@ -9159,13 +9160,14 @@ static void heap_xlog_insert(XLogReaderState* record) XLogRedoAction action; RelFileNode target_node; BlockNumber blkno; + XLogPhyBlock pblk; if (isinit) { rec_data += sizeof(TransactionId); } xlrec = (xl_heap_insert*)rec_data; - XLogRecGetBlockTag(record, HEAP_INSERT_ORIG_BLOCK_NUM, &target_node, NULL, &blkno); + XLogRecGetBlockTag(record, HEAP_INSERT_ORIG_BLOCK_NUM, &target_node, NULL, &blkno, &pblk); /* * The visibility map may need to be fixed even if the heap page is @@ -9213,7 +9215,7 @@ static void heap_xlog_insert(XLogReaderState* record) * totally accurate anyway. */ if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5) { - XLogRecordPageWithFreeSpace(target_node, blkno, freespace); + XLogRecordPageWithFreeSpace(target_node, blkno, freespace, &pblk); } } @@ -9231,6 +9233,7 @@ static void heap_xlog_multi_insert(XLogReaderState* record) bool tde = XLogRecGetTdeInfo(record); XLogRedoAction action; Pointer rec_data; + XLogPhyBlock pblk; /* * Insertion doesn't overwrite MVCC data, so no conflict processing is @@ -9242,7 +9245,7 @@ static void heap_xlog_multi_insert(XLogReaderState* record) } xlrec = (xl_heap_multi_insert*)rec_data; - XLogRecGetBlockTag(record, HEAP_MULTI_INSERT_ORIG_BLOCK_NUM, &rnode, NULL, &blkno); + XLogRecGetBlockTag(record, HEAP_MULTI_INSERT_ORIG_BLOCK_NUM, &rnode, NULL, &blkno, &pblk); /* * The visibility map may need to be fixed even if the heap page is @@ -9287,7 +9290,7 @@ static void heap_xlog_multi_insert(XLogReaderState* record) * Skip segment-page relation, because FSM segment may have not been created yet. 
*/ if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5) { - XLogRecordPageWithFreeSpace(rnode, blkno, freespace); + XLogRecordPageWithFreeSpace(rnode, blkno, freespace, &pblk); } } @@ -9307,6 +9310,7 @@ static void heap_xlog_update(XLogReaderState* record, bool hot_update) XLogRedoAction newaction; bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0; Pointer rec_data; + XLogPhyBlock pblk; rec_data = (Pointer)XLogRecGetData(record); if (isinit) { @@ -9315,7 +9319,7 @@ static void heap_xlog_update(XLogReaderState* record, bool hot_update) xlrec = (xl_heap_update*)rec_data; - XLogRecGetBlockTag(record, HEAP_UPDATE_NEW_BLOCK_NUM, &rnode, NULL, &newblk); + XLogRecGetBlockTag(record, HEAP_UPDATE_NEW_BLOCK_NUM, &rnode, NULL, &newblk, &pblk); if (XLogRecGetBlockTag(record, HEAP_UPDATE_OLD_BLOCK_NUM, NULL, NULL, &oldblk)) { /* HOT updates are never done across pages */ Assert(!hot_update); @@ -9417,7 +9421,7 @@ static void heap_xlog_update(XLogReaderState* record, bool hot_update) * totally accurate anyway. 
*/ if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5) { - XLogRecordPageWithFreeSpace(rnode, newblk, freespace); + XLogRecordPageWithFreeSpace(rnode, newblk, freespace, &pblk); } } diff --git a/src/gausskernel/storage/access/redo/redo_xlogutils.cpp b/src/gausskernel/storage/access/redo/redo_xlogutils.cpp index cc40300a2d..2e5567acca 100644 --- a/src/gausskernel/storage/access/redo/redo_xlogutils.cpp +++ b/src/gausskernel/storage/access/redo/redo_xlogutils.cpp @@ -1675,6 +1675,10 @@ void UpdateFsm(RedoBufferTag *blockInfo, Size freespace) uint16 slot; FSMAddress addr = fsm_get_location(blockInfo->blkno, &slot); BlockNumber blkno = fsm_logical_to_physical(addr); + if (IsSegmentFileNode(blockInfo->rnode)) { + XLogRecordPageWithFreeSpace(blockInfo->rnode, blockInfo->blkno, freespace, &(blockInfo->pblk)); + return; + } RedoBufferInfo fsmBufInfo = {0}; fsmBufInfo.blockinfo.rnode = blockInfo->rnode; diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index d5d15f30bf..196ef801ad 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -2910,6 +2910,10 @@ void PageCheckIfCanEliminate(BufferDesc *buf, uint64 *oldFlags, bool *needGetLoc return; } + if (IsSegmentFileNode(buf->tag.rnode) && buf->tag.forkNum != MAIN_FORKNUM) { + return; + } + Block tmpBlock = BufHdrGetBlock(buf); if ((*oldFlags & BM_TAG_VALID) && !XLByteEQ(buf->extra->lsn_on_disk, PageGetLSN(tmpBlock)) && @@ -2935,6 +2939,10 @@ void PageCheckWhenChosedElimination(const BufferDesc *buf, uint64 oldFlags) return; } + if (IsSegmentFileNode(buf->tag.rnode) && buf->tag.forkNum != MAIN_FORKNUM) { + return; + } + if ((oldFlags & BM_TAG_VALID) && RecoveryInProgress()) { if (!XLByteEQ(buf->lsn_dirty, InvalidXLogRecPtr)) { Assert(XLByteEQ(buf->extra->lsn_on_disk, buf->lsn_dirty)); diff --git a/src/gausskernel/storage/freespace/freespace.cpp b/src/gausskernel/storage/freespace/freespace.cpp index 
3e92898ef2..9edd9ea68c 100644 --- a/src/gausskernel/storage/freespace/freespace.cpp +++ b/src/gausskernel/storage/freespace/freespace.cpp @@ -33,6 +33,7 @@ #include "storage/fsm_internals.h" #include "storage/lmgr.h" #include "storage/smgr/smgr.h" +#include "storage/smgr/segment.h" #include "commands/tablespace.h" #include "utils/aiomem.h" #include "gstrace/gstrace_infra.h" @@ -174,30 +175,41 @@ void UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum, BlockNumber endBl * XLogRecordPageWithFreeSpace - like RecordPageWithFreeSpace, for use in * WAL replay */ -void XLogRecordPageWithFreeSpace(const RelFileNode& rnode, BlockNumber heapBlk, Size spaceAvail) +void XLogRecordPageWithFreeSpace(const RelFileNode& rnode, BlockNumber heapBlk, Size spaceAvail, XLogPhyBlock *pblk) { - /* - * FSM can not be read by physical location in recovery. It is possible to write on wrong places - * if the FSM fork is dropped and then allocated when replaying old xlog. - * Since FSM does not have to be totally accurate anyway, just skip it. 
- */ - if (IsSegmentFileNode(rnode)) { - return; - } - int new_cat = fsm_space_avail_to_cat(spaceAvail); FSMAddress addr; uint16 slot; BlockNumber blkno; Buffer buf; Page page; + XLogPhyBlock fsm_pblk; /* Get the location of the FSM byte representing the heap block */ addr = fsm_get_location(heapBlk, &slot); blkno = fsm_logical_to_physical(addr); + /* For segment storage we don't change fsm if fsm block is not extented yet */ + if (IsSegmentFileNode(rnode)) { + SMgrRelation reln = smgropen(rnode, InvalidBackendId); + SegSpace *spc = spc_open(rnode.spcNode, rnode.dbNode, false); + SegmentCheck(!PointerIsValid(pblk) || PhyBlockIsValid(*pblk)); + + fsm_pblk.relNode = InvalidOid; + fsm_pblk.block = blkno; + if (spc == NULL || !seg_fork_exists(spc, reln, FSM_FORKNUM, pblk, &fsm_pblk) || + fsm_pblk.relNode == InvalidOid) { + smgrclose(reln); + return; + } + } + /* If the page doesn't exist already, extend */ - buf = XLogReadBufferExtended(rnode, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL); + if (IsSegmentFileNode(rnode)) { + buf = XLogReadBufferExtended(rnode, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, &fsm_pblk); + } else { + buf = XLogReadBufferExtended(rnode, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL); + } LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); @@ -205,8 +217,15 @@ void XLogRecordPageWithFreeSpace(const RelFileNode& rnode, BlockNumber heapBlk, if (PageIsNew(page)) PageInit(page, BLCKSZ, 0); - if (fsm_set_avail(page, (int)slot, (uint8)new_cat)) - MarkBufferDirtyHint(buf, false); + if (fsm_set_avail(page, (int)slot, (uint8)new_cat)) { + if (IsSegmentFileNode(rnode)) { + XlogRecordSetFsmPageLsn(page, pblk->lsn); + MarkBufferDirty(buf); + } else { + MarkBufferDirtyHint(buf, false); + } + } + UnlockReleaseBuffer(buf); } @@ -693,8 +712,14 @@ static int fsm_set_and_search(Relation rel, const FSMAddress &addr, uint16 slot, LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); page = BufferGetPage(buf); - if (fsm_set_avail(page, slot, newValue)) - MarkBufferDirtyHint(buf, false); + if 
(fsm_set_avail(page, slot, newValue)) { + if (IsSegmentFileNode(rel->rd_node)) { + PageSetLSN(page, GetXLogInsertRecPtr()); + MarkBufferDirty(buf); + } else { + MarkBufferDirtyHint(buf, false); + } + } if (minValue != 0 && search) { /* Search while we still hold the lock */ @@ -833,8 +858,14 @@ static uint8 fsm_vacuum_page(Relation rel, const FSMAddress& addr, bool* eof_p) /* Update information about the child */ if (fsm_get_avail(page, slot) != child_avail) { LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); - fsm_set_avail(page, slot, (uint8)child_avail); - MarkBufferDirtyHint(buf, false); + if (fsm_set_avail(page, slot, (uint8)child_avail)) { + if (IsSegmentFileNode(rel->rd_node)) { + PageSetLSN(page, GetXLogInsertRecPtr()); + MarkBufferDirty(buf); + } else { + MarkBufferDirtyHint(buf, false); + } + } LockBuffer(buf, BUFFER_LOCK_UNLOCK); } } diff --git a/src/gausskernel/storage/freespace/fsmpage.cpp b/src/gausskernel/storage/freespace/fsmpage.cpp index 5249b572b9..5f0b4140ac 100644 --- a/src/gausskernel/storage/freespace/fsmpage.cpp +++ b/src/gausskernel/storage/freespace/fsmpage.cpp @@ -25,6 +25,7 @@ #include "knl/knl_variable.h" #include "storage/buf/bufmgr.h" +#include "storage/smgr/smgr.h" #include "storage/fsm_internals.h" /* Macros to navigate the tree within a page. Root has index zero. 
*/ @@ -273,7 +274,12 @@ restart: exclusive_lock_held = true; } fsm_rebuild_page(page); - MarkBufferDirtyHint(buf, false); + if (IsSegmentFileNode(rnode)) { + PageSetLSN(page, GetXLogInsertRecPtr()); + MarkBufferDirty(buf); + } else { + MarkBufferDirtyHint(buf, false); + } goto restart; } } diff --git a/src/gausskernel/storage/smgr/segstore.cpp b/src/gausskernel/storage/smgr/segstore.cpp index dfe77382ff..e9bf08c7c3 100755 --- a/src/gausskernel/storage/smgr/segstore.cpp +++ b/src/gausskernel/storage/smgr/segstore.cpp @@ -372,11 +372,6 @@ SegPageLocation seg_logic_to_physic_mapping(SMgrRelation reln, SegmentHead *seg_ /* Recovery thread should use physical location to read data directly. */ if (SS_DISASTER_MAIN_STANDBY_NODE) { ereport(DEBUG1, (errmsg("can segment address translation when role is SS_DISASTER_MAIN_STANDBY_NODE"))); - } else { - if (RecoveryInProgress() && !CurrentThreadIsWorker() && !SS_IN_FLUSHCOPY) { - ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is in progress"), - errhint("cannot do segment address translation during recovery"))); - } } SegLogicPageIdToExtentId(logic_id, &extent_id, &offset, &extent_size); @@ -1389,7 +1384,9 @@ void seg_extend_internal(SMgrRelation reln, ForkNumber forknum, BlockNumber bloc */ while (head->total_blocks <= blocknum) { LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - seg_extend_segment(reln->seg_space, forknum, buffer, reln->seg_desc[forknum]->head_blocknum); + if (head->total_blocks <= blocknum) { + seg_extend_segment(reln->seg_space, forknum, buffer, reln->seg_desc[forknum]->head_blocknum); + } LockBuffer(buffer, BUFFER_LOCK_UNLOCK); } @@ -1847,14 +1844,15 @@ BlockNumber seg_totalblocks(SMgrRelation reln, ForkNumber forknum) return res; } -bool seg_fork_exists(SegSpace *spc, SMgrRelation reln, ForkNumber forknum, const XLogPhyBlock *pblk) +bool seg_fork_exists(SegSpace *spc, SMgrRelation reln, ForkNumber forknum, const XLogPhyBlock *pblk, + XLogPhyBlock *fsm_pblk) { 
ASSERT_NORMAL_FORK(forknum); RelFileNode rnode = reln->smgr_rnode.node; BlockNumber lastblock = spc_size(spc, pblk->relNode, forknum); - if (pblk->block >= lastblock) { + if (pblk->block >= lastblock && fsm_pblk == NULL) { return false; } @@ -1893,6 +1891,13 @@ bool seg_fork_exists(SegSpace *spc, SMgrRelation reln, ForkNumber forknum, const ret = (seg_head->total_blocks != 0); } + if (ret && fsm_pblk != NULL) { + SegPageLocation loc = seg_logic_to_physic_mapping(reln, seg_head, fsm_pblk->block); + fsm_pblk->relNode = (uint8)EXTENT_SIZE_TO_TYPE(loc.extent_size); + fsm_pblk->block = loc.blocknum; + fsm_pblk->lsn = seg_head->lsn; + } + SegUnlockReleaseBuffer(seg_buffer); return ret; } diff --git a/src/include/storage/buf/bufpage.h b/src/include/storage/buf/bufpage.h index 5384af391f..cc7e355e1a 100644 --- a/src/include/storage/buf/bufpage.h +++ b/src/include/storage/buf/bufpage.h @@ -412,6 +412,15 @@ inline void PageSetLSN(Page page, XLogRecPtr LSN, bool check = true) } PageSetLSNInternal(page, LSN); } + +/* fsm page set lsn only if `LSN` bigger in redo */ +inline void XlogRecordSetFsmPageLsn(Page page, XLogRecPtr LSN) +{ + if (XLByteLT(PageGetLSN(page), LSN)) { + PageSetLSNInternal(page, LSN); + } +} + #endif #define PageHasFreeLinePointers(page) (((PageHeader)(page))->pd_flags & PD_HAS_FREE_LINES) diff --git a/src/include/storage/freespace.h b/src/include/storage/freespace.h index bf26bcf304..59b7a6fcf8 100644 --- a/src/include/storage/freespace.h +++ b/src/include/storage/freespace.h @@ -18,6 +18,7 @@ #include "storage/smgr/relfilenode.h" #include "utils/relcache.h" #include "utils/partcache.h" +#include "access/xlog_basic.h" typedef uint64 XLogRecPtr; @@ -83,7 +84,8 @@ extern BlockNumber GetPageWithFreeSpace(Relation rel, Size spaceNeeded); extern BlockNumber RecordAndGetPageWithFreeSpace( Relation rel, BlockNumber oldPage, Size oldSpaceAvail, Size spaceNeeded); extern void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail); -extern void 
XLogRecordPageWithFreeSpace(const RelFileNode& rnode, BlockNumber heapBlk, Size spaceAvail); +extern void XLogRecordPageWithFreeSpace(const RelFileNode& rnode, BlockNumber heapBlk, Size spaceAvail, + XLogPhyBlock *pblk = NULL); extern void FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks); extern void FreeSpaceMapVacuum(Relation rel); diff --git a/src/include/storage/smgr/segment.h b/src/include/storage/smgr/segment.h index a87f7aa502..6457574a2a 100644 --- a/src/include/storage/smgr/segment.h +++ b/src/include/storage/smgr/segment.h @@ -48,7 +48,8 @@ void seg_post_ckpt(void); void seg_async_read(SMgrRelation reln, ForkNumber forknum, AioDispatchDesc_t **dList, int32 dn); void seg_async_write(SMgrRelation reln, ForkNumber forknum, AioDispatchDesc_t **dList, int32 dn); void seg_move_buckets(const RelFileNodeBackend &dest, const RelFileNodeBackend &src, List *bucketList); -bool seg_fork_exists(SegSpace *spc, SMgrRelation reln, ForkNumber forknum, const XLogPhyBlock *pblk); +bool seg_fork_exists(SegSpace *spc, SMgrRelation reln, ForkNumber forknum, const XLogPhyBlock *pblk, + XLogPhyBlock *fsm_pblk = NULL); void seg_direct_read(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, BlockNumber *blocknums, char *buffer, BlockNumber *locBlock); BlockNumber seg_direct_read_get_range(BlockNumber logic_id); -- Gitee From ab766232d938df80e920e66aa424b9eea5fcdef7 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Fri, 26 Jul 2024 18:38:00 +0800 Subject: [PATCH 225/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=AE=B5=E9=94=99?= =?UTF-8?q?=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_comp.cpp | 19 +++++++++-------- src/common/pl/plpgsql/src/pl_exec.cpp | 5 +++++ .../expected/plpgsql_cursor_rowtype.out | 18 ++++++++++++++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 21 +++++++++++++++++++ 4 files changed, 54 insertions(+), 9 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp 
b/src/common/pl/plpgsql/src/pl_comp.cpp index 353d021241..9428b697dd 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -3893,15 +3893,16 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ rec->tupdesc = getCursorTupleDesc(rec->expr, false, false); - nulls = (bool *)palloc(rec->tupdesc->natts * sizeof(bool)); - rc = memset_s(nulls, rec->tupdesc->natts * sizeof(bool), true, rec->tupdesc->natts * sizeof(bool)); - securec_check(rc, "\0", "\0"); - - rec->tup = (HeapTuple)tableam_tops_form_tuple(rec->tupdesc, NULL, nulls); - /* compile_tmp_cx will automatically free, there is no need to set free mark. */ - rec->freetupdesc = false; - rec->freetup = false; - pfree_ext(nulls); + if (rec->tupdesc) { + nulls = (bool*)palloc(rec->tupdesc->natts * sizeof(bool)); + rc = memset_s(nulls, rec->tupdesc->natts * sizeof(bool), true, rec->tupdesc->natts * sizeof(bool)); + securec_check(rc, "\0", "\0"); + rec->tup = (HeapTuple)tableam_tops_form_tuple(rec->tupdesc, NULL, nulls); + /* compile_tmp_cx will automatically free, there is no need to set free mark. 
*/ + rec->freetupdesc = false; + rec->freetup = false; + pfree_ext(nulls); + } if (target_cxt) { temp = MemoryContextSwitchTo(temp); diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index 7fc1b1e2c9..2c3415c77d 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1215,6 +1215,11 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d rec->expr->func = func; new_tupdesc = get_cursor_tupledesc_exec(rec->expr, false, false); + if (new_tupdesc == NULL) { + temp = MemoryContextSwitchTo(temp); + return; + } + new_natts = new_tupdesc->natts; newnulls = (bool *)palloc(new_natts * sizeof(bool)); diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 302fda17b0..6d787e290b 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -1315,6 +1315,24 @@ INFO: after loop: (,) drop package pck_for; NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +create table t_Compare_Case0013(id int,first_name varchar(100), last_name varchar(100)); +create table t_CurRowtype_PLObject_Case0013(first_name varchar(100), last_name varchar(100)); +insert into t_CurRowtype_PLObject_Case0013 values('Jason','Statham'); +create or replace function f_CurRowtype_PLObject_Case0013() returns trigger as +$$ +declare + cursor cur_1 is select * from t_CurRowtype_PLObject_Case0013; + source cur_1%rowtype; +begin + source.first_name:=new.first_name; + source.last_name:=new.last_name; + insert into t_Compare_Case0013 values (source.first_name,source.last_name); + return new; +end +$$ language plpgsql; +drop function f_CurRowtype_PLObject_Case0013; +drop table t_CurRowtype_PLObject_Case0013; +drop table t_Compare_Case0013; set behavior_compat_options=''; set plsql_compile_check_options='for_loop'; -- (b) definde as scarlar diff --git 
a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index 2f02dbeb87..b60c00eacd 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -1024,6 +1024,27 @@ end pck_for; call pck_for.p1(); drop package pck_for; +create table t_Compare_Case0013(id int,first_name varchar(100), last_name varchar(100)); +create table t_CurRowtype_PLObject_Case0013(first_name varchar(100), last_name varchar(100)); +insert into t_CurRowtype_PLObject_Case0013 values('Jason','Statham'); + +create or replace function f_CurRowtype_PLObject_Case0013() returns trigger as +$$ +declare + cursor cur_1 is select * from t_CurRowtype_PLObject_Case0013; + source cur_1%rowtype; +begin + source.first_name:=new.first_name; + source.last_name:=new.last_name; + insert into t_Compare_Case0013 values (source.first_name,source.last_name); + return new; +end +$$ language plpgsql; + +drop function f_CurRowtype_PLObject_Case0013; +drop table t_CurRowtype_PLObject_Case0013; +drop table t_Compare_Case0013; + set behavior_compat_options=''; set plsql_compile_check_options='for_loop'; -- Gitee From 6f6ffa16de3f7bd8ac2714a8cfa7e34c50a909d7 Mon Sep 17 00:00:00 2001 From: lyannaa <1016943941@qq.com> Date: Mon, 26 Aug 2024 16:49:26 +0800 Subject: [PATCH 226/347] =?UTF-8?q?1.switch=20zone=E4=BB=A3=E7=A0=81?= =?UTF-8?q?=E5=8A=A0=E5=9B=BA=202.=E5=88=A0=E9=99=A4=E6=97=A0=E7=94=A8?= =?UTF-8?q?=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc.cpp | 2 - src/gausskernel/optimizer/commands/verify.cpp | 90 ++- .../storage/access/common/reloptions.cpp | 47 +- .../storage/access/transam/xact.cpp | 8 +- .../storage/access/ubtree/ubtdump.cpp | 641 +++++++----------- .../storage/access/ubtree/ubtinsert.cpp | 13 +- .../storage/access/ubtree/ubtrecycle.cpp | 199 +----- .../storage/access/ubtree/ubtree.cpp | 3 +- 
.../storage/access/ustore/knl_undoaction.cpp | 13 +- .../storage/access/ustore/knl_upage.cpp | 44 +- .../storage/access/ustore/knl_uundorecord.cpp | 65 +- .../storage/access/ustore/knl_uvacuumlazy.cpp | 2 +- .../access/ustore/undo/knl_uundoapi.cpp | 11 +- .../access/ustore/undo/knl_uundorecycle.cpp | 4 +- .../access/ustore/undo/knl_uundospace.cpp | 1 - .../access/ustore/undo/knl_uundotxn.cpp | 109 +-- .../access/ustore/undo/knl_uundozone.cpp | 87 +-- src/include/access/reloptions.h | 2 +- src/include/access/ubtree.h | 8 +- src/include/access/ustore/knl_uverify.h | 21 - src/include/access/ustore/undo/knl_uundotxn.h | 6 +- .../access/ustore/undo/knl_uundozone.h | 18 +- .../knl/knl_guc/knl_session_attr_storage.h | 1 - src/include/utils/rel.h | 2 +- ...single_node_produce_commit_rollback.source | 41 +- ...single_node_produce_commit_rollback.source | 59 +- ...store_subpartition_vacuum_partition.source | 10 +- 27 files changed, 450 insertions(+), 1057 deletions(-) diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index 33b089a0f4..affd0be0bf 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -14158,7 +14158,6 @@ static void analysis_options_guc_assign(const char* newval, void* extra) #define DEFAULT_SYNC_ROLLBACK true #define DEFAULT_ASYNC_ROLLBACK true #define DEFAULT_PAGE_ROLLBACK true -#define DEFAULT_USTORE_VERIFY false static void InitUStoreAttr() { @@ -14168,7 +14167,6 @@ static void InitUStoreAttr() u_sess->attr.attr_storage.umax_search_length_for_prune = DEFAULT_UMAX_PRUNE_SEARCH_LEN; u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_DEFAULT; u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_INVALID; - u_sess->attr.attr_storage.ustore_verify = DEFAULT_USTORE_VERIFY; u_sess->attr.attr_storage.enable_ustore_sync_rollback = DEFAULT_SYNC_ROLLBACK; u_sess->attr.attr_storage.enable_ustore_async_rollback = DEFAULT_ASYNC_ROLLBACK; 
u_sess->attr.attr_storage.enable_ustore_page_rollback = DEFAULT_PAGE_ROLLBACK; diff --git a/src/gausskernel/optimizer/commands/verify.cpp b/src/gausskernel/optimizer/commands/verify.cpp index ee7923dd61..be98fb6126 100644 --- a/src/gausskernel/optimizer/commands/verify.cpp +++ b/src/gausskernel/optimizer/commands/verify.cpp @@ -62,17 +62,17 @@ static void VerifyPartIndexRel(VacuumStmt* stmt, Relation rel, Relation partitio static void VerifyIndexRels(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL); static void VerifyIndexRel(VacuumStmt* stmt, Relation indexRel, VerifyDesc* checkCudesc = NULL); static void VerifyRowRels(VacuumStmt* stmt, Relation parentRel, Relation rel); -static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); -static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); -static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); -static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc = NULL, ForkNumber forkNum = MAIN_FORKNUM); +static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL); +static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc = NULL); +static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc = NULL); +static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc = NULL); static void VerifyColRels(VacuumStmt* stmt, Relation parentRel, Relation rel); static void VerifyColRel(VacuumStmt* stmt, Relation rel); static void VerifyColRelFast(Relation rel); static void VerifyColRelComplete(Relation rel); static void reportColVerifyFailed( Relation rel, bool isdesc = false, bool iscomplete = false, BlockNumber cuId = 0, int col = 0); -static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, ForkNumber forkNum); +static void 
VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, VerifyLevel level); /* * MainCatalogRelid is used to analyse verify the main system tables. */ @@ -1139,9 +1139,6 @@ static void VerifyIndexRel(VacuumStmt* stmt, Relation indexRel, VerifyDesc* chec relation_close(psortColRel, AccessShareLock); } else { VerifyRowRel(stmt, indexRel, checkCudesc); - if (RelationIsUstoreIndex(indexRel)) { - VerifyRowRel(stmt, indexRel, checkCudesc, FSM_FORKNUM); - } } } PG_CATCH(); @@ -1221,23 +1218,23 @@ static void VerifyRowRels(VacuumStmt* stmt, Relation parentRel, Relation rel) * @in&out checkCudesc - checkCudesc is a struct to judge whether cudesc tables is damaged. * @return: void */ -static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc, ForkNumber forkNum) +static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc) { /* turn off the remote read and keep the old mode */ int oldRemoteReadMode = SetRemoteReadModeOffAndGetOldMode(); bool isValidRelationPage = true; Oid relid = RelationGetRelid(rel); - isValidRelationPage = VerifyRowRelFull(stmt, rel, checkCudesc, forkNum); + isValidRelationPage = VerifyRowRelFull(stmt, rel, checkCudesc); SetRemoteReadMode(oldRemoteReadMode); if (!isValidRelationPage && IsMainCatalogObjectForVerify(relid)) { ereport(FATAL, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("The important catalog table %s.%s corrupts, the node is %s, forknum %d please fix it.", + errmsg("The important catalog table %s.%s corrupts, the node is %s, please fix it.", get_namespace_name(RelationGetNamespace(rel)), RelationGetRelationName(rel), - g_instance.attr.attr_common.PGXCNodeName, forkNum), + g_instance.attr.attr_common.PGXCNodeName), handle_in_client(true))); } @@ -1253,7 +1250,7 @@ static void VerifyRowRel(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc * @in&out checkCudesc - checkCudesc is a struct to judge whether cudesc tables is damaged. 
* @return: bool */ -static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc, ForkNumber forkNum) +static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc) { if (unlikely(rel == NULL)) { ereport(ERROR, @@ -1267,6 +1264,7 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc, ForkNumber f char* buf = (char*)palloc(BLCKSZ); BlockNumber nblocks; BlockNumber blkno; + ForkNumber forkNum = MAIN_FORKNUM; bool isValidRelationPage = true; char* namespace_name = get_namespace_name(RelationGetNamespace(rel)); @@ -1300,7 +1298,7 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc, ForkNumber f /* Ustrore white-box verification adapt to analyze verify. */ if (rdStatus == SMGR_RD_OK) { Page page = (char *) buf; - VerifyUstorePage(rel, page, blkno, forkNum); + VerifyUstorePage(rel, page, blkno, USTORE_VERIFY_FAST); } continue; } @@ -1329,7 +1327,7 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc, ForkNumber f } else if (rdStatus == SMGR_RD_OK) { /* Ustrore white-box verification adapt to analyze verify. */ Page page = (char *) buf; - VerifyUstorePage(rel, page, blkno, forkNum); + VerifyUstorePage(rel, page, blkno, USTORE_VERIFY_FAST); } } @@ -1346,7 +1344,7 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc, ForkNumber f * @in&out checkCudesc - checkCudesc is a struct to judge whether cudesc tables is damaged. 
* @return: bool */ -static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc, ForkNumber forkNum) +static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc) { if (RELATION_IS_GLOBAL_TEMP(rel) && !gtt_storage_attached(RelationGetRelid(rel))) { return true; @@ -1358,6 +1356,7 @@ static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc, ForkNumb Datum* values = NULL; bool* nulls = NULL; int numberOfAttributes = 0; + ForkNumber forkNum = MAIN_FORKNUM; bool isValidRelationPageFast = true; bool isValidRelationPageComplete = true; SMgrRelation smgrRel = NULL; @@ -1374,23 +1373,25 @@ static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc, ForkNumb MemoryContext oldMemContext = MemoryContextSwitchTo(verifyRowMemContext); /* check page header and crc first */ - isValidRelationPageFast = VerifyRowRelFast(rel, checkCudesc, forkNum); - - /* check all tuples of ustore relation. */ - buf = (char*)palloc(BLCKSZ); - RelationOpenSmgr(rel); - smgrRel = rel->rd_smgr; - nblocks = smgrnblocks(smgrRel, forkNum); - for (BlockNumber blkno = 0; blkno < nblocks; blkno++) { - CHECK_FOR_INTERRUPTS(); - SMGR_READ_STATUS rdStatus = smgrread(smgrRel, forkNum, blkno, buf); - if (rdStatus == SMGR_RD_OK) { - Page page = (char *) buf; - VerifyUstorePage(rel, page, blkno, forkNum); + isValidRelationPageFast = VerifyRowRelFast(rel, checkCudesc); + + if (RelationIsUstoreIndex(rel) || RelationIsUstoreFormat(rel)) { + /* check all tuples of ustore relation. 
*/ + buf = (char*)palloc(BLCKSZ); + RelationOpenSmgr(rel); + smgrRel = rel->rd_smgr; + nblocks = smgrnblocks(smgrRel, forkNum); + for (BlockNumber blkno = 0; blkno < nblocks; blkno++) { + CHECK_FOR_INTERRUPTS(); + SMGR_READ_STATUS rdStatus = smgrread(smgrRel, forkNum, blkno, buf); + if (rdStatus == SMGR_RD_OK) { + Page page = (char *) buf; + VerifyUstorePage(rel, page, blkno, USTORE_VERIFY_COMPLETE); + } } + pfree_ext(buf); } - pfree_ext(buf); - + if (rel->rd_rel->relkind == RELKIND_RELATION || rel->rd_rel->relkind == RELKIND_TOASTVALUE) { /* check the tuple */ tupleDesc = RelationGetDescr(rel); @@ -1482,9 +1483,9 @@ static bool VerifyRowRelComplete(Relation rel, VerifyDesc* checkCudesc, ForkNumb return (isValidRelationPageFast && isValidRelationPageComplete); } -static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc, ForkNumber forkNum) +static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCudesc) { - bool (*verifyfunc)(Relation, VerifyDesc*, ForkNumber); + bool (*verifyfunc)(Relation, VerifyDesc*); Relation bucketRel = NULL; if ((unsigned int)stmt->options & VACOPT_FAST) { @@ -1501,7 +1502,7 @@ static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCu Assert(checkCudesc == NULL); for (int i = 0; i < bucketlist->dim1; i++) { bucketRel = bucketGetRelation(rel, NULL, bucketlist->values[i]); - if (verifyfunc(bucketRel, NULL, forkNum) == false) { + if (verifyfunc(bucketRel, NULL) == false) { bucketCloseRelation(bucketRel); return false; } @@ -1509,7 +1510,7 @@ static bool VerifyRowRelFull(VacuumStmt* stmt, Relation rel, VerifyDesc* checkCu } return true; } else - return verifyfunc(rel, checkCudesc, forkNum); + return verifyfunc(rel, checkCudesc); } @@ -1880,30 +1881,25 @@ void VerifyAbortBufferIO(void) } } -static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, ForkNumber forkNum) +static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, VerifyLevel level) { 
if (!RelationIsUstoreIndex(rel) && !RelationIsUstoreFormat(rel)) { return; } - bool temp = false; - BEGIN_SAVE_VERIFY(temp); - PG_TRY(); + int prevLevel = u_sess->attr.attr_storage.ustore_verify_level; + u_sess->attr.attr_storage.ustore_verify_level = level; + PG_TRY(); { if (RelationIsUstoreIndex(rel)) { - if (forkNum == MAIN_FORKNUM && blkno != 0) { - UBTreeVerifyAll(rel, page, blkno, InvalidOffsetNumber, false); - } else if (forkNum == FSM_FORKNUM) { - UBTRecycleQueueVerifyPageOffline(rel, page, blkno); - } + UBTreeVerify(rel, page, blkno); } else { UpageVerify((UHeapPageHeader)page, InvalidXLogRecPtr, NULL, rel, NULL, blkno); } } PG_CATCH(); { - END_SAVE_VERIFY(temp) - PG_RE_THROW(); + u_sess->attr.attr_storage.ustore_verify_level = prevLevel; } PG_END_TRY(); - END_SAVE_VERIFY(temp); + u_sess->attr.attr_storage.ustore_verify_level = prevLevel; } diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index 3e433f5c79..72623bb219 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -84,8 +84,7 @@ static void ValidateStrOptStringOptimize(const char *val); static void ValidateStrOptEncryptAlgo(const char *val); static void ValidateStrOptDekCipher(const char *val); static void ValidateStrOptCmkId(const char *val); -static void SetUstoreDefaultFillfactor(void *rdopts, relopt_value *options, const relopt_parse_elt *elems, - int numoptions, int numelems); + #ifdef USE_SPQ static void CheckSpqBTBuildOption(const char *val); @@ -1434,7 +1433,7 @@ void ForbidUserToSetUnsupportedOptions(List *userOptions, const char *unsupporte * When validate is true, it is expected that all options appear in elems. 
*/ void fillRelOptions(void *rdopts, Size basesize, relopt_value *options, int numoptions, bool validate, - const relopt_parse_elt *elems, int numelems, bool kindIsHeap) + const relopt_parse_elt *elems, int numelems) { int i; int offset = basesize; @@ -1504,9 +1503,7 @@ void fillRelOptions(void *rdopts, Size basesize, relopt_value *options, int numo ereport(ERROR, (errcode(ERRCODE_CASE_NOT_FOUND), errmsg("reloption \"%s\" not found in parse table", options[i].gen->name))); } - if (kindIsHeap) { - SetUstoreDefaultFillfactor((void *)rdopts, options, elems, numoptions, numelems); - } + SET_VARSIZE(rdopts, offset); } @@ -2062,7 +2059,7 @@ bytea *default_reloptions(Datum reloptions, bool validate, relopt_kind kind) rdopts = (StdRdOptions *)allocateReloptStruct(sizeof(StdRdOptions), options, numoptions); fillRelOptions((void *)rdopts, sizeof(StdRdOptions), options, numoptions, - validate, tab, lengthof(tab), kind == RELOPT_KIND_HEAP); + validate, tab, lengthof(tab)); for (int i = 0; i < numoptions; i++) { if (options[i].gen->type == RELOPT_TYPE_STRING && options[i].isset) @@ -3222,39 +3219,3 @@ void CheckSpqBTBuildOption(const char *val) } } #endif - -static void SetUstoreDefaultFillfactor(void *rdopts, relopt_value *options, - const relopt_parse_elt *elems, int numoptions, int numelems) -{ - int ff_options_idx = -1; - int fillfactor_idx = -1; - int storage_type_idx = -1; - - for (int i = 0; i < numoptions; i++) { - if (ff_options_idx == -1 && pg_strcasecmp("fillfactor", options[i].gen->name) == 0) { - ff_options_idx = i; - } - } - - for (int i = 0; i < numelems; i++) { - if (fillfactor_idx == -1 && pg_strcasecmp("fillfactor", elems[i].optname) == 0) { - fillfactor_idx = i; - continue; - } - if (storage_type_idx == -1 && pg_strcasecmp("storage_type", elems[i].optname) == 0) { - storage_type_idx = i; - continue; - } - } - - if (storage_type_idx != -1) { - char *stpos = ((char *)rdopts) + elems[storage_type_idx].offset; - char *itempos = ((char *)rdopts) + (*(int *)stpos); 
- if (pg_strcasecmp("ustore", itempos) == 0) { - char *ffpos = ((char *)rdopts) + elems[fillfactor_idx].offset; - if (!options[ff_options_idx].isset) { - *(int *)ffpos = UHEAP_DEFAULT_FILLFACTOR; - } - } - } -} diff --git a/src/gausskernel/storage/access/transam/xact.cpp b/src/gausskernel/storage/access/transam/xact.cpp index 24e4ad2a3b..c001299d35 100755 --- a/src/gausskernel/storage/access/transam/xact.cpp +++ b/src/gausskernel/storage/access/transam/xact.cpp @@ -8384,7 +8384,12 @@ void ApplyUndoActions() Assert(slot != NULL && topXid == slot->XactId()); if (slot != NULL && topXid == slot->XactId()) { needRollback = true; - break; + continue; + } else if (!IsSubTransaction()) { + t_thrd.undo_cxt.transUndoSize = 0; + t_thrd.undo_cxt.prevXid[i] = InvalidTransactionId; + t_thrd.undo_cxt.slots[i] = NULL; + t_thrd.undo_cxt.slotPtr[i] = INVALID_UNDO_REC_PTR; } } } @@ -8403,6 +8408,7 @@ void ApplyUndoActions() s->first_urp[0], s->first_urp[1], s->first_urp[UNDO_PERSISTENCE_LEVELS - 1], s->latest_urp[0], s->latest_urp[1], s->latest_urp[UNDO_PERSISTENCE_LEVELS - 1], s->latest_urp_xact[0], s->latest_urp_xact[1], s->latest_urp_xact[UNDO_PERSISTENCE_LEVELS - 1]))); + ResetUndoActionsInfo(); return; } diff --git a/src/gausskernel/storage/access/ubtree/ubtdump.cpp b/src/gausskernel/storage/access/ubtree/ubtdump.cpp index 9aeda50b59..dfaab0bc6e 100644 --- a/src/gausskernel/storage/access/ubtree/ubtdump.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtdump.cpp @@ -28,14 +28,6 @@ #include "utils/builtins.h" #include "storage/procarray.h" - -static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, - OffsetNumber firstPos, OffsetNumber lastPos); -static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno); -static void UBTreeVerifyHeader(PageHeaderData* page, Relation rel, BlockNumber blkno, uint16 pageSize, uint16 headerSize); -static void UBTreeVerifyRowptr(PageHeaderData* header, Page page, BlockNumber blkno, 
OffsetNumber offset, - ItemIdSort indexSortPtr, const char *indexName, Relation rel); - void UBTreeVerifyIndex(Relation rel, TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols) { uint32 errVerified = 0; @@ -404,483 +396,334 @@ char* UBTGetVerifiedResultStr(uint32 type) } } -static bool UBTreeVerifyTupleTransactionStatus(Relation rel, BlockNumber blkno, OffsetNumber offnum, - TransactionIdStatus xminStatus, TransactionIdStatus xmaxStatus, - TransactionId xmin, TransactionId xmax, CommitSeqNo xminCSN, CommitSeqNo xmaxCSN) +static void VerifyIndexPageHeader(Relation rel, Page page, BlockNumber blkno, bool isLeaf, TransactionId xidBase) { - bool tranStatusError = false; - switch (xminStatus) { - case XID_COMMITTED: - tranStatusError = (xmaxStatus == XID_COMMITTED && xminCSN > xmaxCSN && xmaxCSN != COMMITSEQNO_FROZEN); - break; - case XID_INPROGRESS: - tranStatusError = (xmaxStatus == XID_COMMITTED && TransactionIdIsValid(xmax)); - break; - case XID_ABORTED: - tranStatusError = (xminStatus == XID_ABORTED && xmaxStatus != XID_ABORTED); - break; - default: - break; - } - - if (tranStatusError) { - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] xmin or xmax status invalid, xmin=%lu, xmax=%lu, xminStatus=%d, " - "xmaxStatus=%d, xminCSN=%lu, xmaxCSN=%lu, rnode[%u,%u,%u], block %u, offnum %u.", - xmin, xmax, xminStatus, xmaxStatus, xminCSN, xmaxCSN, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); - return false; + PageHeader phdr = (PageHeader)page; + if (PageGetPageSize(phdr) != BLCKSZ || (phdr->pd_flags & ~PD_VALID_FLAG_BITS) != 0 || + phdr->pd_lower < GetPageHeaderSize(page) || phdr->pd_lower > phdr->pd_upper || + phdr->pd_upper > phdr->pd_special || phdr->pd_special > BLCKSZ) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg( + "UBTREEVERIFY index page header invalid: rel %s, size %lu, flags %u, lower %u, upper %u, " + "special %u, rnode[%u,%u,%u], block %u.", NameStr(rel->rd_rel->relname), PageGetPageSize(phdr), + phdr->pd_flags, phdr->pd_lower, phdr->pd_upper, phdr->pd_special, + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); + } + if (isLeaf) { + TransactionId pruneXid = ShortTransactionIdToNormal(xidBase, phdr->pd_prune_xid); + TransactionId nextXid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; + if (TransactionIdFollows(xidBase, nextXid) || TransactionIdFollows(pruneXid, nextXid)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg( + "UBTREEVERIFY index page header invalid: rel %s, xidBase %lu, pruneXid %lu, nextXid %lu, " + "rnode[%u,%u,%u], block %u.", NameStr(rel->rd_rel->relname), xidBase, pruneXid, nextXid, + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); + } } - return true; } -static int ItemCompare(const void *item1, const void *item2) -{ - return ((ItemIdSort)item1)->start - ((ItemIdSort)item2)->start; -} - -void UBTreeVerifyHikey(Relation rel, Page page, BlockNumber blkno) -{ - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) 
- +static void VerifyIndexHikeyAndOpaque(Relation rel, Page page, BlockNumber blkno) +{ UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - - if (P_RIGHTMOST(opaque)) - return; - - RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; if (P_ISLEAF(opaque) ? (opaque->btpo.level != 0) : (opaque->btpo.level == 0)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted. level %u, flag %u, rnode[%u,%u,%u], block %u.", - opaque->btpo.level, opaque->btpo_flags, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - return; + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted rel %s, level %u, flag %u, rnode[%u,%u,%u], block %u.", + NameStr(rel->rd_rel->relname), opaque->btpo.level, opaque->btpo_flags, + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); } - - OffsetNumber lastPos = PageGetMaxOffsetNumber(page); - if (P_ISLEAF(opaque) ? (lastPos <= P_HIKEY) : (lastPos <= P_FIRSTKEY)) - return; - IndexTuple lastTuple = (IndexTuple)PageGetItem(page, PageGetItemId(page, lastPos)); - BTScanInsert itupKey = UBTreeMakeScanKey(rel, lastTuple); - if (UBTreeCompare(rel, itupKey, page, P_HIKEY, InvalidBuffer) <= 0) { + /* compare last key and HIKEY */ + OffsetNumber lastPos = PageGetMaxOffsetNumber(page); + /* note that the first data key of internal pages has no value */ + if (!P_RIGHTMOST(opaque) && (P_ISLEAF(opaque) ? 
(lastPos > P_HIKEY) : (lastPos > P_FIRSTKEY))) { + IndexTuple lastTuple = (IndexTuple)PageGetItem(page, PageGetItemId(page, lastPos)); + + /* we must hold: hikey >= lastKey */ + BTScanInsert itupKey = UBTreeMakeScanKey(rel, lastTuple); + if (UBTreeCompare(rel, itupKey, page, P_HIKEY, InvalidBuffer) > 0) { + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + index_deform_tuple(lastTuple, RelationGetDescr(rel), values, isnull); + char *keyDesc = BuildIndexValueDescription(rel, values, isnull); + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted key %s with HIKEY compare in rel %s, rnode[%u,%u,%u], block %u.", + (keyDesc ? keyDesc : "(UNKNOWN)"), NameStr(rel->rd_rel->relname), + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); + } pfree(itupKey); - return; } - pfree(itupKey); - - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - index_deform_tuple(lastTuple, RelationGetDescr(rel), values, isnull); - char *keyDesc = BuildIndexValueDescription(rel, values, isnull); - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("UBTREEVERIFY corrupted key %s with HIKEY compare in rel %s, rnode[%u,%u,%u], block %u.", - (keyDesc ? keyDesc : "(UNKNOWN)"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - } -void UBTreeVerifyPageXid(Relation rel, BlockNumber blkno, TransactionId xidBase, TransactionId pruneXid) +static void VerifyIndexOneItemId(Relation rel, Page page, BlockNumber blkno, OffsetNumber offset, + ItemIdSort itemIdSortPtr) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - - const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - if (TransactionIdFollows(xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid) || - TransactionIdPrecedes(xidBase + MaxShortTransactionId, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] ubtree's page xid_base invalid: indexName=%s, xid_base=%lu, nextxid=%lu, " + ItemId itemId = PageGetItemId(page, offset); + PageHeader phdr = (PageHeader)page; + uint16 pdUpper = phdr->pd_upper; + uint16 pdSpecial = phdr->pd_special; + if (!ItemIdIsUsed(itemId)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted unused line pointer: rel %s, offset %u, rpstart %u, rplen %u, " "rnode[%u,%u,%u], block %u.", - indexName, xidBase, t_thrd.xact_cxt.ShmemVariableCache->nextXid, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - return; + NameStr(rel->rd_rel->relname), offset, ItemIdGetOffset(itemId), ItemIdGetLength(itemId), + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); } - if (TransactionIdFollows(pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] ubtree's page prune_xid invalid: indexName=%s, xid_base=%lu, nextxid=%lu, " + if (!ItemIdHasStorage(itemId)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted no storage line pointer: rel %s, offset %u, rpstart %u, rplen %u, " "rnode[%u,%u,%u], block %u.", - indexName, pruneXid, t_thrd.xact_cxt.ShmemVariableCache->nextXid, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - return; + NameStr(rel->rd_rel->relname), offset, ItemIdGetOffset(itemId), ItemIdGetLength(itemId), + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); + } + itemIdSortPtr->start = ItemIdGetOffset(itemId); + itemIdSortPtr->end = itemIdSortPtr->start + 
SHORTALIGN(ItemIdGetLength(itemId)); + itemIdSortPtr->offset = offset; + if (itemIdSortPtr->start < pdUpper || itemIdSortPtr->end > pdSpecial) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted normal line pointer: rel %s, offset %u, rpstart %u, rplen %u, " + "rnode[%u,%u,%u], block %u.", + NameStr(rel->rd_rel->relname), offset, ItemIdGetOffset(itemId), ItemIdGetLength(itemId), + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); + } + IndexTuple ituple = (IndexTuple)PageGetItem(page, itemId); + int tupSize = IndexTupleSize(ituple); + if (tupSize > (int)ItemIdGetLength(itemId)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted tuple: rel %s, offset %u, tupsize %d, rpsize %u, " + "rnode[%u,%u,%u], block %u.", NameStr(rel->rd_rel->relname), offset, tupSize, ItemIdGetLength(itemId), + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); } } -static void UBTreeVerifyTupleTransactionInfo(Relation rel, BlockNumber blkno, Page page, - OffsetNumber offnum, bool fromInsert, TransactionId xidBase) +static int ItemCompare(const void *item1, const void *item2) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - - if (offnum == InvalidOffsetNumber) - return; - - IndexTuple tuple = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); - UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(tuple); - TransactionId xid = fromInsert ? - ShortTransactionIdToNormal(xidBase, uxid->xmin) : ShortTransactionIdToNormal(xidBase, uxid->xmax); - RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - - if (TransactionIdIsNormal(xid) && !TransactionIdIsCurrentTransactionId(xid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), - errmodule(MOD_USTORE), errmsg("[Verify UBTree] tuple xid %s invalid: indexName=%s, xid=%lu, " - "rnode[%u,%u,%u], block %u, offnum %u.", - (fromInsert ? 
"xmin" : "xmax"), (rel && rel->rd_rel ? RelationGetRelationName(rel) : "Unknown"), xid, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); - } + return ((ItemIdSort)item1)->start - ((ItemIdSort)item2)->start; } -static void UBTreeVerifyAllTuplesTransactionInfo(Relation rel, Page page, BlockNumber blkno, - OffsetNumber startoffset, bool fromInsert, TransactionId xidBase) +static void VerifyIndexCompare(Relation rel, BlockNumber blkno, UBTPageOpaqueInternal opaque, + TupleDesc tupdes, BTScanInsert cmpKeys, int keysz, IndexTuple curKey, IndexTuple nextKey) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) - - TransactionId maxXmax = InvalidTransactionId; - TransactionId minCommittedXmax = MaxTransactionId; - TransactionId pruneXid = ShortTransactionIdToNormal(xidBase, ((PageHeader)page)->pd_prune_xid); - OffsetNumber maxoff = PageGetMaxOffsetNumber(page); - RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; - if (rel && RelationGetNamespace(rel) == PG_TOAST_NAMESPACE) { - GetOldestXminForUndo(&oldestXmin); - } - for (OffsetNumber offnum = startoffset; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - ItemId itemid = PageGetItemId(page, offnum); - IndexTuple tuple = (IndexTuple)PageGetItem(page, itemid); - UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(tuple); - TransactionId xmin = ShortTransactionIdToNormal(xidBase, uxid->xmin); - TransactionId xmax = ShortTransactionIdToNormal(xidBase, uxid->xmax); - - if (TransactionIdFollows(Max(xmin, xmax), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] index tuple xid(xmin/xmax) is bigger than nextXid: " - "xmin=%lu, xmax=%lu, nextxid=%lu, xid_base=%lu, rnode[%u,%u,%u], block %u, offnum %u.", - xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextXid, xidBase, - rNode.spcNode, rNode.dbNode, 
rNode.relNode, blkno, offnum))); - return; - } - - uint32 base = u_sess->attr.attr_storage.ustore_verify ? MaxShortTransactionId : 0; - if (TransactionIdIsNormal(xmin) && !IndexItemIdIsFrozen(itemid) && - TransactionIdPrecedes(xmin + base, oldestXmin)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] index tuple xmin invalid: xmin=%lu, oldest_xmin=%lu, xid_base=%lu, " - "rnode[%u,%u,%u], block %u, offnum %u.", - xmin, oldestXmin, xidBase, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); - return; - } - if (TransactionIdIsNormal(xmax) && !ItemIdIsDead(itemid) && - TransactionIdPrecedes(xmax + base, oldestXmin)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] index tuple xmin invalid: xmax=%lu, oldest_xmin=%lu, xid_base=%lu, " - "rnode[%u,%u,%u], block %u, offnum %u.", - xmax, oldestXmin, xidBase, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offnum))); - return; - } - if (!u_sess->attr.attr_storage.ustore_verify) { - continue; - } - TransactionIdStatus xminStatus = UBTreeCheckXid(xmin); - CommitSeqNo xminCSN = TransactionIdGetCommitSeqNo(xmin, false, false, false, NULL); - TransactionIdStatus xmaxStatus = UBTreeCheckXid(xmax); - CommitSeqNo xmaxCSN = TransactionIdGetCommitSeqNo(xmax, false, false, false, NULL); - - if (xminStatus == XID_INPROGRESS && xmaxStatus != XID_INPROGRESS && TransactionIdIsValid(xmax)) { - xminStatus = UBTreeCheckXid(xmin); - xminCSN = TransactionIdGetCommitSeqNo(xmin, false, false, false, NULL); - } - - if (xmaxStatus == XID_COMMITTED && TransactionIdPrecedes(xmax, minCommittedXmax)) { - minCommittedXmax = xmax; - } - - if (TransactionIdFollows(xmax, maxXmax)) { - maxXmax = xmax; - } - if (!UBTreeVerifyTupleTransactionStatus(rel, blkno, offnum, xminStatus, xmaxStatus, - xmin, xmax, xminCSN, xmaxCSN)) { - return; + /* current key must <= next key */ + if (!_bt_index_tuple_compare(tupdes, cmpKeys->scankeys, keysz, curKey, 
nextKey)) { + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + char *curKeyDesc = NULL; + char *nextKeyDesc = NULL; + if (P_ISLEAF(opaque)) { + index_deform_tuple(curKey, RelationGetDescr(rel), values, isnull); + curKeyDesc = BuildIndexValueDescription(rel, values, isnull); + index_deform_tuple(nextKey, RelationGetDescr(rel), values, isnull); + nextKeyDesc = BuildIndexValueDescription(rel, values, isnull); } - } - - UBTPageOpaque uopaque = (UBTPageOpaque)PageGetSpecialPointer(page); - UBTPageOpaqueInternal ubtOpaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - if (TransactionIdFollows(uopaque->xact, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] xact xid is bigger than nextXid: xact=%lu, nextxid=%lu, rnode[%u,%u,%u], block %u.", - uopaque->xact, t_thrd.xact_cxt.ShmemVariableCache->nextXid, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - return; - } - if (!u_sess->attr.attr_storage.ustore_verify) { - return; - } - if (minCommittedXmax != MaxTransactionId && TransactionIdIsValid(pruneXid) && - TransactionIdFollows(minCommittedXmax, pruneXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] min_committed_xmax is bigger than prune_xid: prune_xid=%lu, minCommittedXmax=%lu, " - "rnode[%u,%u,%u], block %u.", - pruneXid, minCommittedXmax, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - return; - } - - if (TransactionIdIsValid(maxXmax) && TransactionIdIsValid(ubtOpaque->last_delete_xid) && - TransactionIdFollows(maxXmax, ubtOpaque->last_delete_xid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] max_xmax is bigger than last_delete_xid: last_delete_xid on page=%lu, actual value=%lu, " - "rnode[%u,%u,%u], block %u.", - ubtOpaque->last_delete_xid, maxXmax, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); + ereport(defence_errlevel(), 
(errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted key order %s %s, rel %s, rnode[%u,%u,%u], block %u.", + (curKeyDesc ? curKeyDesc : "(UNKNOWN)"), (nextKeyDesc ? nextKeyDesc : "(UNKNOWN)"), + NameStr(rel->rd_rel->relname), rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); } } -void UBTreeVerifyRowptrDML(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum) +static void VerifyIndexItemId(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) { - if (u_sess->attr.attr_storage.ustore_verify) { - return UBTreeVerifyRowptrNonDML(rel, page, blkno); - } - if (offnum == InvalidOffsetNumber) { - return; - } - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - - const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); OffsetNumber firstPos = P_FIRSTDATAKEY(opaque); OffsetNumber lastPos = PageGetMaxOffsetNumber(page); if (firstPos > lastPos) { - return; + return; /* empty page */ } - ItemIdSort indexSortPtr = (ItemIdSort)palloc0(sizeof(ItemIdSortData)); - UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, offnum, indexSortPtr, indexName, rel); - pfree(indexSortPtr); - - UBTreeVerifyTupleKey(rel, page, blkno, offnum, firstPos, lastPos); -} - -void UBTreeVerifyItems(Relation rel, BlockNumber blkno, TupleDesc desc, BTScanInsert cmpKeys, int keysz, - IndexTuple currKey, IndexTuple nextKey, UBTPageOpaqueInternal opaque) -{ - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - - if (_bt_index_tuple_compare(desc, cmpKeys->scankeys, keysz, currKey, nextKey)) - return; - - char *currkeyDesc = NULL; - char *nextkeyDesc = NULL; - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - - if (P_ISLEAF(opaque)) { - index_deform_tuple(currKey, RelationGetDescr(rel), values, isnull); - currkeyDesc = BuildIndexValueDescription(rel, values, isnull); - index_deform_tuple(nextKey, RelationGetDescr(rel), values, isnull); - nextkeyDesc = BuildIndexValueDescription(rel, values, isnull); - } - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, rnode[%u,%u,%u], block %u.", - (nextkeyDesc ? nextkeyDesc : "(unknown)"), (currkeyDesc ? currkeyDesc : "(unknown)"), - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); -} + + ItemIdSort itemIdSortPtr = (ItemIdSort)palloc0(sizeof(ItemIdSortData)); + VerifyIndexOneItemId(rel, page, blkno, offnum, itemIdSortPtr); + pfree(itemIdSortPtr); -static void UBTreeVerifyTupleKey(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, - OffsetNumber firstPos, OffsetNumber lastPos) -{ CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) - - UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - TupleDesc desc = RelationGetDescr(rel); - int keySize = IndexRelationGetNumberOfKeyAttributes(rel); + TupleDesc tupdes = RelationGetDescr(rel); + int keysz = IndexRelationGetNumberOfKeyAttributes(rel); BTScanInsert cmpKeys = UBTreeMakeScanKey(rel, NULL); - IndexTuple currKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); + IndexTuple curKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); if (offnum > firstPos) { - ItemId itemId = PageGetItemId(page, OffsetNumberPrev(offnum)); - IndexTuple prev_key = (IndexTuple)PageGetItem(page, itemId); - UBTreeVerifyItems(rel, blkno, desc, cmpKeys, keySize, prev_key, currKey, opaque); + IndexTuple prevKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, OffsetNumberPrev(offnum))); + VerifyIndexCompare(rel, blkno, opaque, tupdes, cmpKeys, keysz, prevKey, curKey); } if (offnum < lastPos) { - 
ItemId itemId = PageGetItemId(page, OffsetNumberNext(offnum)); - IndexTuple next_key = (IndexTuple)PageGetItem(page, itemId); - UBTreeVerifyItems(rel, blkno, desc, cmpKeys, keySize, currKey, next_key, opaque); + IndexTuple nextKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, OffsetNumberNext(offnum))); + VerifyIndexCompare(rel, blkno, opaque, tupdes, cmpKeys, keysz, curKey, nextKey); } pfree(cmpKeys); + + if (P_ISLEAF(opaque)) { + UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(curKey); + TransactionId xid = ShortTransactionIdToNormal(opaque->xid_base, + fromInsert ? uxid->xmin : uxid->xmax); + if (TransactionIdIsNormal(xid) && !TransactionIdIsCurrentTransactionId(xid)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted tuple %s invalid: xid=%lu, rnode[%u,%u,%u], block %u, offnum %u.", + (fromInsert ? "xmin" : "xmax"), xid, + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno, offnum))); + } + } } -static void UBTreeVerifyRowptrNonDML(Relation rel, Page page, BlockNumber blkno) +static void VerifyIndexPageItemId(Relation rel, Page page, BlockNumber blkno) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) - - const char *indexName = (rel && rel->rd_rel ? 
RelationGetRelationName(rel) : "unknown"); - TupleDesc desc = RelationGetDescr(rel); + TupleDesc tupdes = RelationGetDescr(rel); int keysz = IndexRelationGetNumberOfKeyAttributes(rel); - ItemIdSortData itemidBase[MaxIndexTuplesPerPage]; - ItemIdSort sortPtr = itemidBase; + ItemIdSortData itemIdBase[MaxIndexTuplesPerPage]; + ItemIdSort itemIdSortPtr = itemIdBase; UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); OffsetNumber firstPos = P_FIRSTDATAKEY(opaque); OffsetNumber lastPos = PageGetMaxOffsetNumber(page); - if (firstPos > lastPos) { - return; + return; /* empty page */ } + /* check key orders */ BTScanInsert cmpKeys = UBTreeMakeScanKey(rel, NULL); - UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, firstPos, sortPtr, indexName, rel); - IndexTuple currKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, firstPos)); - OffsetNumber nextPos = OffsetNumberNext(firstPos); - sortPtr++; - RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - while (nextPos <= lastPos) { - ItemId itemid = PageGetItemId(page, nextPos); - IndexTuple nextKey = (IndexTuple)PageGetItem(page, itemid); - if (P_ISLEAF(opaque) || nextPos > firstPos + 1) { - if (!_bt_index_tuple_compare(desc, cmpKeys->scankeys, keysz, currKey, nextKey)) { - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; - char *currkeyDesc = NULL; - char *nextkeyDesc = NULL; - if (P_ISLEAF(opaque)) { - index_deform_tuple(currKey, RelationGetDescr(rel), values, isnull); - currkeyDesc = BuildIndexValueDescription(rel, values, isnull); - index_deform_tuple(nextKey, RelationGetDescr(rel), values, isnull); - nextkeyDesc = BuildIndexValueDescription(rel, values, isnull); - } - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg( - "[Verify UBTree] nextkey >= currkey, nextkey: %s, currkey : %s, indexName=%s, " - "rnode[%u,%u,%u], block %u.", - (nextkeyDesc ? nextkeyDesc : "(unknown)"), (currkeyDesc ? 
currkeyDesc : "(unknown)"), indexName, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - pfree(cmpKeys); - return; - } + VerifyIndexOneItemId(rel, page, blkno, firstPos, itemIdSortPtr); + itemIdSortPtr++; + IndexTuple curKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, firstPos)); + for (OffsetNumber nxtPos = OffsetNumberNext(firstPos); nxtPos <= lastPos; nxtPos = OffsetNumberNext(nxtPos)) { + ItemId itemId = PageGetItemId(page, nxtPos); + IndexTuple nextKey = (IndexTuple)PageGetItem(page, itemId); + if (P_ISLEAF(opaque) || nxtPos > firstPos + 1) { + VerifyIndexCompare(rel, blkno, opaque, tupdes, cmpKeys, keysz, curKey, nextKey); } - currKey = nextKey; - UBTreeVerifyRowptr((PageHeaderData*)page, page, blkno, nextPos, sortPtr, indexName, rel); - nextPos = OffsetNumberNext(nextPos); - sortPtr++; + curKey = nextKey; + VerifyIndexOneItemId(rel, page, blkno, nxtPos, itemIdSortPtr); + itemIdSortPtr++; } - - int storageNum = sortPtr - itemidBase; - if (storageNum <= 1) { + + int nstorage = itemIdSortPtr - itemIdBase; + if (nstorage <= 1) { pfree(cmpKeys); return; } - - qsort((char*)itemidBase, storageNum, sizeof(ItemIdSortData), ItemCompare); - - for (int i = 0; i < storageNum - 1; i++) { - ItemIdSort tempPtr1 = &itemidBase[i]; - ItemIdSort tempPtr2 = &itemidBase[i + 1]; + + qsort((char *)itemIdBase, nstorage, sizeof(ItemIdSortData), ItemCompare); + + for (int i = 0; i < nstorage - 1; i++) { + ItemIdSort tempPtr1 = &itemIdBase[i]; + ItemIdSort tempPtr2 = &itemIdBase[i + 1]; if (tempPtr1->end > tempPtr2->start) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] Ubtree ItemIdSort conflict: indexName=%s, ptr1offset %u, " - "ptr1start = %u, ptr1end = %u, ptr2offset = %u, ptr2start = %u, ptr2end = %u, " - "rnode[%u,%u,%u], block %u.", - indexName, tempPtr1->offset, tempPtr1->start, tempPtr1->end, + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY corrupted line pointer: rel %s 
tempPtr1offset %u, tempPtr1start %u, " + "tempPtr1end %u, tempPtr2offset %u, tempPtr2start %u, tempPtr2end %u, rnode[%u,%u,%u], block %u.", + NameStr(rel->rd_rel->relname), tempPtr1->offset, tempPtr1->start, tempPtr1->end, tempPtr2->offset, tempPtr2->start, tempPtr2->end, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - pfree(cmpKeys); - return; + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); } } - + pfree(cmpKeys); } -void UBTreeVerifyPage(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) +static bool UBTreeVerifyITupleTransactionStatus(TransactionIdStatus xminStatus, TransactionIdStatus xmaxStatus, + TransactionId xmin, TransactionId xmax, CommitSeqNo xminCSN, CommitSeqNo xmaxCSN) { - BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - - BTPageOpaqueInternal opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); - if (P_IGNORE(opaque)) { - return; + if (xminStatus == XID_INPROGRESS && xmaxStatus == XID_COMMITTED && TransactionIdIsValid(xmax)) { + return false; } - - UBTreeVerifyHeader((PageHeaderData*)page, rel, blkno, PageGetPageSize((PageHeader)page), GetPageHeaderSize(page)); - UBTreeVerifyHikey(rel, page, blkno); - UBTreeVerifyRowptrDML(rel, page, blkno, offnum); - UBTPageOpaqueInternal ubtOpaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - if (!P_ISLEAF(ubtOpaque)) { - return; + if (xminStatus == XID_ABORTED && xmaxStatus != XID_ABORTED) { + return false; } - TransactionId xidBase = ubtOpaque->xid_base; - UBTreeVerifyPageXid(rel, blkno, xidBase, ShortTransactionIdToNormal(xidBase, ((PageHeader)page)->pd_prune_xid)); - UBTreeVerifyTupleTransactionInfo(rel, blkno, page, offnum, fromInsert, xidBase); + if (xminStatus == XID_COMMITTED && xmaxStatus == XID_COMMITTED) { + if (xminCSN > xmaxCSN && xmaxCSN != COMMITSEQNO_FROZEN) { + return false; + } + } + return true; } -static void UBTreeVerifyHeader(PageHeaderData* page, Relation rel, 
BlockNumber blkno, uint16 pageSize, uint16 headerSize) +static void VerifyIndexTransactionInfo(Relation rel, Page page, BlockNumber blkno) { - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - RelFileNode rNode = rel ? rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - if (pageSize != BLCKSZ || (page->pd_flags & ~PD_VALID_FLAG_BITS) != 0 || page->pd_lower < headerSize || - page->pd_lower > page->pd_upper || page->pd_upper > page->pd_special || page->pd_special > BLCKSZ) { - const char *indexName = (rel && rel->rd_rel ? RelationGetRelationName(rel) : "unknown"); - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] index page header invalid: indexName=%s, size=%u," - "flags=%u, lower=%u, upper=%u, special=%u, rnode[%u,%u,%u], block %u.", indexName, headerSize, - page->pd_flags, page->pd_lower, page->pd_upper, page->pd_special, - rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - } -} + TransactionId xid_base = opaque->xid_base; + TransactionId pruneXid = ShortTransactionIdToNormal(xid_base, ((PageHeader)page)->pd_prune_xid); -static void UBTreeVerifyRowptr(PageHeaderData* header, Page page, BlockNumber blkno, OffsetNumber offset, - ItemIdSort indexSortPtr, const char *indexName, Relation rel) -{ - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - - ItemId itemId = PageGetItemId(page, offset); - unsigned rpStart = ItemIdGetOffset(itemId); - Size rpLen = ItemIdGetLength(itemId); - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; + /* stat info for prune_xid and last_delete_xid */ + TransactionId maxXmax = InvalidTransactionId; + TransactionId minCommittedXmax = MaxTransactionId; + OffsetNumber maxoff = PageGetMaxOffsetNumber(page); - if (!ItemIdIsUsed(itemId)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] row pointer is unused: indexName=%s, " - "rowPtr startOffset=%u, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", - indexName, rpStart, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offset))); - return; - } - if (!ItemIdHasStorage(itemId)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] row pointer has no storage: indexName=%s, " - "rowPtr startOffset=%u, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", - indexName, rpStart, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offset))); - return; + for (OffsetNumber offnum = P_FIRSTDATAKEY(opaque); offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { + ItemId iid = PageGetItemId(page, offnum); + IndexTuple itup = (IndexTuple)PageGetItem(page, iid); + UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); + + /* fetch trans info */ + TransactionId xmin = ShortTransactionIdToNormal(xid_base, uxid->xmin); + TransactionId xmax = ShortTransactionIdToNormal(xid_base, uxid->xmax); + if (TransactionIdFollows(Max(xmin, xmax), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY itup xid invalid: rel %s, xmin/xmax %lu/%lu, nextxid %lu, xid-base %lu, " + "rnode[%u,%u,%u], block %u, offnum %u.", + NameStr(rel->rd_rel->relname), xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextXid, + xid_base, rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno, offnum))); + } + + TransactionIdStatus xminStatus = UBTreeCheckXid(xmin); + CommitSeqNo xminCSN 
= TransactionIdGetCommitSeqNo(xmin, false, false, false, NULL); + TransactionIdStatus xmaxStatus = UBTreeCheckXid(xmax); + CommitSeqNo xmaxCSN = TransactionIdGetCommitSeqNo(xmax, false, false, false, NULL); + + if (xminStatus == XID_INPROGRESS && xmaxStatus != XID_INPROGRESS && TransactionIdIsValid(xmax)) { + xminStatus = UBTreeCheckXid(xmin); + xminCSN = TransactionIdGetCommitSeqNo(xmin, false, false, false, NULL); + } + + if (xmaxStatus == XID_COMMITTED && TransactionIdPrecedes(xmax, minCommittedXmax)) { + minCommittedXmax = xmax; + } + if (TransactionIdFollows(xmax, maxXmax)) { + maxXmax = xmax; + } + if (!UBTreeVerifyITupleTransactionStatus(xminStatus, xmaxStatus, xmin, xmax, xminCSN, xmaxCSN)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY xmin xmax status invalid, rel %s, xmin %lu, xmax %lu, xminStatus %d," + "xmaxStatus %d, xminCSN %lu, xmaxCSN %lu, rnode[%u,%u,%u], block %u, offnum %u.", + NameStr(rel->rd_rel->relname), xmin, xmax, xminStatus, xmaxStatus, xminCSN, xmaxCSN, + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno, offnum))); + } } - indexSortPtr->start = rpStart; - indexSortPtr->end = indexSortPtr->start + SHORTALIGN(ItemIdGetLength(itemId)); - indexSortPtr->offset = offset; - if (indexSortPtr->start < header->pd_upper || indexSortPtr->end > header->pd_special) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] The item corresponding to row pointer exceeds the range of item stored in the page: " - "indexName=%s, rowPtr startOffset=%u, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", - indexName, rpStart, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offset))); - return; + + if (minCommittedXmax != MaxTransactionId && TransactionIdIsValid(pruneXid) && + TransactionIdFollows(minCommittedXmax, pruneXid)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY prune_xid invalid, rel = %s, 
prune_xid on page = %lu, actual value = %lu, " + "rnode[%u,%u,%u], block %u.", + NameStr(rel->rd_rel->relname), pruneXid, minCommittedXmax, + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); } - int tupleSize = IndexTupleSize((IndexTuple)PageGetItem(page, itemId)); - if (tupleSize > (int)rpLen) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify UBTree] tuple size is bigger than item's len: indexName=%s, " - "tuple size=%d, rowPtr len=%lu, rnode[%u,%u,%u], block %u, offnum %u.", - indexName, tupleSize, rpLen, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno, offset))); - return; + + if (TransactionIdIsValid(maxXmax) && TransactionIdIsValid(opaque->last_delete_xid) && + TransactionIdFollows(maxXmax, opaque->last_delete_xid)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("UBTREEVERIFY last_delete_xid invalid, rel = %s, last_delete_xid on page = %lu, " + "actual value = %lu, rnode[%u,%u,%u], block %u.", + NameStr(rel->rd_rel->relname), opaque->last_delete_xid, maxXmax, + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, blkno))); } } -void UBTreeVerifyAll(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) +void UBTreeVerify(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) { BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - UBTreeVerifyPage(rel, page, blkno, offnum, fromInsert); BTPageOpaqueInternal opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); if (P_IGNORE(opaque)) { return; } + UBTPageOpaqueInternal ubtOpaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - if (!P_ISLEAF(ubtOpaque)) { - return; + bool isLeaf = P_ISLEAF(ubtOpaque); + VerifyIndexPageHeader(rel, page, blkno, isLeaf, ubtOpaque->xid_base); + VerifyIndexHikeyAndOpaque(rel, page, blkno); + if (offnum != InvalidOffsetNumber) { + VerifyIndexItemId(rel, page, blkno, offnum, 
fromInsert); + } else { + CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) + VerifyIndexPageItemId(rel, page, blkno); + if (isLeaf) { + VerifyIndexTransactionInfo(rel, page, blkno); + } } - TransactionId xidBase = ubtOpaque->xid_base; - UBTreeVerifyAllTuplesTransactionInfo(rel, page, blkno, P_FIRSTDATAKEY(ubtOpaque), fromInsert, xidBase); } diff --git a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp index 5f2efeb9a6..e7b2682a4a 100644 --- a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp @@ -279,7 +279,8 @@ bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, OidRBTr } END_CRIT_SECTION(); - UBTreeVerifyAll(rel, page, BufferGetBlockNumber(buf), InvalidOffsetNumber, false); + UBTreeVerify(rel, page, BufferGetBlockNumber(buf)); + return has_pruned; } @@ -1311,7 +1312,8 @@ static void UBTreeInsertOnPage(Relation rel, BTScanInsert itup_key, Buffer buf, } END_CRIT_SECTION(); - UBTreeVerifyPage(rel, page, BufferGetBlockNumber(buf), itup_off, true); + UBTreeVerify(rel, page, BufferGetBlockNumber(buf), itup_off, true); + /* release buffers */ if (BufferIsValid(metabuf)) { _bt_relbuf(rel, metabuf); @@ -1860,7 +1862,7 @@ static Buffer UBTreeSplit(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber fi END_CRIT_SECTION(); Page page = BufferGetPage(actualInsertBuf); - UBTreeVerifyPage(rel, page, BufferGetBlockNumber(actualInsertBuf), actualInsertOff, true); + UBTreeVerify(rel, page, BufferGetBlockNumber(actualInsertBuf), actualInsertOff, true); /* discard this page from the Recycle Queue */ UBTreeRecordUsedPage(rel, addr); @@ -2244,7 +2246,8 @@ static void UBTreeDeleteOnPage(Relation rel, Buffer buf, OffsetNumber offset, bo } END_CRIT_SECTION(); - UBTreeVerifyPage(rel, page, BufferGetBlockNumber(buf), offset, false); + UBTreeVerify(rel, page, BufferGetBlockNumber(buf), offset); + bool needRecordEmpty = (opaque->activeTupleCount == 0); if 
(needRecordEmpty) { /* @@ -2553,7 +2556,7 @@ static Buffer UBTreeNewRoot(Relation rel, Buffer lbuf, Buffer rbuf) } END_CRIT_SECTION(); - UBTreeVerifyAll(rel, rootpage, rootblknum, InvalidOffsetNumber, false); + UBTreeVerify(rel, rootpage, rootblknum); /* done with metapage */ _bt_relbuf(rel, metabuf); diff --git a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp index 74db5dc428..010b83aafe 100644 --- a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp @@ -46,10 +46,6 @@ static bool QueuePageIsEmpty(Buffer buf); static Buffer AcquireNextAvailableQueuePage(Relation rel, Buffer buf, UBTRecycleForkNumber forkNumber); static void InsertOnRecycleQueuePage(Relation rel, Buffer buf, uint16 offset, BlockNumber blkno, TransactionId xid); static void RemoveOneItemFromPage(Relation rel, Buffer buf, uint16 offset); -static void UBTRecycleQueueExecVerify(int mode, UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, UBTRecycleMeta metaData, OffsetNumber offnum = InvalidOffsetNumber, bool fromInsert = false); -static void UBTRecycleQueueVerifyHeader(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno); -static void UBTRecycleQueueVerifyItem(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); -static void UBTRecycleMetaDataVerify(UBTRecycleMeta metaData, Relation rel, BlockNumber metaBlkno); const BlockNumber FirstBlockNumber = 0; const BlockNumber FirstNormalBlockNumber = 2; /* 0 and 1 are pages which include meta data */ @@ -147,7 +143,6 @@ void UBTreeRecycleQueueInitPage(Relation rel, Page page, BlockNumber blkno, Bloc UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, blkno); header->prevBlkno = prevBlkno; header->nextBlkno = nextBlkno; - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, blkno, NULL); } /* record the chain changes in prev or next page */ @@ 
-159,7 +154,6 @@ void UBtreeRecycleQueueChangeChain(Buffer buf, BlockNumber newBlkno, bool setNex } else { header->prevBlkno = newBlkno; } - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, NULL, BufferGetBlockNumber(buf), NULL); } static void LogInitRecycleQueuePage(Relation rel, Buffer buf, Buffer leftBuf, Buffer rightBuf) @@ -227,8 +221,6 @@ static void InitRecycleQueueInitialPage(Relation rel, Buffer buf) } END_CRIT_SECTION(); - UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, blkno); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, blkno, NULL); } Buffer ReadRecycleQueueBuffer(Relation rel, BlockNumber blkno) @@ -501,7 +493,6 @@ Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBT if (metaChanged) { MarkBufferDirtyHint(metaBuf, false); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_METADATA, NULL, rel, BufferGetBlockNumber(metaBuf), metaData); } UnlockReleaseBuffer(metaBuf); @@ -522,9 +513,6 @@ void UBTreeRecycleQueuePageChangeEndpointLeftPage(Relation rel, Buffer buf, bool tailItem->next = OtherBlockOffset; } header->flags &= ~endpointFlag; - if (rel == NULL) { - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, BufferGetBlockNumber(buf), NULL); - } } void UBTreeRecycleQueuePageChangeEndpointRightPage(Relation rel, Buffer buf, bool isHead) @@ -543,9 +531,6 @@ void UBTreeRecycleQueuePageChangeEndpointRightPage(Relation rel, Buffer buf, boo Assert(header->head == InvalidOffset); } header->flags |= endpointFlag; - if (rel == NULL) { - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, BufferGetBlockNumber(buf), NULL); - } } static void RecycleQueueChangeEndpoint(Relation rel, Buffer buf, Buffer nextBuf, bool isHead) @@ -583,11 +568,6 @@ static void RecycleQueueChangeEndpoint(Relation rel, Buffer buf, Buffer nextBuf, } END_CRIT_SECTION(); - - UBTRecycleQueueHeader nextHeader = GetRecycleQueueHeader(BufferGetPage(nextBuf), 
BufferGetBlockNumber(nextBuf)); - UBTRecycleQueueHeader header = GetRecycleQueueHeader(BufferGetPage(buf), BufferGetBlockNumber(buf)); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, nextHeader, rel, BufferGetBlockNumber(nextBuf), NULL); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, BufferGetBlockNumber(buf), NULL); } static Buffer MoveToEndpointPage(Relation rel, Buffer buf, bool needHead, int access) @@ -706,9 +686,6 @@ static void RecycleQueueLinkNewPage(Relation rel, Buffer leftBuf, Buffer newBuf) } END_CRIT_SECTION(); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, leftHeader, rel, leftBlkno, NULL); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, header, rel, blkno, NULL); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER, rightHeader, rel, rightBlkno, NULL); if (header->prevBlkno == header->nextBlkno) { ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), errmsg( "RecycleQueueLinkNewPage invalid: prev and next page is the same, " @@ -771,7 +748,6 @@ static void TryFixMetaData(Buffer metaBuf, int32 oldval, int32 newval, bool isHe /* update succeed, mark buffer dirty */ if (ConditionalLockBuffer(metaBuf)) { MarkBufferDirty(metaBuf); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_METADATA, NULL, rel, BufferGetBlockNumber(metaBuf), metaData); LockBuffer(metaBuf, BUFFER_LOCK_UNLOCK); } } @@ -909,7 +885,7 @@ static void InsertOnRecycleQueuePage(Relation rel, Buffer buf, uint16 offset, Bl } END_CRIT_SECTION(); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM, header, rel, BufferGetBlockNumber(buf), NULL, offset, true); + UnlockReleaseBuffer(buf); } @@ -949,7 +925,6 @@ void UBTreeXlogRecycleQueueModifyPage(Buffer buf, xl_ubtree2_recycle_queue_modif item->next = header->freeListHead; header->freeListHead = xlrec->offset; } - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM, header, NULL, blkno, NULL, xlrec->offset, xlrec->isInsert); } 
static void RemoveOneItemFromPage(Relation rel, Buffer buf, uint16 offset) @@ -994,7 +969,6 @@ static void RemoveOneItemFromPage(Relation rel, Buffer buf, uint16 offset) } END_CRIT_SECTION(); - UBTRecycleQueueExecVerify(USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM, header, rel, BufferGetBlockNumber(buf), NULL, offset, false); if (!(IsNormalOffset(header->head))) { /* deleting the only item on this page */ @@ -1160,174 +1134,3 @@ uint32 UBTreeRecycleQueuePageDump(Relation rel, Buffer buf, bool recordEachItem, return errVerified; } - -static void UBTRecycleQueueExecVerify(int mode, UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, - UBTRecycleMeta metaData, OffsetNumber offnum, bool fromInsert) -{ - BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); - - int module = mode & USTORE_VERIFY_SUB_MOD_MASK; - if (module & USTORE_VERIFY_URQ_SUB_HEADER) { - UBTRecycleQueueVerifyHeader(header, rel, blkno); - } - if (module & USTORE_VERIFY_URQ_SUB_ITEM) { - UBTRecycleQueueVerifyItem(header, rel, blkno, offnum, fromInsert); - } - if (module & USTORE_VERIFY_URQ_SUB_METADATA) { - UBTRecycleMetaDataVerify(metaData, rel, blkno); - } -} - -void UBTRecycleQueueVerifyPageOffline(Relation rel, Page page, BlockNumber blkno) -{ - UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, blkno); - UBTRecycleMeta metaData = NULL; - int mode = USTORE_VERIFY_URQ_SUB_HEADER & USTORE_VERIFY_URQ_SUB_ITEM; - if (IsMetaPage(blkno)) { - metaData = (UBTRecycleMeta)PageGetContents(page); - mode &= USTORE_VERIFY_URQ_SUB_METADATA; - } - UBTRecycleQueueExecVerify(mode, header, rel, blkno, metaData, InvalidOffset, false); -} - -static void UBTRecycleQueueVerifyHeader(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno) -{ - BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - uint32 urqBlocks = MaxBlockNumber; - bool headerError = false; - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - - if (rel != NULL) { - RelationOpenSmgr(rel); - urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); - } - - headerError = (header->flags > (URQ_HEAD_PAGE | URQ_TAIL_PAGE)) || (IsNormalOffset(header->head) && !IsNormalOffset(header->tail)) || - (!IsNormalOffset(header->head) && IsNormalOffset(header->tail)) || (header->freeItems > BlockGetMaxItems(blkno)) || - (header->prevBlkno == header->nextBlkno) || (header->prevBlkno == blkno || header->nextBlkno == blkno) || - (header->prevBlkno >= urqBlocks || header->nextBlkno >= urqBlocks); - - if (headerError) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq header is invalid : flags=%u, head=%d, tail=%d, " - "free_items=%d, free_list_head=%d, prev_blkno=%u, next_blkno=%u, rnode[%u,%u,%u], block %u.", - header->flags, header->head, header->tail, header->freeItems, header->freeListHead, - header->prevBlkno, header->nextBlkno, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - } -} - -static void UBTRecycleQueueVerifyAllItems(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno) -{ - TransactionId maxXid = ReadNewTransactionId(); - TransactionId prevXid = 0; - uint16 itemCount = 0; - uint16 itemMaxNum = BlockGetMaxItems(blkno); - uint16 currOffset = header->head; - uint16 prevOffset = InvalidOffset; - UBTRecycleQueueItem item = NULL; - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - - while (IsNormalOffset(currOffset) && itemCount <= itemMaxNum) { - if (currOffset == itemMaxNum) { - break; - } - item = &header->items[currOffset]; - if (item->prev != prevOffset || item->next == currOffset) { - break; - } - if (item->xid > maxXid || item->xid < prevXid) { - break; - } - itemCount++; - prevXid = item->xid; - prevOffset = currOffset; - currOffset = item->next; - } - - uint16 freelistOffset = header->freeListHead; - while (freelistOffset != InvalidOffset && itemCount <= itemMaxNum) { - if (freelistOffset == itemMaxNum) { - break; - } - item = &header->items[freelistOffset]; - if (item->blkno == InvalidBlockNumber && item->xid == InvalidTransactionId && - item->prev == InvalidOffset) { - itemCount++; - freelistOffset = item->next; - } - } - - if (itemCount + header->freeItems != itemMaxNum) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq items are invalid : (items info : curr_item_offset = %u, " - "prev_offset = %u, item_count = %u, free_list_offset = %u, free_items = %u, next_xid = %ld), " - "rnode[%u,%u,%u], block %u", currOffset, prevOffset, itemCount, freelistOffset, header->freeItems, - maxXid, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - } -} - -static void UBTRecycleQueueVerifyItem(UBTRecycleQueueHeader header, Relation rel, BlockNumber blkno, OffsetNumber offnum, bool fromInsert) -{ - BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - bool itemError = false; - UBTRecycleQueueItem item = NULL; - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - - if (offnum != InvalidOffset) { - item = &header->items[offnum]; - if (fromInsert) { - itemError = (item->blkno == InvalidBlockNumber) || (item->next == offnum); - } else { - itemError = (header->freeListHead != offnum) || (item->xid != InvalidTransactionId) || - (item->blkno != InvalidBlockNumber) || (item->prev != InvalidOffset); - } - if (itemError) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq item is invalid: xid=%ld, blkno=%u, prev=%u, next=%u, rnode[%u,%u,%u], block %u", - item->xid, item->blkno, item->prev, item->next, rNode.spcNode, rNode.dbNode, rNode.relNode, blkno))); - } - } - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) - - UBTRecycleQueueVerifyAllItems(header, rel, blkno); -} - -static void UBTRecycleMetaDataVerify(UBTRecycleMeta metaData, Relation rel, BlockNumber metaBlkno) -{ - BYPASS_VERIFY(USTORE_VERIFY_MOD_UBTREE, rel); - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - BlockNumber urqBlocks = MaxBlockNumber; - BlockNumber indexBlocks = metaData->nblocksUpper; - RelFileNode rNode = rel ? 
rel->rd_node : RelFileNode{InvalidOid, InvalidOid, InvalidOid}; - - if (rel != NULL) { - RelationOpenSmgr(rel); - urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); - indexBlocks = RelationGetNumberOfBlocks(rel); - } - - bool metaError = (metaData->headBlkno == 1 - metaBlkno) || (metaData->tailBlkno == 1 - metaBlkno); - if (!metaError && rel != NULL) { - if (metaData->headBlkno >= urqBlocks || metaData->tailBlkno >= urqBlocks) { - urqBlocks = Max(minRecycleQueueBlockNumber, smgrnblocks(rel->rd_smgr, FSM_FORKNUM)); - metaError = metaData->headBlkno >= urqBlocks || metaData->tailBlkno >= urqBlocks; - } - if (!metaError && metaData->nblocksUpper > indexBlocks) { - indexBlocks = RelationGetNumberOfBlocks(rel); - metaError = metaData->nblocksUpper > indexBlocks; - } - } - if (metaError) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED),errmsg( - "[Verify URQ] urq meta is invalid : (meta info : headBlkno = %u, tailBlkno = %u, " - "nblocksUpper = %u, nblocksLower = %u; urq_blocks = %u, index_blocks = %u), rnode[%u,%u,%u], block %u", - metaData->headBlkno, metaData->tailBlkno, metaData->nblocksUpper, metaData->nblocksLower, - urqBlocks, indexBlocks, rNode.spcNode, rNode.dbNode, rNode.relNode, metaBlkno))); - } -} \ No newline at end of file diff --git a/src/gausskernel/storage/access/ubtree/ubtree.cpp b/src/gausskernel/storage/access/ubtree/ubtree.cpp index 404576b15c..6cf8d2dbe3 100644 --- a/src/gausskernel/storage/access/ubtree/ubtree.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtree.cpp @@ -1220,7 +1220,8 @@ static void IndexPageShiftBase(Relation rel, Page page, int64 delta, bool needWa } END_CRIT_SECTION(); - UBTreeVerifyAll(rel, page, BufferGetBlockNumber(buf), InvalidOffsetNumber, false); + UBTreeVerify(rel, page, BufferGetBlockNumber(buf)); + WHITEBOX_TEST_STUB("IndexPageShiftBase-end", WhiteboxDefaultErrorEmit); } diff --git a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp 
b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp index c0ddd21453..ec3d824e62 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp @@ -87,7 +87,7 @@ bool VerifyAndDoUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoR urec->SetUrp(toUrecptr); UndoTraversalState rc = FetchUndoRecord(urec, NULL, InvalidBlockNumber, InvalidOffsetNumber, InvalidTransactionId, false, NULL); - DELETE_EX(urec); + DELETE_EX(urec); /* already processed. */ if (rc != UNDO_TRAVERSAL_COMPLETE) { ereport(ERROR, (errmodule(MOD_USTORE), @@ -97,7 +97,7 @@ bool VerifyAndDoUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoR } } -/* + /* * Fetch the multiple undo records which can fit into uur_segment; sort * them in order of reloid and block number then apply them together * page-wise. Repeat this until we get invalid undo record pointer. @@ -174,14 +174,7 @@ bool VerifyAndDoUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoR return true; } -/* - * execute_undo_actions - Execute the undo actions - * - * xid - Transaction id that is getting rolled back. - * fromUrecptr - undo record pointer from where to start applying undo action. - * toUrecptr - undo record pointer upto which point apply undo action. - * isTopTxn - true if rollback is for top transaction. 
- */ + void ExecuteUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoRecPtr toUrecptr, UndoSlotPtr slotPtr, bool isTopTxn, UndoPersistence plevel) { diff --git a/src/gausskernel/storage/access/ustore/knl_upage.cpp b/src/gausskernel/storage/access/ustore/knl_upage.cpp index f23451476e..f4db09294d 100644 --- a/src/gausskernel/storage/access/ustore/knl_upage.cpp +++ b/src/gausskernel/storage/access/ustore/knl_upage.cpp @@ -749,7 +749,7 @@ void UpageVerify(UHeapPageHeader header, XLogRecPtr lastRedo, TupleDesc tupDesc, void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, RelFileNode* rNode, BlockNumber blkno, bool isRedo) { if (lastRedo != InvalidXLogRecPtr && PageGetLSN(header) < lastRedo) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|HEADER] Current lsn(%X/%X) in page is smaller than last checkpoint(%X/%X)), " "rnode[%u,%u,%u], block %u.", (uint32)(PageGetLSN(header) >> HIGH_BITS_LENGTH_OF_LSN), (uint32)PageGetLSN(header), (uint32)(lastRedo >> HIGH_BITS_LENGTH_OF_LSN), (uint32)lastRedo, @@ -759,20 +759,20 @@ void UpageVerifyHeader(UHeapPageHeader header, XLogRecPtr lastRedo, RelFileNode* if (unlikely(header->pd_lower < (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData(header)) || header->pd_lower > header->pd_upper || header->pd_upper > header->pd_special || header->potential_freespace > BLCKSZ || header->pd_special != BLCKSZ)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|HEADER] lower = %u, upper = %u, special = %u, potential = %u," " rnode[%u,%u,%u], block %u.", header->pd_lower, header->pd_upper, header->pd_special, header->potential_freespace, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); } if (header->td_count <= 0 || header->td_count > UHEAP_MAX_TD) { - ereport(ustore_verify_errlevel(), 
(errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|HEADER] tdcount invalid: tdcount = %u, rnode[%u,%u,%u], block %u.", header->td_count, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); } if (TransactionIdFollows(header->pd_prune_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|HEADER] prune_xid invalid: prune_xid = %lu, nextxid = %lu." " rnode[%u,%u,%u], block %u.", header->pd_prune_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno))); @@ -799,7 +799,7 @@ static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber offnum, TupleD td_info.td_slot = tdSlot; if ((tdSlot != UHEAPTUP_SLOT_FROZEN)) { if (tdSlot < 1 || tdSlot > header->td_count) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|TUPLE]tdSlot out of bounds, tdSlot = %d, td_count = %d, " "rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, header->td_count, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -815,7 +815,7 @@ static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber offnum, TupleD TransactionId xid = this_trans->xactid; if (TransactionIdFollows(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|TUPLE]tdxid invalid: tdSlot = %d, tdxid = %lu, nextxid = %lu, " "rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -823,7 +823,7 @@ static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber offnum, TupleD if 
(TransactionIdIsValid(xid) && !UHeapTransactionIdDidCommit(xid) && TransactionIdPrecedes(xid, g_instance.undo_cxt.globalFrozenXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|TUPLE]tdxid %lu in tdslot(%d) is smaller than global frozen xid %lu, " "rnode[%u,%u,%u], block %u, offnum %u.", xid, tdSlot, g_instance.undo_cxt.globalFrozenXid, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -833,7 +833,7 @@ static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber offnum, TupleD if (!hasInvalidXact && IS_VALID_UNDO_REC_PTR(td_info.urec_add) && (!TransactionIdIsValid(td_info.xid) || (TransactionIdIsValid(tupXid) && !TransactionIdEquals(td_info.xid, tupXid)))) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|TUPLE] tup xid inconsistency with td: tupxid = %lu, tdxid = %lu, urp %lu, " "rnode[%u,%u,%u], block %u, offnum %u.", tupXid, td_info.xid, td_info.urec_add, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -844,7 +844,7 @@ static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber offnum, TupleD int tupSize = (rel == NULL) ? 
0 : CalTupSize(rel, diskTuple, tupDesc); if (tupSize > (int)RowPtrGetLen(rp) || (diskTuple->reserved != 0 && diskTuple->reserved != 0xFF)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|TUPLE]corrupted tuple: tupsize = %d, rpsize = %u, " "rnode[%u,%u,%u], block %u, offnum %u.", tupSize, RowPtrGetLen(rp), rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -857,14 +857,14 @@ static void UpageVerifyTuple(UHeapPageHeader header, OffsetNumber offnum, TupleD if (hasInvalidXact) { if (!UHeapTransactionIdDidCommit(tupXid) && !t_thrd.xlog_cxt.InRecovery) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|TUPLE] tup xid not commit, tupxid = %lu, " "rnode[%u,%u,%u], block %u, offnum %u.", tupXid, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); return; } if (TransactionIdEquals(td_info.xid, tupXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|TUPLE] td reused but xid equal td: tupxid = %lu, tdxid = %lu, " "rnode[%u,%u,%u], block %u, offnum %u.", tupXid, td_info.xid, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -889,7 +889,7 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(page); if (!RowPtrIsNormal(rowPtr)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr is abnormal (flags:%d, offset %d, len %d), " "rnode[%u,%u,%u], block %u, offnum %u.", rowPtr->flags, offset, len, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -897,7 +897,7 @@ static void 
UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re } if (tdSlot < 1 || tdSlot > phdr->td_count) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] Invalid tdSlot %d, td count of page is %d, " "rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, phdr->td_count, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -909,7 +909,7 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re TransactionId tdXid = thistrans->xactid; if (UHEAP_XID_IS_LOCK(diskTuple->flag)) { if (!TransactionIdEquals(locker, topXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] locker invalid: locker %lu, topxid %lu, " "rnode[%u,%u,%u], block %u, offnum %u.", locker, topXid, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum))); @@ -917,7 +917,7 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re } } else if (!IS_VALID_UNDO_REC_PTR(tdUrp) || hasInvalidXact || !TransactionIdEquals(tdXid, locker) || !TransactionIdEquals(tdXid, topXid) || !TransactionIdEquals(tdXid, tupXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] Td xid invalid: tdSlot %d, tdxid %lu, topxid %lu, " "tupxid %lu, isInvalidSlot %d, rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, tdXid, topXid, tupXid, hasInvalidXact, @@ -936,7 +936,7 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re uint32 tupLen = SHORTALIGN(RowPtrGetLen(rp)); if (tupOffset < offset) { if (tupOffset + tupLen > offset) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] 
Rowptr data is abnormal, flags %d, offset %u," " len %d, alignTupLen %u, targetRpOffset %u, " "rnode[%u,%u,%u], block %u, offnum %u, offnum2 %u.", @@ -944,7 +944,7 @@ static void UpageVerifyRowptr(RowPtr *rowPtr, Page page, OffsetNumber offnum, Re rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, offnum, i))); } } else if (offset + len > tupOffset) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ROWPTR] Rowptr data is abnormal, flags %d, offset %u," " len %d, alignTupLen %u, targetRpOffset %u, targetRpLen %u, " "rnode[%u,%u,%u], block %u, offnum %u, offnum2 %u.", @@ -972,7 +972,7 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, RelFileNode* rNode, Blo sortPtr->end = sortPtr->start + (int)SHORTALIGN(RowPtrGetLen(rp)); sortPtr->offset = i; if (sortPtr->start < header->pd_upper || sortPtr->end > header->pd_special) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ALLROWPTR] rpstart = %u, rplen = %u, pdlower = %u, pdupper = %u, " "rnode[%u,%u,%u], block %u, offnum %u.", RowPtrGetOffset(rp), RowPtrGetLen(rp), header->pd_lower, header->pd_upper, @@ -983,7 +983,7 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, RelFileNode* rNode, Blo } else if (RowPtrIsDeleted(rp)) { tdSlot = RowPtrGetTDSlot(rp); if (tdSlot == UHEAPTUP_SLOT_FROZEN) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ALLROWPTR] tdslot frozen, tdSlot = %d, " "rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, i))); @@ -991,7 +991,7 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, RelFileNode* rNode, Blo } if (tdSlot < 1 || tdSlot > header->td_count) { - ereport(ustore_verify_errlevel(), 
(errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ALLROWPTR] tdSlot out of bounds, tdSlot = %d, " "td_count = %d, rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, header->td_count, rNode->spcNode, rNode->dbNode, rNode->relNode, blkno, i))); @@ -1001,7 +1001,7 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, RelFileNode* rNode, Blo UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(header); TD * this_trans = &tdPtr->td_info[tdSlot - 1]; if (TransactionIdFollows(this_trans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ALLROWPTR] tdxid invalid: tdSlot %d, tdxid = %lu, " "nextxid = %lu, rnode[%u,%u,%u], block %u, offnum %u.", tdSlot, this_trans->xactid, t_thrd.xact_cxt.ShmemVariableCache->nextXid, @@ -1024,7 +1024,7 @@ static void UpageVerifyAllRowptr(UHeapPageHeader header, RelFileNode* rNode, Blo RpSort temp_ptr1 = &rowptrs[i]; RpSort temp_ptr2 = &rowptrs[i + 1]; if (temp_ptr1->end > temp_ptr2->start) { - ereport(ustore_verify_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), errmsg("[UPAGE_VERIFY|ALLROWPTR]corrupted line pointer: rp1offnum %u, rp1start = %u, rp1end = %u, " "rp2offnum = %u, rp2start = %u, rp2end = %u, rnode[%u,%u,%u], block %u.", temp_ptr1->offset, temp_ptr1->start, temp_ptr1->end, diff --git a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp index d673af59dd..38e96ae3d8 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp @@ -634,79 +634,50 @@ void UndoRecordVerify(_in_ UndoRecord *urec) CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) if (!TransactionIdIsValid(urec->Xid())) { - ereport(WARNING, 
(errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu is invalid, urp %lu"), urec->Xid(), urec->Urp()))); + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, xid %lu invalid, urp %lu"), urec->Xid(), urec->Urp()))); } if (TransactionIdIsValid(urec->Xid()) && TransactionIdFollowsOrEquals(urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. xid %lu >= nextXid %lu, urp %lu"), + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, xid %lu >= nextXid %lu, urp %lu"), urec->Xid(), t_thrd.xact_cxt.ShmemVariableCache->nextXid, urec->Urp()))); } - if (TransactionIdIsValid(urec->OldXactId()) && - TransactionIdFollowsOrEquals(urec->OldXactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. oldXactId %lu >= nextXid %lu, urp %lu"), - urec->OldXactId(), t_thrd.xact_cxt.ShmemVariableCache->nextXid, urec->Urp()))); - } if (!(IS_VALID_UNDO_REC_PTR(urec->Urp()))) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu is invalid"), urec->Urp()))); - return; + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, urp %lu invalid"), urec->Urp()))); } - CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) int zoneId = (int)UNDO_PTR_GET_ZONE_ID(urec->Urp()); undo::UndoZone *uzone = undo::UndoZoneGroup::GetUndoZone(zoneId, false); - Assert(uzone != NULL); if (uzone == NULL) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uzone is null. zoneId %d urp %lu"), zoneId, urec->Urp()))); + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, zone is null. 
zoneId %d, urp %lu"), zoneId, urec->Urp()))); return; } if (IS_VALID_UNDO_REC_PTR(urec->Urp()) && urec->Urp() > uzone->GetInsertURecPtr()) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. urp %lu > insertURecPtr %lu, zoneId %d"), + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, urp %lu > insertURecPtr %lu, zoneId %d"), urec->Urp(), uzone->GetInsertURecPtr(), zoneId))); } - if (IS_VALID_UNDO_REC_PTR(urec->Blkprev())) { - UndoRecPtr blkPrevZid = UNDO_PTR_GET_ZONE_ID(urec->Blkprev()); - undo::UndoZone *blkPrevZone = undo::UndoZoneGroup::GetUndoZone(blkPrevZid, false); - if (urec->Blkprev() > blkPrevZone->GetInsertURecPtr()) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. Blkprev %lu > insertURecPtr %lu, zoneId %d, urp %lu"), - urec->Blkprev(), uzone->GetInsertURecPtr(), zoneId, urec->Urp()))); - } - } - if ((urec->Uinfo() & UNDO_UREC_INFO_TRANSAC) != 0 || (urec->Uinfo() & UNDO_UREC_INFO_BLOCK) != 0) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d error, urp %lu"), - (int)urec->Uinfo(), urec->Urp()))); - } if ((urec->Uinfo() & UNDO_UREC_INFO_OLDTD) != 0 && !TransactionIdIsValid(urec->OldXactId())) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d, oldXactId %lu is invalid, urp %lu"), + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, uinfo %d, oldXactId %lu is invalid, urp %lu"), (int)urec->Uinfo(), urec->OldXactId(), urec->Urp()))); } if ((urec->Uinfo() & UNDO_UREC_INFO_HAS_PARTOID) != 0 && urec->Partitionoid() == InvalidOid) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
uinfo %d, partitionoid is invalid, urp %lu"), + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, uinfo %d, partitionoid is invalid, urp %lu"), (int)urec->Uinfo(), urec->Urp()))); } if ((urec->Uinfo() & UNDO_UREC_INFO_HAS_TABLESPACEOID) != 0 && urec->Tablespace() == InvalidOid) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. uinfo %d, tablespace is invalid, urp %lu"), + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, uinfo %d, tablespace is invalid, urp %lu"), (int)urec->Uinfo(), urec->Urp()))); } if (urec->Utype() <= UNDO_UNKNOWN || urec->Utype() > UNDO_UPDATE) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. utype %d is invalid, urp %lu"), + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoRecordVerify invalid, utype %d is invalid, urp %lu"), urec->Utype(), urec->Urp()))); } - if ((urec->Utype() == UNDO_INSERT && urec->PayLoadLen() != 0) || - (urec->Utype() == UNDO_INSERT && (urec->Uinfo() & UNDO_UREC_INFO_PAYLOAD) != 0)) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[UNDO_RECORD_VERIFY]failed. 
utype %d , payLoadLen %hu, uinfo %d, urp %lu"), - urec->Utype(), urec->PayLoadLen(), (int)urec->Uinfo(), urec->Urp()))); - } } diff --git a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp index 96df2f80fd..0c6cb371c6 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp @@ -667,7 +667,7 @@ void ForceVacuumUHeapRelBypass(Relation onerel, VacuumStmt *vacstmt, BufferAcces vac_close_indexes(nindexes, irel, NoLock); } - /* SETP 6: cleanup */ + /* STEP 6: cleanup */ for (int i = 0; i < nindexes; i++) { /* summarize the index status information */ if (indstats[i] != NULL) { diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp index 77e55b0629..a6a85602da 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp @@ -138,7 +138,6 @@ void PrepareUndoMeta(XlogUndoMeta *meta, UndoPersistence upersistence, UndoRecPt uzone->MarkDirty(); } uzone->AdvanceInsertURecPtr(UNDO_PTR_GET_OFFSET(lastRecord), lastRecordSize); - UndoZoneVerifyPtr(uzone); if (uzone->GetForceDiscardURecPtr() > uzone->GetInsertURecPtr()) { ereport(WARNING, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("zone %d forceDiscardURecPtr %lu > insertURecPtr %lu."), uzone->GetZoneId(), uzone->GetForceDiscardURecPtr(), uzone->GetInsertURecPtr()))); @@ -160,9 +159,9 @@ void FinishUndoMeta(UndoPersistence upersistence) if (uzone == NULL) { ereport(PANIC, (errmsg("FinishUndoMeta: uzone is NULL"))); } - UndoZoneVerify(uzone); uzone->GetSlotBuffer().UnLock(); uzone->UnlockUndoZone(); + UndoZoneVerify(uzone); return; } @@ -212,7 +211,7 @@ void UpdateTransactionSlot(TransactionId xid, XlogUndoMeta *meta, UndoRecPtr sta meta->SetInfo(XLOG_UNDOMETA_INFO_SLOT); Assert(meta->dbid != INVALID_DB_OID); } - 
UndoTranslotVerifyPtr(slot, INVALID_UNDO_SLOT_PTR); + TransactionSlotVerify(slot, t_thrd.undo_cxt.slotPtr[upersistence]); return; } @@ -245,6 +244,7 @@ void RedoUndoMeta(XLogReaderState *record, XlogUndoMeta *meta, UndoRecPtr startU zone->MarkDirty(); zone->SetLSN(lsn); zone->UnlockUndoZone(); + UndoZoneVerify(zone); } UndoSlotPtr slotPtr = MAKE_UNDO_PTR(zone->GetZoneId(), meta->slotPtr); if (!IsSkipInsertSlot(slotPtr)) { @@ -267,7 +267,6 @@ void RedoUndoMeta(XLogReaderState *record, XlogUndoMeta *meta, UndoRecPtr startU } UnlockReleaseBuffer(buf.Buf()); } - UndoZoneVerify(zone); return; } @@ -644,7 +643,7 @@ void AllocateUndoZone() if (!g_instance.attr.attr_storage.enable_ustore) { return; } - AllocateZonesBeforXid(); + AllocateZonesBeforeXid(); #endif } @@ -660,7 +659,6 @@ void RedoRollbackFinish(UndoSlotPtr slotPtr, XLogRecPtr lsn) slot->UpdateRollbackProgress(); PageSetLSN(page, lsn); MarkBufferDirty(buf.Buf()); - UndoTranslotVerify(slot, slotPtr); } UnlockReleaseBuffer(buf.Buf()); } @@ -714,7 +712,6 @@ void UpdateRollbackFinish(UndoSlotPtr slotPtr) } MarkBufferDirty(buf.Buf()); END_CRIT_SECTION(); - UndoTranslotVerify(slot, slotPtr); UnlockReleaseBuffer(buf.Buf()); return; } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp index 0f524d0213..a513dffada 100755 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp @@ -109,7 +109,7 @@ bool AsyncRollback(UndoZone *zone, UndoSlotPtr recycle, TransactionSlot *slot) if (!u_sess->attr.attr_storage.enable_ustore_async_rollback) { return true; } - UndoTranslotVerify(slot, recycle); + TransactionSlotVerify(slot, recycle); UndoRecPtr prev = GetPrevUrp(slot->EndUndoPtr()); AddRollbackRequest(slot->XactId(), prev, slot->StartUndoPtr(), slot->DbId(), recycle); @@ -935,6 +935,7 @@ void UndoRecycleMain() oldestFrozenXidInUndo, frozenXid))); if 
(RecycleUndoSpace(zone, recycleXmin, frozenXid, &recycleXid, forceRecycleXid, oldestXmin)) { recycled = true; + UndoZoneVerify(zone); } isAnyZoneUsed = true; ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg( @@ -943,7 +944,6 @@ void UndoRecycleMain() oldestFrozenXidInUndo, frozenXid))); oldestFrozenXidInUndo = oldestFrozenXidInUndo > frozenXid ? frozenXid : oldestFrozenXidInUndo; UpdateRecyledXid(recycleMaxXIDs, &recycleMaxXIDCount, recycleXid); - UndoZoneVerify(zone); } } } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp index 333a6d1a36..62c2b8eff8 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp @@ -455,7 +455,6 @@ void UndoSpace::RecoveryUndoSpace(int fd, UndoSpaceType type) segSize = USEG_SIZE(UNDO_DB_OID); } pg_atomic_fetch_add_u32(&g_instance.undo_cxt.undoTotalSize, usp->Used(zoneId)); - UndoZoneVerify(uzone); uint64 transUndoThresholdSize = GET_UNDO_LIMIT_SIZE_PER_XACT * BLCKSZ; const uint64 MAX_OFFSET = (UNDO_LOG_MAX_SIZE - transUndoThresholdSize) - segSize; if (usp->Tail() < usp->Head() || usp->Tail() > MAX_OFFSET) { diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp index 913be07b89..a365fd08cd 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp @@ -136,7 +136,6 @@ TransactionSlot *UndoSlotBuffer::FetchTransactionSlot(UndoSlotPtr slotPtr) PageInit(page, BLCKSZ, 0); } TransactionSlot *slot = (TransactionSlot *)((char *)page + slotOffset); - UndoTranslotVerifyBuffer(slotPtr); return slot; } @@ -325,107 +324,31 @@ UndoSlotPtr GetNextSlotPtr(UndoSlotPtr slotPtr) return MAKE_UNDO_PTR(UNDO_PTR_GET_ZONE_ID(slotPtr), offset); } -static void verifyXid(TransactionSlot *slot) -{ - UNDO_BYPASS_VERIFY; - - 
CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - TransactionId xid = slot->XactId(); - if (!TransactionIdIsValid(xid)) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. slot xactId %lu is invalid"), xid))); - return; - } - if (TransactionIdIsValid(xid) && - TransactionIdFollowsOrEquals(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. slot xactId %lu >= nextXid %lu"), - xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } -} - -void UndoTranslotVerifyPtr(TransactionSlot *slot, UndoSlotPtr slotPtr) +void TransactionSlotVerify(TransactionSlot *slot, UndoSlotPtr slotPtr) { UNDO_BYPASS_VERIFY; CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) int zoneId = (int)UNDO_PTR_GET_ZONE_ID(slot->StartUndoPtr()); UndoZone *zone = undo::UndoZoneGroup::GetUndoZone(zoneId, false); - if (slot->StartUndoPtr() > slot->EndUndoPtr() || slot->EndUndoPtr() > zone->GetInsertURecPtr()) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. startUndoPtr %lu , endUndoPtr %lu, zoneId %d, insertUrecPtr %lu "), - slot->StartUndoPtr(), slot->EndUndoPtr(), zoneId, zone->GetInsertURecPtr()))); + if (slot->StartUndoPtr() > slot->EndUndoPtr() || slot->EndUndoPtr() > zone->GetInsertURecPtr() || + zoneId != (int)UNDO_PTR_GET_ZONE_ID(slot->EndUndoPtr()) || + zoneId != (int)UNDO_PTR_GET_ZONE_ID(slotPtr)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("TransactionSlotVerify invalid: slotPtr %lu, startUndoPtr %lu , endUndoPtr %lu, zoneId %d, " + "insertUrecPtr %lu.", slotPtr, slot->StartUndoPtr(), slot->EndUndoPtr(), zoneId, + zone->GetInsertURecPtr()))); } - if (zoneId != (int)UNDO_PTR_GET_ZONE_ID(slot->EndUndoPtr()) || - (slotPtr != INVALID_UNDO_SLOT_PTR && zoneId != (int)UNDO_PTR_GET_ZONE_ID(slotPtr))) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. 
startUndoPtr %lu and endUndoPtr %lu have different zoneIds zid %d, insert %lu "), - slot->StartUndoPtr(), slot->EndUndoPtr(), zoneId, zone->GetInsertURecPtr()))); + TransactionId xid = slot->XactId(); + if (!TransactionIdIsValid(xid)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("TransactionSlotVerify invalid: slotPtr %lu, xid is invalid %lu.", slotPtr, xid))); } -} - -void UndoTranslotVerifyBuffer(UndoSlotPtr slotPtr) -{ - UNDO_BYPASS_VERIFY; - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) - TransactionSlot *slot = NULL; - RelFileNode rnode; - UNDO_PTR_ASSIGN_REL_FILE_NODE(rnode, slotPtr, UNDO_SLOT_DB_OID); - Buffer buffer = ReadUndoBufferWithoutRelcache(rnode, UNDO_FORKNUM, UNDO_PTR_GET_BLOCK_NUM(slotPtr), RBM_NORMAL, - NULL, RELPERSISTENCE_PERMANENT); - Page page = BufferGetPage(buffer); - VerifyPageHeader(page); - - UndoSlotOffset prevEndUndoPtr = INVALID_UNDO_SLOT_PTR; - TransactionId prevXid = InvalidTransactionId; - - for (uint32 offset = UNDO_LOG_BLOCK_HEADER_SIZE; offset < BLCKSZ - MAXALIGN(sizeof(TransactionSlot)); - offset += MAXALIGN(sizeof(TransactionSlot))) { - slot = (TransactionSlot *)(page + offset); - - int zoneId = (int)UNDO_PTR_GET_ZONE_ID(slot->StartUndoPtr()); - UndoZone *zone = undo::UndoZoneGroup::GetUndoZone(zoneId, false); - if (zone == NULL) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. zone is null. zoneId %d, startUndoPtr %lu, offset %u"), - zoneId, slot->StartUndoPtr(), offset))); - break; - } - - UndoSlotPtr currSlotPtr = slotPtr - slotPtr % BLCKSZ + offset; - if (currSlotPtr >= zone->GetAllocateTSlotPtr()) { - break; - } - - if (prevEndUndoPtr != INVALID_UNDO_SLOT_PTR && slot->StartUndoPtr() != prevEndUndoPtr) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. 
startUndoPtr%lu is not equal to prevEndUndoPtr %lu"), - slot->StartUndoPtr(), prevEndUndoPtr))); - } - prevEndUndoPtr = slot->EndUndoPtr(); - if (TransactionIdIsValid(prevXid) && prevXid >= slot->XactId()) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDO_TRANSLOT]failed. prevXid %lu >= xactId %lu"), - slot->StartUndoPtr(), prevEndUndoPtr))); - } - prevXid = slot->XactId(); - verifyXid(slot); - UndoTranslotVerifyPtr(slot, currSlotPtr); + if (TransactionIdIsValid(xid) && TransactionIdFollowsOrEquals(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { + ereport(defence_errlevel(), (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("TransactionSlotVerify invalid: slotPtr %lu, xid %lu, nextXid %lu.", slotPtr, xid, + t_thrd.xact_cxt.ShmemVariableCache->nextXid))); } - ReleaseBuffer(buffer); -} - -void UndoTranslotVerify(TransactionSlot *slot, UndoSlotPtr slotPtr) -{ - UNDO_BYPASS_VERIFY; - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - verifyXid(slot); - UndoTranslotVerifyPtr(slot, slotPtr); - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_COMPLETE) - UndoTranslotVerifyBuffer(slotPtr); } } // namespace undo diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp index 4fe7df7266..c5d4309087 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp @@ -76,13 +76,7 @@ bool UndoZone::CheckNeedSwitch(void) { if (insertURecPtr_ < forceDiscardURecPtr_ || allocateTSlotPtr_ < recycleTSlotPtr_ || undoSpace_.Tail() < undoSpace_.Head() || slotSpace_.Tail() < slotSpace_.Head()) { - ereport(WARNING, (errmsg("cannot use this zone, undometacheck failed." - "zoneid: %d, insertURecPtr_: %lu, forceDiscardURecPtr_: %lu, discardURecPtr_: %lu," - "allocateTSlotPtr_: %lu, recycleTSlotPtr_: %lu." - "undoSpace: head %lu, tail %lu. 
slotSpace: head %lu, tail %lu.", - zid_, insertURecPtr_, forceDiscardURecPtr_, discardURecPtr_, allocateTSlotPtr_, - recycleTSlotPtr_, undoSpace_.Head(), undoSpace_.Tail(), slotSpace_.Head(), - slotSpace_.Tail()))); + LogUndoZoneInfo(this, WARNING, "Current Zone is not available, need to switch"); return true; } uint64 transUndoThresholdSize = GET_UNDO_LIMIT_SIZE_PER_XACT * BLCKSZ; @@ -942,7 +936,7 @@ UndoZone* UndoZoneGroup::GetUndoZone(int zid, bool isNeedInitZone) return uzone; } -void AllocateZonesBeforXid() +void AllocateZonesBeforeXid() { const int MAX_RETRY_TIMES = 3; int retry_times = 0; @@ -965,16 +959,7 @@ void AllocateZonesBeforXid() int tmpZid = zid - (int)upersistence * PERSIST_ZONE_COUNT; g_instance.undo_cxt.uZoneBitmap[upersistence] = bms_del_member(g_instance.undo_cxt.uZoneBitmap[upersistence], tmpZid); - ereport(LOG, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("cached zone %d not available, pid %lu, cur pid %lu. " - "zoneInfo: insertUrecPtr %lu, discardUrecPtr %lu, forceDiscardUrecPtr %lu, " - "allocateTSlotPtr %lu, recycleTSlotPtr %lu, recycleXid %lu, frozenXid %lu, " - "undoSpaceInfo: head %lu, tail %lu, slotSpaceInfo: head %lu, tail %lu."), - zid, uzone->GetAttachPid(), u_sess->attachPid, - uzone->GetInsertURecPtr(), uzone->GetDiscardURecPtr(), uzone->GetForceDiscardURecPtr(), - uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), uzone->GetRecycleXid(), - uzone->GetFrozenXid(), uzone->GetUndoSpace()->Head(), uzone->GetUndoSpace()->Tail(), - uzone->GetSlotSpace()->Head(), uzone->GetSlotSpace()->Tail()))); + LogUndoZoneInfo(uzone, LOG, "Current Zone is not available, need to switch"); } } DECLARE_NODE_NO(); @@ -1016,17 +1001,8 @@ reallocate_zone: goto reallocate_zone; } if (uzone->CheckNeedSwitch()) { - ereport(LOG, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("zone %d not available, pid %lu, cur pid %lu. 
" - "zoneInfo: insertUrecPtr %lu, discardUrecPtr %lu, forceDiscardUrecPtr %lu, " - "allocateTSlotPtr %lu, recycleTSlotPtr %lu, recycleXid %lu, frozenXid %lu, " - "undoSpaceInfo: head %lu, tail %lu, slotSpaceInfo: head %lu, tail %lu."), - zid, uzone->GetAttachPid(), u_sess->attachPid, - uzone->GetInsertURecPtr(), uzone->GetDiscardURecPtr(), uzone->GetForceDiscardURecPtr(), - uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), uzone->GetRecycleXid(), - uzone->GetFrozenXid(), uzone->GetUndoSpace()->Head(), uzone->GetUndoSpace()->Tail(), - uzone->GetSlotSpace()->Head(), uzone->GetSlotSpace()->Tail()))); - goto reallocate_zone; + LogUndoZoneInfo(uzone, LOG, "Current Zone is not available, need to switch"); + goto reallocate_zone; } uzone->Attach(); LWLockRelease(UndoZoneLock); @@ -1039,60 +1015,23 @@ reallocate_zone: return; } -void UndoZoneVerifyPtr(UndoZone *uzone) +void UndoZoneVerify(UndoZone *uzone) { UNDO_BYPASS_VERIFY; CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - LWLockAcquire(UndoZoneLock, LW_EXCLUSIVE); if (uzone->GetInsertURecPtr() < uzone->GetForceDiscardURecPtr() || uzone->GetForceDiscardURecPtr() < uzone->GetDiscardURecPtr() || uzone->GetAllocateTSlotPtr() < uzone->GetRecycleTSlotPtr() || uzone->GetUndoSpace()->Tail() < uzone->GetUndoSpace()->Head() || uzone->GetSlotSpace()->Tail() < uzone->GetSlotSpace()->Head()) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDOZONE]failed. insertPtr %lu, forceDiscardPtr %lu, discardPtr %lu. " - "allocateTSlotPtr %lu, recycleTSlotPtr %lu, " - "undoInfo: tail %lu, head %lu. 
slotInfo: tail %lu, head %lu"), - uzone->GetInsertURecPtr(), uzone->GetForceDiscardURecPtr(), uzone->GetDiscardURecPtr(), - uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), uzone->GetUndoSpace()->Tail(), - uzone->GetUndoSpace()->Head(), uzone->GetSlotSpace()->Tail(), uzone->GetSlotSpace()->Head()))); - } - LWLockRelease(UndoZoneLock); -} - -void UndoZoneVerify(UndoZone *uzone) -{ - UNDO_BYPASS_VERIFY; - - CHECK_VERIFY_LEVEL(USTORE_VERIFY_FAST) - UndoZoneVerifyPtr(uzone); - - LWLockAcquire(UndoZoneLock, LW_EXCLUSIVE); - UndoLogOffset insert = UNDO_PTR_GET_OFFSET(uzone->GetInsertURecPtr()); - UndoLogOffset tail = insert + UNDO_LOG_SEGMENT_SIZE - insert % UNDO_LOG_SEGMENT_SIZE; - UndoLogOffset forceDiscard = UNDO_PTR_GET_OFFSET(uzone->GetForceDiscardURecPtr()); - UndoLogOffset head = (forceDiscard / UNDO_LOG_SEGMENT_SIZE) * UNDO_LOG_SEGMENT_SIZE; - if (tail != UNDO_PTR_GET_OFFSET(uzone->GetUndoSpace()->Tail()) || - head != UNDO_PTR_GET_OFFSET(uzone->GetUndoSpace()->Head())) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDOZONE]failed. insertPtr %lu, forceDiscardPtr %lu, tail %lu, head %lu. " - "zoneInfo: insertPtr %lu, forceDiscardPtr %lu, tail %lu, head %lu"), - insert, forceDiscard, tail, head, uzone->GetInsertURecPtr(), uzone->GetForceDiscardURecPtr(), - uzone->GetUndoSpace()->Tail(), uzone->GetUndoSpace()->Head()))); + ereport(defence_errlevel(), (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT("UndoZoneVerify invalid: insertPtr %lu, forceDiscardPtr %lu, discardPtr %lu. " + "allocateTSlotPtr %lu, recycleTSlotPtr %lu, " + "undoSpaceInfo: tail %lu, head %lu. 
undoSlotInfo: tail %lu, head %lu"), + uzone->GetInsertURecPtr(), uzone->GetForceDiscardURecPtr(), uzone->GetDiscardURecPtr(), + uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), uzone->GetUndoSpace()->Tail(), + uzone->GetUndoSpace()->Head(), uzone->GetSlotSpace()->Tail(), uzone->GetSlotSpace()->Head()))); } - UndoSlotPtr allocate = UNDO_PTR_GET_OFFSET(uzone->GetAllocateTSlotPtr()); - UndoLogOffset slotTail = (UndoLogOffset)(allocate + UNDO_META_SEGMENT_SIZE - allocate % UNDO_META_SEGMENT_SIZE); - UndoSlotPtr recycle = UNDO_PTR_GET_OFFSET(uzone->GetRecycleTSlotPtr()); - UndoLogOffset slotHead = (UndoLogOffset)((recycle / UNDO_META_SEGMENT_SIZE) * UNDO_META_SEGMENT_SIZE); - if (slotTail != UNDO_PTR_GET_OFFSET(uzone->GetSlotSpace()->Tail()) || - slotHead != UNDO_PTR_GET_OFFSET(uzone->GetSlotSpace()->Head())) { - ereport(WARNING, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("[VERIFY_UNDOZONE]failed. allocatePtr %lu, recyclePtr %lu, tail %lu, head %lu. " - "zoneInfo: allocatePtr %lu, recyclePtr %lu, tail %lu, head %lu"), - allocate, recycle, slotTail, slotHead, uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), - uzone->GetSlotSpace()->Tail(), uzone->GetSlotSpace()->Head()))); - } - LWLockRelease(UndoZoneLock); } } // namespace undo diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h index dfab226f0e..85e06682ac 100644 --- a/src/include/access/reloptions.h +++ b/src/include/access/reloptions.h @@ -264,7 +264,7 @@ extern bytea* extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoption extern relopt_value* parseRelOptions(Datum options, bool validate, relopt_kind kind, int* numrelopts); extern void* allocateReloptStruct(Size base, relopt_value* options, int numoptions); extern void fillRelOptions(void* rdopts, Size basesize, relopt_value* options, int numoptions, bool validate, - const relopt_parse_elt* elems, int nelems, bool kindIsHeap = false); + const relopt_parse_elt* elems, int nelems); extern void 
fillTdeRelOptions(List *options, char relkind); extern bytea* default_reloptions(Datum reloptions, bool validate, relopt_kind kind); extern bytea* heap_reloptions(char relkind, Datum reloptions, bool validate); diff --git a/src/include/access/ubtree.h b/src/include/access/ubtree.h index 7a6a67d8e8..bbb0ce55e2 100644 --- a/src/include/access/ubtree.h +++ b/src/include/access/ubtree.h @@ -673,12 +673,8 @@ extern void UBTreeDumpRecycleQueueFork(Relation rel, UBTRecycleForkNumber forkNu extern void UBTreeBuildCallback(Relation index, HeapTuple htup, Datum *values, const bool *isnull, bool tupleIsAlive, void *state); -// verify urq -void UBTRecycleQueueVerifyPageOffline(Relation rel, Page page, BlockNumber blkno); - -// verify ubtree -void UBTreeVerifyPage(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); -void UBTreeVerifyAll(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum, bool fromInsert); +void UBTreeVerify(Relation rel, Page page, BlockNumber blkno, OffsetNumber offnum = InvalidOffsetNumber, + bool fromInsert = false); void UBTreeRecordGetNewPageCost(UBTreeGetNewPageStats* stats, NewPageCostType type, TimestampTz start); #endif /* UBTREE_H */ diff --git a/src/include/access/ustore/knl_uverify.h b/src/include/access/ustore/knl_uverify.h index bac2755db5..a72106e6c8 100644 --- a/src/include/access/ustore/knl_uverify.h +++ b/src/include/access/ustore/knl_uverify.h @@ -40,12 +40,6 @@ #define USTORE_VERIFY_MOD_REDO 0x00080000 #define USTORE_VERIFY_MOD_MASK (USTORE_VERIFY_MOD_UPAGE | USTORE_VERIFY_MOD_UBTREE | USTORE_VERIFY_MOD_UNDO | USTORE_VERIFY_MOD_REDO) -/* Ustore urq verfication module list. */ -#define USTORE_VERIFY_URQ_SUB_HEADER 0x00020001 -#define USTORE_VERIFY_URQ_SUB_ITEM 0x00020002 -#define USTORE_VERIFY_URQ_SUB_METADATA 0x00020004 -#define USTORE_VERIFY_SUB_MOD_MASK 0x0000ffff - /* Ustore verification level of each modules. 
*/ typedef enum VerifyLevel { USTORE_VERIFY_NONE = 0, @@ -78,19 +72,4 @@ do { \ } \ } while(0) -extern inline int ustore_verify_errlevel(void) -{ - return u_sess->attr.attr_storage.ustore_verify ? WARNING : ERROR; -} - -#define BEGIN_SAVE_VERIFY(tmp) \ -{ \ - temp = u_sess->attr.attr_storage.ustore_verify; \ - u_sess->attr.attr_storage.ustore_verify = true; \ -} - -#define END_SAVE_VERIFY(tmp) \ -{ \ - u_sess->attr.attr_storage.ustore_verify = tmp; \ -} #endif diff --git a/src/include/access/ustore/undo/knl_uundotxn.h b/src/include/access/ustore/undo/knl_uundotxn.h index c24b7ce42f..2ebdee9bea 100644 --- a/src/include/access/ustore/undo/knl_uundotxn.h +++ b/src/include/access/ustore/undo/knl_uundotxn.h @@ -172,11 +172,7 @@ private: UndoSlotPtr GetNextSlotPtr(UndoSlotPtr slotPtr); -void UndoTranslotVerifyPtr(TransactionSlot *slot, UndoSlotPtr slotPtr); - -void UndoTranslotVerifyBuffer(UndoSlotPtr slotPtr); - -void UndoTranslotVerify(TransactionSlot *slot, UndoSlotPtr slotPtr); +void TransactionSlotVerify(TransactionSlot *slot, UndoSlotPtr slotPtr); } // namespace undo #endif // __KNL_UUNDOTXN_H__ diff --git a/src/include/access/ustore/undo/knl_uundozone.h b/src/include/access/ustore/undo/knl_uundozone.h index 67fbf55e1e..dc7b1bc4ef 100644 --- a/src/include/access/ustore/undo/knl_uundozone.h +++ b/src/include/access/ustore/undo/knl_uundozone.h @@ -60,6 +60,19 @@ typedef struct UndoZoneMetaInfo { upersistence = (UndoPersistence)(zid / (int)PERSIST_ZONE_COUNT); \ } while (0) +#define LogUndoZoneInfo(uzone, level, msg) \ + do { \ + ereport(level, (errmodule(MOD_UNDO), \ + errmsg(UNDOFORMAT("%s. 
ZoneId %d, insertUrecPtr %lu, discardUrecPtr %lu, forceDiscardUrecPtr %lu, " \ + "allocateTSlotPtr %lu, recycleTSlotPtr %lu, recycleXid %lu, frozenXid %lu, " \ + "undoSpaceInfo: head %lu, tail %lu, slotSpaceInfo: head %lu, tail %lu."), \ + msg, uzone->GetZoneId(), \ + uzone->GetInsertURecPtr(), uzone->GetDiscardURecPtr(), uzone->GetForceDiscardURecPtr(), \ + uzone->GetAllocateTSlotPtr(), uzone->GetRecycleTSlotPtr(), uzone->GetRecycleXid(), \ + uzone->GetFrozenXid(), uzone->GetUndoSpace()->Head(), uzone->GetUndoSpace()->Tail(), \ + uzone->GetSlotSpace()->Head(), uzone->GetSlotSpace()->Tail()))); \ + } while(0) + class UndoZone : public BaseObject { public: UndoZone(void); @@ -385,13 +398,10 @@ public: static bool UndoZoneInUse(int zid, UndoPersistence upersistence); }; // class UndoZoneGroup -void AllocateZonesBeforXid(); +void AllocateZonesBeforeXid(); void InitZone(UndoZone *uzone, const int zoneId, UndoPersistence upersistence); void InitUndoSpace(UndoZone *uzone, UndoSpaceType type); void exrto_recycle_residual_undo_file(char *FuncName); - -void UndoZoneVerifyPtr(UndoZone *uzone); - void UndoZoneVerify(UndoZone *uzone); } // namespace undo diff --git a/src/include/knl/knl_guc/knl_session_attr_storage.h b/src/include/knl/knl_guc/knl_session_attr_storage.h index 7aff202998..9825b46221 100755 --- a/src/include/knl/knl_guc/knl_session_attr_storage.h +++ b/src/include/knl/knl_guc/knl_session_attr_storage.h @@ -241,7 +241,6 @@ typedef struct knl_session_attr_storage { int umax_search_length_for_prune; int ustore_verify_level; int ustore_verify_module; - bool ustore_verify; int index_trace_level; int archive_interval; bool enable_ustore_sync_rollback; diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 35039ff727..3474b97759 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -421,7 +421,7 @@ typedef struct StdRdOptions { #define HEAP_MIN_FILLFACTOR 10 #define HEAP_DEFAULT_FILLFACTOR 100 -#define UHEAP_DEFAULT_FILLFACTOR 92 +#define 
UHEAP_DEFAULT_FILLFACTOR 100 #define UHEAP_MIN_TD 2 #define UHEAP_MAX_TD 128 diff --git a/src/test/regress/input/single_node_produce_commit_rollback.source b/src/test/regress/input/single_node_produce_commit_rollback.source index 8d81143f71..d9b87d20c8 100644 --- a/src/test/regress/input/single_node_produce_commit_rollback.source +++ b/src/test/regress/input/single_node_produce_commit_rollback.source @@ -997,41 +997,31 @@ select proc1(1); drop procedure proc1; drop table proc1_tb1; - --- test savepoint rollback for ustore -drop table test1_ustore; -create table test1_ustore(a int, b int) with (storage_type=ustore); -create unique index idx_test1_ustore on test1_ustore(a); -insert into test1_ustore values(3,1); - -create or replace procedure p1_test_ustore as -va int; +drop table if exists t1; +create table t1(c1 int) with (storage_type=ustore, fillfactor=80); +insert into t1 values(1); +create or replace procedure p_t1_for_rollback as begin - insert into test1_ustore values(5,1); - insert into test1_ustore values(7,1); + insert into t1 values(2); begin rollback; exception when others then - raise info 'err 1'; + raise info 'exception rollback 1'; rollback; end; exception when others then - raise info 'err 2'; + raise info 'exception rollback 2s'; rollback; end; / - -call p1_test_ustore(); -insert into test1_ustore values(5,1); -analyze test1_ustore; -select pg_sleep(3); -analyze test1_ustore; -select pg_sleep(3); -analyze test1_ustore; -select pg_sleep(3); -select * from test1_ustore; -drop table test1_ustore; -drop procedure p1_test_ustore; +call p_t1_for_rollback(); +insert into t1 values(3); +analyze t1; +select pg_sleep(1); +analyze t1; +select pg_sleep(1); +select * from t1; +drop table t1; drop procedure test_without_commit; drop procedure test_empty_sp; @@ -1090,6 +1080,7 @@ drop procedure pro_base13_08; drop procedure proc_test_3; drop procedure proc_test_2; drop procedure proc_test_1; +drop procedure p_t1_for_rollback; /*close AUTOCOMMIT cannot autocommit 
transaction*/ create table test(a int); diff --git a/src/test/regress/output/single_node_produce_commit_rollback.source b/src/test/regress/output/single_node_produce_commit_rollback.source index 016b8eaddb..9147f309b2 100644 --- a/src/test/regress/output/single_node_produce_commit_rollback.source +++ b/src/test/regress/output/single_node_produce_commit_rollback.source @@ -1490,67 +1490,55 @@ CONTEXT: referenced column: proc1 drop procedure proc1; drop table proc1_tb1; --- test savepoint rollback for ustore -drop table test1_ustore; -ERROR: table "test1_ustore" does not exist -create table test1_ustore(a int, b int) with (storage_type=ustore); -create unique index idx_test1_ustore on test1_ustore(a); -insert into test1_ustore values(3,1); -create or replace procedure p1_test_ustore as -va int; +drop table if exists t1; +NOTICE: table "t1" does not exist, skipping +create table t1(c1 int) with (storage_type=ustore, fillfactor=80); +insert into t1 values(1); +create or replace procedure p_t1_for_rollback as begin - insert into test1_ustore values(5,1); - insert into test1_ustore values(7,1); + insert into t1 values(2); begin rollback; exception when others then - raise info 'err 1'; + raise info 'exception rollback 1'; rollback; end; exception when others then - raise info 'err 2'; + raise info 'exception rollback 2s'; rollback; end; / -call p1_test_ustore(); +call p_t1_for_rollback(); --?ARNING: [Rollback skip] xid(.*), curxid(.*), start(.*), end(.*). There is no undo record in the top-level transaction. FirstUrp(.*), lastestUrp(.*), lastestXactUrp(.*). 
-CONTEXT: PL/pgSQL function p1_test_ustore() line 6 at ROLLBACK - p1_test_ustore ----------------- - -(1 row) - -insert into test1_ustore values(5,1); -analyze test1_ustore; -select pg_sleep(3); - pg_sleep ----------- +CONTEXT: PL/pgSQL function p_t1_for_rollback() line 4 at ROLLBACK + p_t1_for_rollback +------------------- (1 row) -analyze test1_ustore; -select pg_sleep(3); +insert into t1 values(3); +analyze t1; +select pg_sleep(1); pg_sleep ---------- (1 row) -analyze test1_ustore; -select pg_sleep(3); +analyze t1; +select pg_sleep(1); pg_sleep ---------- (1 row) -select * from test1_ustore; - a | b ----+--- - 3 | 1 - 5 | 1 +select * from t1; + c1 +---- + 1 + 3 (2 rows) -drop table test1_ustore; -drop procedure p1_test_ustore; +drop table t1; drop procedure test_without_commit; drop procedure test_empty_sp; drop procedure test_commit; @@ -1608,6 +1596,7 @@ drop procedure pro_base13_08; drop procedure proc_test_3; drop procedure proc_test_2; drop procedure proc_test_1; +drop procedure p_t1_for_rollback; /*close AUTOCOMMIT cannot autocommit transaction*/ create table test(a int); \set AUTOCOMMIT 'off' diff --git a/src/test/regress/output/ustore_subpartition_vacuum_partition.source b/src/test/regress/output/ustore_subpartition_vacuum_partition.source index 6607e71608..7cea6046b7 100644 --- a/src/test/regress/output/ustore_subpartition_vacuum_partition.source +++ b/src/test/regress/output/ustore_subpartition_vacuum_partition.source @@ -124,7 +124,7 @@ CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL SELECT pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 81920 + 73728 (1 row) --delete & insert @@ -139,7 +139,7 @@ INSERT INTO range_list_sales1 SELECT generate_series(1,1000), SELECT pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 122880 + 114688 (1 row) --vacuum full partition @@ -150,7 +150,7 @@ VACUUM FULL range_list_sales1 PARTITION (customer3); SELECT 
pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 81920 + 73728 (1 row) --delete & insert @@ -165,7 +165,7 @@ INSERT INTO range_list_sales1 SELECT generate_series(1,1000), SELECT pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 122880 + 114688 (1 row) --vacuum full subpartition @@ -180,7 +180,7 @@ VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); SELECT pg_relation_size('range_list_sales1'); pg_relation_size ------------------ - 81920 + 73728 (1 row) --check index is ok -- Gitee From ed9a6ed9a6a17f5dbfd5b68b4def52ffe9dcb0f6 Mon Sep 17 00:00:00 2001 From: lukeman Date: Sat, 24 Aug 2024 09:37:47 +0800 Subject: [PATCH 227/347] =?UTF-8?q?=E3=80=90=E5=9B=9E=E5=90=886.0.0?= =?UTF-8?q?=E3=80=91hex=E5=92=8Cbit=5Fand=E5=87=BD=E6=95=B0=EF=BC=8C?= =?UTF-8?q?=E5=BD=93=E5=85=A5=E5=8F=82=E4=B8=BA=E6=95=B4=E6=95=B0=E6=88=96?= =?UTF-8?q?=E4=BA=8C=E8=BF=9B=E5=88=B6=E7=B1=BB=E5=9E=8B=E6=97=B6=E5=92=8C?= =?UTF-8?q?mysql=E6=9F=A5=E8=AF=A2=E7=BB=93=E6=9E=9C=E4=B8=8D=E4=B8=80?= =?UTF-8?q?=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/runtime/executor/nodeAgg.cpp | 44 ++++++++++++++++++- .../runtime/executor/nodeWindowAgg.cpp | 14 +++++- src/include/executor/node/nodeAgg.h | 2 + 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/runtime/executor/nodeAgg.cpp b/src/gausskernel/runtime/executor/nodeAgg.cpp index cc540f10f8..eb610fd677 100644 --- a/src/gausskernel/runtime/executor/nodeAgg.cpp +++ b/src/gausskernel/runtime/executor/nodeAgg.cpp @@ -266,6 +266,37 @@ void initialize_phase(AggState* aggstate, int newphase) aggstate->phase = &aggstate->phases[newphase]; } +Datum get_bit_and_initval(Oid aggtranstype, int typmod) +{ + Oid typinput; + Oid typioparam; + char* strInitVal = NULL; + Datum initVal; + errno_t rc; + getTypeInputInfo(aggtranstype, &typinput, &typioparam); + int initValLen = typmod - (int)VARHDRSZ; + int 
charsPerByte = 2; + size_t strLen = (initValLen + 1) * charsPerByte + 1; // +2 for "\x" and +1 for '\0' + strInitVal = (char*)palloc(strLen * sizeof(char)); + strInitVal[0] = '\\'; + strInitVal[1] = 'x'; + strInitVal[strLen - 1] = '\0'; + rc = memset_s(strInitVal + charsPerByte, initValLen * charsPerByte, 'F', initValLen * charsPerByte); + securec_check(rc, "\0", "\0"); + initVal = OidInputFunctionCall(typinput, strInitVal, typioparam, -1); + pfree_ext(strInitVal); + return initVal; +} + +bool is_binary_type_in_dolphin(Oid typeOid) +{ + if (!u_sess->attr.attr_sql.dolphin) { + return false; + } + return (typeOid == get_typeoid(PG_CATALOG_NAMESPACE, "binary")) || + (typeOid == get_typeoid(PG_CATALOG_NAMESPACE, "varbinary")); +} + /* * Fetch a tuple from either the outer plan (for phase 0) or from the sorter * populated by the previous phase. Copy it to the sorter for the next phase @@ -463,8 +494,19 @@ static void advance_transition_function( int i; for (i = 1; i <= numTransInputs; i++) { - if (fcinfo->argnull[i]) + Oid aggtranstype = peraggstate->aggref->aggtrantype; + ListCell* arg = list_head(peraggstate->aggref->args); + TargetEntry *tle = (TargetEntry *)lfirst(arg); + if (fcinfo->argnull[i] && strcmp(get_func_name(peraggstate->aggref->aggfnoid), "bit_and") == 0 && + is_binary_type_in_dolphin(aggtranstype) && + pergroupstate->transValueIsNull && IsA(tle->expr, Var)) { + Var* var = (Var*)tle->expr; + pergroupstate->transValue = get_bit_and_initval(aggtranstype, var->vartypmod); + pergroupstate->transValueIsNull = false; return; + } else if (fcinfo->argnull[i]) { + return; + } } if (pergroupstate->noTransValue) { /* diff --git a/src/gausskernel/runtime/executor/nodeWindowAgg.cpp b/src/gausskernel/runtime/executor/nodeWindowAgg.cpp index f6a0f2dee1..ed806bc386 100644 --- a/src/gausskernel/runtime/executor/nodeWindowAgg.cpp +++ b/src/gausskernel/runtime/executor/nodeWindowAgg.cpp @@ -51,6 +51,7 @@ #include "utils/memutils.h" #include "utils/syscache.h" #include 
"windowapi.h" +#include "executor/node/nodeAgg.h" static TupleTableSlot* ExecWindowAgg(PlanState* state); static void initialize_windowaggregate( @@ -137,7 +138,18 @@ static void advance_windowaggregate( * just keep the prior transValue. */ for (i = 1; i <= num_arguments; i++) { - if (fcinfo->argnull[i]) { + Oid aggtranstype = perfuncstate->wfunc->wintype; + ListCell* arg = list_head(perfuncstate->wfunc->args); + TargetEntry *tle = (TargetEntry *)lfirst(arg); + if (fcinfo->argnull[i] && strcmp(get_func_name(perfuncstate->wfunc->winfnoid), "bit_and") == 0 && + is_binary_type_in_dolphin(aggtranstype) && + peraggstate->transValueIsNull && IsA(tle, Var)) { + Var* var = (Var*)tle; + peraggstate->transValue = get_bit_and_initval(aggtranstype, var->vartypmod); + peraggstate->transValueIsNull = false; + MemoryContextSwitchTo(old_context); + return; + } else if (fcinfo->argnull[i]) { MemoryContextSwitchTo(old_context); return; } diff --git a/src/include/executor/node/nodeAgg.h b/src/include/executor/node/nodeAgg.h index b38e927bdb..eaecc8f2ce 100644 --- a/src/include/executor/node/nodeAgg.h +++ b/src/include/executor/node/nodeAgg.h @@ -488,5 +488,7 @@ extern void agg_spill_to_disk(AggWriteFileControl* TempFileControl, TupleHashTab int64 numGroups, bool isAgg, int planId, int dop, Instrumentation* intrument = NULL); extern void ExecEarlyFreeAggregation(AggState* node); extern void ExecReSetAgg(AggState* node); +extern bool is_binary_type_in_dolphin(Oid typeOid); +extern Datum get_bit_and_initval(Oid aggtranstype, int initValLen); #endif /* NODEAGG_H */ -- Gitee From 46ad2f376253ebb3601e6efa897b6d05fbd047ce Mon Sep 17 00:00:00 2001 From: muyulinzhong Date: Thu, 29 Aug 2024 17:07:35 +0800 Subject: [PATCH 228/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=20=E4=BF=AE?= =?UTF-8?q?=E6=94=B9=E9=80=BB=E8=BE=91=EF=BC=8C=E9=81=BF=E5=85=8Dreboot?= =?UTF-8?q?=E6=97=A0=E6=B3=95=E6=8E=A7=E5=88=B6=E8=BF=9B=E7=A8=8B=E9=80=80?= 
=?UTF-8?q?=E5=87=BA=E9=A1=BA=E5=BA=8F=E6=89=80=E5=AF=BC=E8=87=B4=E6=84=8F?= =?UTF-8?q?=E5=A4=96core=E6=8E=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/transam/xlog.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index cd2e6eb47a..6d61810da7 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -6450,6 +6450,12 @@ void UpdateControlFile(void) } if (fd < 0) { + if (ENABLE_DSS && g_instance.status == FastShutdown && errno == ERR_DSS_CONNECT_FAILED + && !g_instance.dms_cxt.SSReformInfo.in_reform) { + ereport(WARNING, (errmsg("could not open control file due to lost connection to DSS," + " and CM may not have ability to control exit process, so shutdown directly."))); + exit(0); + } ereport(FATAL, (errcode_for_file_access(), errmsg("could not open control file \"%s\": %s", fname[i], TRANSLATE_ERRNO))); } -- Gitee From d6e88e46ff17c78b0bc3b3b0334b0da1f322186c Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 29 Aug 2024 20:12:51 +0800 Subject: [PATCH 229/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9Ags=5Fpr?= =?UTF-8?q?obackup=E6=97=B6=E7=9A=84=E9=83=A8=E5=88=86=E6=89=93=E5=8D=B0?= =?UTF-8?q?=E4=BF=A1=E6=81=AF=E9=9C=80=E5=B0=86oss=E4=BF=AE=E6=94=B9?= =?UTF-8?q?=E4=B8=BAs3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_probackup/configure.cpp | 4 ++-- src/bin/pg_probackup/delete.cpp | 2 +- src/bin/pg_probackup/init.cpp | 2 +- src/bin/pg_probackup/merge.cpp | 2 +- src/bin/pg_probackup/oss/backup.cpp | 2 +- src/bin/pg_probackup/oss/oss_operator.cpp | 4 ++-- src/bin/pg_probackup/pg_probackupb.h | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/bin/pg_probackup/configure.cpp b/src/bin/pg_probackup/configure.cpp index 2d6d0df47d..67a640b3f9 
100644 --- a/src/bin/pg_probackup/configure.cpp +++ b/src/bin/pg_probackup/configure.cpp @@ -43,7 +43,7 @@ static void show_configure_json(ConfigOption *opt); #define OPTION_COMPRESS_GROUP "Compression parameters" #define OPTION_REMOTE_GROUP "Remote access parameters" #define OPTION_DSS_GROUP "DSS connect parameters" -#define OPTION_OSS_GROUP "OSS connect parameters" +#define OPTION_OSS_GROUP "S3 connect parameters" /* * Short name should be non-printable ASCII character. @@ -602,7 +602,7 @@ readInstanceConfigFile(const char *instance_name) 'i', 235, "instance-id", &instance->dss.instance_id, SOURCE_CMD, (OptionSource)0, OPTION_DSS_GROUP, 0, option_get_value }, - /* OSS options */ + /* S3 options */ { 's', 236, "access-id", &instance_config.oss.access_id, SOURCE_CMD, (OptionSource)0, diff --git a/src/bin/pg_probackup/delete.cpp b/src/bin/pg_probackup/delete.cpp index cd5541c095..05f55a75eb 100644 --- a/src/bin/pg_probackup/delete.cpp +++ b/src/bin/pg_probackup/delete.cpp @@ -187,7 +187,7 @@ void do_retention(void) backup_merged = false; if (current.media_type == MEDIA_TYPE_OSS) { - elog(ERROR, "Not supported when specifying OSS options"); + elog(ERROR, "Not supported when specifying S3 options"); } /* Get a complete list of backups. 
*/ diff --git a/src/bin/pg_probackup/init.cpp b/src/bin/pg_probackup/init.cpp index ba76730b66..99613bea21 100644 --- a/src/bin/pg_probackup/init.cpp +++ b/src/bin/pg_probackup/init.cpp @@ -83,7 +83,7 @@ do_add_instance(InstanceConfig *instance) Oss::Oss* oss = getOssClient(); char* bucket_name = getBucketName(); if (!oss->BucketExists(bucket_name)) { - elog(ERROR, "Bucket '%s' does not exist on OSS, please create it first.", bucket_name); + elog(ERROR, "Bucket '%s' does not exist on S3, please create it first.", bucket_name); } } diff --git a/src/bin/pg_probackup/merge.cpp b/src/bin/pg_probackup/merge.cpp index b29c130675..ca0b6c4f89 100644 --- a/src/bin/pg_probackup/merge.cpp +++ b/src/bin/pg_probackup/merge.cpp @@ -123,7 +123,7 @@ do_merge(time_t backup_id) if (current.media_type == MEDIA_TYPE_OSS) { - elog(ERROR, "Not supported when specifying OSS options"); + elog(ERROR, "Not supported when specifying S3 options"); } if (backup_id == INVALID_BACKUP_ID) diff --git a/src/bin/pg_probackup/oss/backup.cpp b/src/bin/pg_probackup/oss/backup.cpp index 5ce1771986..5797d20394 100644 --- a/src/bin/pg_probackup/oss/backup.cpp +++ b/src/bin/pg_probackup/oss/backup.cpp @@ -263,7 +263,7 @@ void backupFiles(FileAppender* appender, backup_files_arg* arg) time(&end_time); pretty_time_interval(difftime(end_time, start_time), pretty_time, lengthof(pretty_time)); - elog(INFO, "Backup files are backuped to oss, time elapsed: %s", pretty_time); + elog(INFO, "Backup files are backuped to s3, time elapsed: %s", pretty_time); } diff --git a/src/bin/pg_probackup/oss/oss_operator.cpp b/src/bin/pg_probackup/oss/oss_operator.cpp index cf0e93be83..21268ed9f1 100644 --- a/src/bin/pg_probackup/oss/oss_operator.cpp +++ b/src/bin/pg_probackup/oss/oss_operator.cpp @@ -262,7 +262,7 @@ char* getBucketName() { char* bucket_name = instance_config.oss.access_bucket; if (bucket_name == NULL) { - elog(ERROR, "Required parameter not specified: OSS(--bucket_name)"); + elog(ERROR, "Required parameter not 
specified: S3(--bucket_name)"); } return bucket_name; } @@ -282,7 +282,7 @@ Oss::Oss* getOssClient() const char* access_bucket = instance_config.oss.access_bucket; if (!endpoint || !access_key || !secret_key || !access_bucket) { elog(ERROR, - "Required parameter not specified: OSS(--endpoint, --access_bucket, --access_id or --access_key)"); + "Required parameter not specified: S3(--endpoint, --access_bucket, --access_id or --access_key)"); } oss_client = new Oss::Oss(endpoint, access_key, secret_key, region); } diff --git a/src/bin/pg_probackup/pg_probackupb.h b/src/bin/pg_probackup/pg_probackupb.h index 9144f8fee1..9d17e1bc00 100644 --- a/src/bin/pg_probackup/pg_probackupb.h +++ b/src/bin/pg_probackup/pg_probackupb.h @@ -306,7 +306,7 @@ struct pgBackup /* media type */ MediaType media_type; - /* local or oss */ + /* local or s3 */ oss_status_t oss_status; /* sender context */ SenderCxt* sender_cxt; -- Gitee From d89f731511d8ce9a1c9cabc2ad8b75237546b7ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Thu, 29 Aug 2024 22:10:56 +0800 Subject: [PATCH 230/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dgs=5Fstat=5Fundo?= =?UTF-8?q?=EF=BC=8Cgs=5Fxlog=5Fkeepers=E7=B3=BB=E7=BB=9F=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E5=8D=87=E7=BA=A7=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../rollback-post_catalog_maindb_92_935.sql | 7 ------- .../rollback_catalog_maindb_92_935.sql | 8 ++++++++ ...0.sql => rollback_catalog_maindb_92_950.sql} | 4 ++-- .../rollback-post_catalog_otherdb_92_935.sql | 7 ------- .../rollback_catalog_otherdb_92_935.sql | 8 ++++++++ ....sql => rollback_catalog_otherdb_92_950.sql} | 4 ++-- .../upgrade-post_catalog_maindb_92_935.sql | 16 ---------------- .../upgrade_catalog_maindb_92_935.sql | 17 +++++++++++++++++ ...50.sql => upgrade_catalog_maindb_92_950.sql} | 2 +- .../upgrade-post_catalog_otherdb_92_935.sql | 16 ---------------- .../upgrade_catalog_otherdb_92_935.sql | 
17 +++++++++++++++++ ...0.sql => upgrade_catalog_otherdb_92_950.sql} | 2 +- 12 files changed, 56 insertions(+), 52 deletions(-) delete mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_935.sql rename src/include/catalog/upgrade_sql/rollback_catalog_maindb/{rollback-post_catalog_maindb_92_950.sql => rollback_catalog_maindb_92_950.sql} (63%) delete mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_935.sql rename src/include/catalog/upgrade_sql/rollback_catalog_otherdb/{rollback-post_catalog_otherdb_92_950.sql => rollback_catalog_otherdb_92_950.sql} (63%) delete mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_935.sql rename src/include/catalog/upgrade_sql/upgrade_catalog_maindb/{upgrade-post_catalog_maindb_92_950.sql => upgrade_catalog_maindb_92_950.sql} (90%) delete mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_935.sql rename src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/{upgrade-post_catalog_otherdb_92_950.sql => upgrade_catalog_otherdb_92_950.sql} (90%) diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_935.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_935.sql deleted file mode 100644 index 159ef88a47..0000000000 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_935.sql +++ /dev/null @@ -1,7 +0,0 @@ -/*------ add sys fuction gs_stat_undo ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); -SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; -CREATE FUNCTION pg_catalog.gs_stat_undo(OUT curr_used_zone_count int4, OUT top_used_zones text, OUT curr_used_undo_size int4, -OUT undo_threshold int4, OUT oldest_xid_in_undo oid, OUT oldest_xmin oid, OUT total_undo_chain_len oid, OUT 
max_undo_chain_len oid, -OUT create_undo_file_count int4, OUT discard_undo_file_count int4) -RETURNS record LANGUAGE INTERNAL as 'gs_stat_undo'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql index b102902d86..5ab58ef599 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql @@ -9,3 +9,11 @@ DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT z SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; + +/*------ add sys fuction gs_stat_undo ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; +CREATE FUNCTION pg_catalog.gs_stat_undo(OUT curr_used_zone_count int4, OUT top_used_zones text, OUT curr_used_undo_size int4, +OUT undo_threshold int4, OUT oldest_xid_in_undo oid, OUT oldest_xmin oid, OUT total_undo_chain_len oid, OUT max_undo_chain_len oid, +OUT create_undo_file_count int4, OUT discard_undo_file_count int4) +RETURNS record LANGUAGE INTERNAL as 'gs_stat_undo'; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_950.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_950.sql similarity index 63% rename from src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_950.sql rename to 
src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_950.sql index 587afbd3e5..1fa8165edb 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_950.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_950.sql @@ -1,7 +1,7 @@ -DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers(out keeptype pg_catalog.text, out keepsegment pg_catalog.text, out describe pg_catalog.text); +DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers(); SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,9040; CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers (out keeptype pg_catalog.text, out keepsegment pg_catalog.text, out describe pg_catalog.text) -RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_xlog_keepers'; \ No newline at end of file +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_xlog_keepers'; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_935.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_935.sql deleted file mode 100644 index 159ef88a47..0000000000 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_935.sql +++ /dev/null @@ -1,7 +0,0 @@ -/*------ add sys fuction gs_stat_undo ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); -SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; -CREATE FUNCTION pg_catalog.gs_stat_undo(OUT curr_used_zone_count int4, OUT top_used_zones text, OUT curr_used_undo_size int4, -OUT undo_threshold int4, OUT oldest_xid_in_undo oid, OUT oldest_xmin oid, OUT total_undo_chain_len oid, OUT max_undo_chain_len oid, -OUT create_undo_file_count int4, OUT discard_undo_file_count int4) -RETURNS record LANGUAGE INTERNAL as 'gs_stat_undo'; \ No newline at end of file diff --git 
a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql index b102902d86..5ab58ef599 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql @@ -9,3 +9,11 @@ DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT z SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; + +/*------ add sys fuction gs_stat_undo ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; +CREATE FUNCTION pg_catalog.gs_stat_undo(OUT curr_used_zone_count int4, OUT top_used_zones text, OUT curr_used_undo_size int4, +OUT undo_threshold int4, OUT oldest_xid_in_undo oid, OUT oldest_xmin oid, OUT total_undo_chain_len oid, OUT max_undo_chain_len oid, +OUT create_undo_file_count int4, OUT discard_undo_file_count int4) +RETURNS record LANGUAGE INTERNAL as 'gs_stat_undo'; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_950.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_950.sql similarity index 63% rename from src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_950.sql rename to src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_950.sql index 587afbd3e5..1fa8165edb 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_950.sql +++ 
b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_950.sql @@ -1,7 +1,7 @@ -DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers(out keeptype pg_catalog.text, out keepsegment pg_catalog.text, out describe pg_catalog.text); +DROP FUNCTION IF EXISTS pg_catalog.gs_xlog_keepers(); SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,9040; CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers (out keeptype pg_catalog.text, out keepsegment pg_catalog.text, out describe pg_catalog.text) -RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_xlog_keepers'; \ No newline at end of file +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT as 'gs_xlog_keepers'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_935.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_935.sql deleted file mode 100644 index 08fa0111bc..0000000000 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_935.sql +++ /dev/null @@ -1,16 +0,0 @@ -/*------ add sys fuction gs_stat_undo ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); -SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; -CREATE FUNCTION pg_catalog.gs_stat_undo( - OUT curr_used_zone_count int4, - OUT top_used_zones text, - OUT curr_used_undo_size int4, - OUT undo_threshold int4, - OUT global_recycle_xid xid, - OUT oldest_xmin xid, - OUT total_undo_chain_len int8, - OUT max_undo_chain_len int8, - OUT create_undo_file_count int4, - OUT discard_undo_file_count int4) -RETURNS SETOF record -LANGUAGE INTERNAL STABLE NOT SHIPPABLE ROWS 1 as 'gs_stat_undo'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql index e674756719..f98c1eba3e 100644 --- 
a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql @@ -13,3 +13,20 @@ DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT z SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; + +/*------ add sys fuction gs_stat_undo ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; +CREATE FUNCTION pg_catalog.gs_stat_undo( + OUT curr_used_zone_count int4, + OUT top_used_zones text, + OUT curr_used_undo_size int4, + OUT undo_threshold int4, + OUT global_recycle_xid xid, + OUT oldest_xmin xid, + OUT total_undo_chain_len int8, + OUT max_undo_chain_len int8, + OUT create_undo_file_count int4, + OUT discard_undo_file_count int4) +RETURNS SETOF record +LANGUAGE INTERNAL STABLE NOT SHIPPABLE ROWS 1 as 'gs_stat_undo'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_950.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_950.sql similarity index 90% rename from src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_950.sql rename to src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_950.sql index 2bddfb1418..6771a737b9 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_950.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_950.sql @@ -4,4 +4,4 @@ CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers (out keeptype pg_catalog.text, out 
keepsegment pg_catalog.text, out describe pg_catalog.text) -RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT ROWS 1000 NOT SHIPPABLE as 'gs_xlog_keepers'; \ No newline at end of file +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT ROWS 1000 NOT SHIPPABLE as 'gs_xlog_keepers'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_935.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_935.sql deleted file mode 100644 index 08fa0111bc..0000000000 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_935.sql +++ /dev/null @@ -1,16 +0,0 @@ -/*------ add sys fuction gs_stat_undo ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); -SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; -CREATE FUNCTION pg_catalog.gs_stat_undo( - OUT curr_used_zone_count int4, - OUT top_used_zones text, - OUT curr_used_undo_size int4, - OUT undo_threshold int4, - OUT global_recycle_xid xid, - OUT oldest_xmin xid, - OUT total_undo_chain_len int8, - OUT max_undo_chain_len int8, - OUT create_undo_file_count int4, - OUT discard_undo_file_count int4) -RETURNS SETOF record -LANGUAGE INTERNAL STABLE NOT SHIPPABLE ROWS 1 as 'gs_stat_undo'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql index e674756719..f98c1eba3e 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql @@ -13,3 +13,20 @@ DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT z SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT 
zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; + +/*------ add sys fuction gs_stat_undo ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; +CREATE FUNCTION pg_catalog.gs_stat_undo( + OUT curr_used_zone_count int4, + OUT top_used_zones text, + OUT curr_used_undo_size int4, + OUT undo_threshold int4, + OUT global_recycle_xid xid, + OUT oldest_xmin xid, + OUT total_undo_chain_len int8, + OUT max_undo_chain_len int8, + OUT create_undo_file_count int4, + OUT discard_undo_file_count int4) +RETURNS SETOF record +LANGUAGE INTERNAL STABLE NOT SHIPPABLE ROWS 1 as 'gs_stat_undo'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_950.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_950.sql similarity index 90% rename from src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_950.sql rename to src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_950.sql index 2bddfb1418..6771a737b9 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_950.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_950.sql @@ -4,4 +4,4 @@ CREATE OR REPLACE FUNCTION pg_catalog.gs_xlog_keepers (out keeptype pg_catalog.text, out keepsegment pg_catalog.text, out describe pg_catalog.text) -RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT ROWS 1000 NOT SHIPPABLE as 'gs_xlog_keepers'; \ No newline at end of file +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE STRICT ROWS 1000 NOT SHIPPABLE as 'gs_xlog_keepers'; -- Gitee From 169d76ecf92d04d5b765ddd02bdcd4360c203bbd Mon Sep 17 00:00:00 2001 From: jiangyan <18091841830@163.com> Date: Fri, 30 Aug 
2024 09:21:27 +0800 Subject: [PATCH 231/347] =?UTF-8?q?disable=5Fconn=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E4=B8=8D=E5=86=8D=E5=90=AF=E5=8A=A8walrecv?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/process/postmaster/postmaster.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index c1ce23ad08..e56a4976bc 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -15082,9 +15082,6 @@ Datum disable_conn(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_ATTRIBUTE), errmsg("Invalid null pointer attribute for disable_conn()"))); } - if (!WalRcvInProgress()) { - RequestXLogStreamForBarrier(); - } if (g_instance.pid_cxt.BarrierPreParsePID == 0) { g_instance.csn_barrier_cxt.startBarrierPreParse = false; } -- Gitee From bd17be7e89a4645409861ce4d0810ed6fab29cb5 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Mon, 1 Jul 2024 13:29:11 +0800 Subject: [PATCH 232/347] =?UTF-8?q?=E6=9C=AA=E6=89=A7=E8=A1=8C=E7=9A=84sql?= =?UTF-8?q?=E8=AF=AD=E5=8F=A5=E4=B8=AD=E5=8C=85=E5=90=ABraise=E4=B8=8D?= =?UTF-8?q?=E5=AD=98=E5=9C=A8=E5=88=97=E6=97=B6=E6=8A=9B=E5=87=BA=E9=94=99?= =?UTF-8?q?=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/gram.y | 33 ++-- src/common/pl/plpgsql/src/pl_comp.cpp | 4 +- src/common/pl/plpgsql/src/pl_exec.cpp | 59 ++++++- src/common/pl/plpgsql/src/pl_funcs.cpp | 3 + src/include/utils/plpgsql.h | 1 + .../expected/plpgsql_cursor_rowtype.out | 159 ++++++++---------- .../regress/sql/plpgsql_cursor_rowtype.sql | 40 +++++ 7 files changed, 190 insertions(+), 109 deletions(-) diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 02bf841374..0183dfc3db 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ 
b/src/common/pl/plpgsql/src/gram.y @@ -9289,18 +9289,27 @@ read_sql_construct6(int until, break; } case T_DATUM: - idents = yylval.wdatum.idents; - if(prev_tok != '.' && list_length(idents) >= 3) { - plpgsql_cast_reference_list(idents, &ds, false); - ds_changed = true; - break; - } else { - tok = yylex(); - curloc = yylloc; - plpgsql_push_back_token(tok); - plpgsql_append_source_text(&ds, loc, curloc); - ds_changed = true; - break; + { + idents = yylval.wdatum.idents; + int dno = yylval.wdatum.datum->dno; + PLpgSQL_datum *datum = (PLpgSQL_datum *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[dno]; + if (datum->dtype == PLPGSQL_DTYPE_RECFIELD) { + PLpgSQL_recfield *rec_field = (PLpgSQL_recfield *)datum; + PLpgSQL_rec *rec = (PLpgSQL_rec *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[rec_field->recparentno]; + rec->field_need_check = lappend_int(rec->field_need_check, dno); + } + if(prev_tok != '.' && list_length(idents) >= 3) { + plpgsql_cast_reference_list(idents, &ds, false); + ds_changed = true; + break; + } else { + tok = yylex(); + curloc = yylloc; + plpgsql_push_back_token(tok); + plpgsql_append_source_text(&ds, loc, curloc); + ds_changed = true; + break; + } } case T_WORD: AddNamespaceIfPkgVar(yylval.word.ident, save_IdentifierLookup); diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp index 9428b697dd..117c2bb18a 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -2786,7 +2786,8 @@ bool plpgsql_parse_dblword(char* word1, char* word2, PLwdatum* wdatum, PLcword* } } if (!exist) { - break; + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", rec->refname, word2))); } } } @@ -3872,6 +3873,7 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ } case PLPGSQL_TTYPE_CURSORROW: { PLpgSQL_rec* rec = (PLpgSQL_rec*)palloc0(sizeof(PLpgSQL_rec)); + rec->field_need_check 
= NIL; rec->dtype = PLPGSQL_DTYPE_CURSORROW; rec->refname = pstrdup(refname); rec->lineno = lineno; diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index 2c3415c77d..52da7b5af0 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1193,6 +1193,25 @@ static void rowtype_column_len_check(Form_pg_attribute tattr, HeapTuple var_tup, } } +static void check_recfield_valid(PLpgSQL_recfield *rec_field, TupleDesc tupdesc, char* recname) +{ + int natts = tupdesc->natts; + bool found = false; + if (natts == 0) { + return; + } + for (int fnum = 0; fnum < natts; fnum++) { + Form_pg_attribute attr = TupleDescAttr(tupdesc, fnum); + if (strcmp(NameStr(attr->attname), rec_field->fieldname) == 0) { + found = true; + break; + } + } + if (!found) + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", recname, rec_field->fieldname))); +} + static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *datum, PLpgSQL_function *func) { bool *replaces = NULL; @@ -1309,6 +1328,25 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d } +static void check_if_recfield_exist(PLpgSQL_execstate estate, int i, PLpgSQL_function* func) +{ + PLpgSQL_rec *rec = (PLpgSQL_rec *)estate.datums[i]; + TupleDesc tupdesc = rec->tupdesc; + if (HeapTupleIsValid(rec->tup) && rec->field_need_check != NIL) { + ListCell *lc = NULL; + foreach (lc, rec->field_need_check) { + int dno = (int)lfirst_int(lc); + PLpgSQL_datum *datum = NULL; + if (func->ndatums > dno) + datum = (PLpgSQL_datum *)func->datums[dno]; + if (datum != NULL && datum->dtype == PLPGSQL_DTYPE_RECFIELD) { + PLpgSQL_recfield *rec_field = (PLpgSQL_recfield *)datum; + check_recfield_valid(rec_field, tupdesc, rec->refname); + } + } + } +} + /* ---------- * plpgsql_exec_autonm_function Called by the call handler for * autonomous function execution. 
@@ -1354,8 +1392,10 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; - if (rec->expr) + if (rec->expr) { exec_cursor_rowtype_init(&estate, estate.datums[i], func); + check_if_recfield_exist(estate, i, func); + } } } @@ -1701,8 +1741,10 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; - if (rec->expr) + if (rec->expr) { exec_cursor_rowtype_init(&estate, estate.datums[i], func); + check_if_recfield_exist(estate, i, func); + } } } @@ -2325,12 +2367,14 @@ HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata) estate.datums[i] = copy_plpgsql_datum(func->datums[i]); } else { estate.datums[i] = func->datums[i]; - } + } - if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { + if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; - if (rec->expr) + if (rec->expr) { exec_cursor_rowtype_init(&estate, estate.datums[i], func); + check_if_recfield_exist(estate, i, func); + } } } @@ -3062,6 +3106,7 @@ PLpgSQL_datum* copy_plpgsql_datum(PLpgSQL_datum* datum) newm->tupdesc = NULL; newm->freetup = false; newm->freetupdesc = false; + newm->field_need_check = list_copy(((PLpgSQL_rec*)datum)->field_need_check); result = (PLpgSQL_datum*)newm; } break; @@ -14431,8 +14476,10 @@ plpgsql_exec_event_trigger(PLpgSQL_function *func, EventTriggerData *trigdata) estate.datums[i] = copy_plpgsql_datum(func->datums[i]); if (estate.datums[i]->dtype == PLPGSQL_DTYPE_CURSORROW) { PLpgSQL_rec *rec = (PLpgSQL_rec*)estate.datums[i]; - if (rec->expr) + if (rec->expr) { exec_cursor_rowtype_init(&estate, estate.datums[i], func); + check_if_recfield_exist(estate, i, func); + } } } diff --git a/src/common/pl/plpgsql/src/pl_funcs.cpp b/src/common/pl/plpgsql/src/pl_funcs.cpp index 
cfdd9071e9..c081e2505e 100644 --- a/src/common/pl/plpgsql/src/pl_funcs.cpp +++ b/src/common/pl/plpgsql/src/pl_funcs.cpp @@ -117,6 +117,9 @@ void plpgsql_add_pkg_ns(PLpgSQL_package* pkg) plpgsql_ns_additem(PLPGSQL_NSTYPE_CURSORROW, varno, objname, pkgname); } break; + case PLPGSQL_DTYPE_RECFIELD: + plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, varno, objname, pkgname); + break; case PLPGSQL_DTYPE_COMPOSITE: break; default: diff --git a/src/include/utils/plpgsql.h b/src/include/utils/plpgsql.h index c13dd26828..7fc49e8f77 100644 --- a/src/include/utils/plpgsql.h +++ b/src/include/utils/plpgsql.h @@ -596,6 +596,7 @@ typedef struct { /* Record variable (non-fixed structure) */ bool freetup; bool freetupdesc; List* pkg_name = NULL; + List* field_need_check = NULL; PLpgSQL_package* pkg = NULL; PLpgSQL_expr* default_val = NULL; PLpgSQL_expr* expr = NULL; diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 36d19e8520..01b310b9a8 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -829,6 +829,47 @@ INFO: str1 is 98 INFO: str1 is 99 drop table STORAGE_LARGE_TABLE_STORAGE_TABLE_000; drop table STORAGE_LARGE_CURSOR_TABLE_216; +-- test none execute error sql +set behavior_compat_options=''; +create table t_CurRowtype_Def_Case0001_1( +col1 tinyint primary key, +col2 smallint, +col3 int, +col4 bigint +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_currowtype_def_case0001_1_pkey" for table "t_currowtype_def_case0001_1" +declare + cursor cur_CurRowtype_Def_Case0003_1 is select * from t_CurRowtype_Def_Case0001_1; + source cur_CurRowtype_Def_Case0003_1%rowtype; +begin + open cur_CurRowtype_Def_Case0003_1; + loop + fetch cur_CurRowtype_Def_Case0003_1 into source; + exit when cur_CurRowtype_Def_Case0003_1%notfound; + raise notice '% , %',source.col1,source.col5; + end loop; + close cur_CurRowtype_Def_Case0003_1; +end; +/ 
+ERROR: record "source" has no field "col5" +CONTEXT: PL/pgSQL function inline_code_block during initialization of execution state +set behavior_compat_options='allow_procedure_compile_check'; +declare + cursor cur_CurRowtype_Def_Case0003_1 is select * from t_CurRowtype_Def_Case0001_1; + source cur_CurRowtype_Def_Case0003_1%rowtype; +begin + open cur_CurRowtype_Def_Case0003_1; + loop + fetch cur_CurRowtype_Def_Case0003_1 into source; + exit when cur_CurRowtype_Def_Case0003_1%notfound; + raise notice '% , %',source.col1,source.col5; + end loop; + close cur_CurRowtype_Def_Case0003_1; +end; +/ +ERROR: record "source" has no field "col5" +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 8 +drop table t_CurRowtype_Def_Case0001_1; --test: drop column create table int_4_2(a NUMBER, d NUMBER, b VARCHAR2(5)); insert into int_4_2(a, d, b) values(3, 6,'johan'); @@ -1386,9 +1427,9 @@ drop procedure pro_cs_trans_1; drop table cs_trans_1; -- test for rec in cursor loop show behavior_compat_options; - behavior_compat_options ----------------------------------------------------------- - allow_procedure_compile_check,disable_record_type_in_dml + behavior_compat_options +------------------------------- + allow_procedure_compile_check (1 row) create table test_table(col1 varchar2(10)); @@ -1635,6 +1676,7 @@ INFO: aa (1 row) +drop procedure check_compile_1; set behavior_compat_options=''; drop procedure check_compile; --游标依赖row type,后续alter type @@ -1751,6 +1793,7 @@ fetch c3; (1 row) close c3; +drop type if exists foo; ---- 不在 TRANSACTION Block里的游标声明导致 core的问题 --游标依赖row type,后续alter type drop type if exists type_cursor_bugfix_0001; @@ -1839,7 +1882,7 @@ NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() drop package pckg_test2; NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() drop schema plpgsql_cursor_rowtype cascade; -NOTICE: drop cascades to 26 other objects +NOTICE: drop cascades to 24 other objects DETAIL: drop cascades to table emp 
drop cascades to function pro_cursor_no_args_1() drop cascades to function pro_cursor_no_args_2() @@ -1864,8 +1907,6 @@ drop cascades to function plpgsql_cursor_rowtype.ppp1() --?.* drop cascades to function pro_close_cursor1() drop cascades to function pro_close_cursor2() -drop cascades to function check_compile_1() -drop cascades to type foo drop schema schema1 cascade; NOTICE: drop cascades to table schema1.t11 create schema cursor_rowtype; @@ -2090,10 +2131,8 @@ BEGIN close c; END; / -NOTICE: Smith -NOTICE: Jane -ERROR: record "source" has no field "oid" in assignment. -CONTEXT: PL/pgSQL function inline_code_block line 9 at assignment +ERROR: record "source" has no field "oid" +CONTEXT: PL/pgSQL function inline_code_block during initialization of execution state -- FUNC: normale create or replace function f1(b int) returns int as $$ @@ -2141,10 +2180,8 @@ BEGIN END; $$language plpgsql; call f1(2); -----error -NOTICE: Smith -NOTICE: Jane -ERROR: record "source" has no field "oid" in assignment. -CONTEXT: PL/pgSQL function f1(integer) line 11 at assignment +ERROR: record "source" has no field "oid" +CONTEXT: PL/pgSQL function f1(integer) during initialization of execution state -- FUNC: If change table struct create or replace function f1(b int) returns int as $$ @@ -2166,8 +2203,8 @@ drop table employees; create table employees(a varchar(20),b int); insert into employees(a,b) values ('johan',22); call f1(2); -----error -ERROR: record "source" has no field "first_name" in assignment. -CONTEXT: PL/pgSQL function f1(integer) line 10 at assignment +ERROR: record "source" has no field "first_name" +CONTEXT: PL/pgSQL function f1(integer) during initialization of execution state drop table employees; call f1(2); -----error ERROR: execute failed when parse the query: SELECT * @@ -2331,10 +2368,8 @@ BEGIN close c; END; / -NOTICE: Smith -NOTICE: Jane -ERROR: record "source" has no field "oid" in assignment. 
-CONTEXT: PL/pgSQL function inline_code_block line 9 at assignment +ERROR: record "source" has no field "oid" +CONTEXT: PL/pgSQL function inline_code_block during initialization of execution state -- FUNC: normale create or replace function f1(b int) returns int as $$ @@ -2382,10 +2417,8 @@ BEGIN END; $$language plpgsql; call f1(2); -----error -NOTICE: Smith -NOTICE: Jane -ERROR: record "source" has no field "oid" in assignment. -CONTEXT: PL/pgSQL function f1(integer) line 11 at assignment +ERROR: record "source" has no field "oid" +CONTEXT: PL/pgSQL function f1(integer) during initialization of execution state -- FUNC: If change table struct create or replace function f1(b int) returns int as $$ @@ -2407,8 +2440,8 @@ drop table employees; create table employees(a varchar(20),b int); insert into employees(a,b) values ('johan',22); call f1(2); -----error -ERROR: record "source" has no field "first_name" in assignment. -CONTEXT: PL/pgSQL function f1(integer) line 10 at assignment +ERROR: record "source" has no field "first_name" +CONTEXT: PL/pgSQL function f1(integer) during initialization of execution state drop table employees; call f1(2); -----error ERROR: execute failed when parse the query: SELECT * @@ -2507,9 +2540,8 @@ BEGIN return b; END; $$language plpgsql; -ERROR: "source.oid" is not a known variable -LINE 12: source.oid := 5; - ^ +ERROR: record "source" has no field "oid" +CONTEXT: compilation of PL/pgSQL function "f1" near line 10 -- FUNC: If change table struct create or replace function f1(b int) returns int as $$ @@ -2531,24 +2563,7 @@ drop table employees; create table employees(a varchar(20),b int); insert into employees(a,b) values ('johan',22); call f1(2); -----error -ERROR: "source.first_name" is not a known variable -LINE 10: source.first_name := 'Jane'; - ^ -QUERY: -DECLARE - CURSOR c IS - SELECT * - FROM employees; - source c%ROWTYPE; -BEGIN - open c; - fetch c into source; - source.first_name := 'Jane'; - raise notice '%', source.first_name; - 
close c; - return b; -END; - +ERROR: record "source" has no field "first_name" CONTEXT: compilation of PL/pgSQL function "f1" near line 9 drop table employees; call f1(2); -----error @@ -2622,9 +2637,8 @@ BEGIN return b; END; $$language plpgsql; -ERROR: "source.oid" is not a known variable -LINE 12: source.oid := 5; - ^ +ERROR: record "source" has no field "oid" +CONTEXT: compilation of PL/pgSQL function "f1" near line 10 call f1(2); NOTICE: Jane f1 @@ -2653,24 +2667,7 @@ drop table employees; create table employees(a varchar(20),b int); insert into employees(a,b) values ('johan',22); call f1(2); -----error -ERROR: "source.first_name" is not a known variable -LINE 10: source.first_name := 'Jane'; - ^ -QUERY: -DECLARE - CURSOR c IS - SELECT * - FROM employees; - source c%ROWTYPE; -BEGIN - open c; - fetch c into source; - source.first_name := 'Jane'; - raise notice '%', source.first_name; - close c; - return b; -END; - +ERROR: record "source" has no field "first_name" CONTEXT: compilation of PL/pgSQL function "f1" near line 9 drop table employees; call f1(2); -----error @@ -2802,26 +2799,8 @@ BEGIN close c; END; / -ERROR: "source.oid" is not a known variable -LINE 10: source.oid := 5; - ^ -QUERY: DECLARE - CURSOR c IS - SELECT first_name,last_name - FROM employees; - source c%ROWTYPE; -BEGIN - source.first_name := 'Jane'; source.last_name := 'Smith'; - raise notice '%', source.last_name; - raise notice '%', source.first_name; - source.oid := 5; - raise notice '%', source.oid; - open c; - fetch c into source; - source.first_name := 'Jane'; - raise notice '%', source.first_name; - close c; -END +ERROR: record "source" has no field "oid" +CONTEXT: compilation of PL/pgSQL function "p1" near line 9 call P1(2); -----error NOTICE: Jane p1 @@ -2864,8 +2843,8 @@ drop table employees; create table employees(a varchar(20),b int); insert into employees(a,b) values ('johan',22); call f1(2); -----error -ERROR: record "source" has no field "first_name" in assignment. 
-CONTEXT: PL/pgSQL function f1(integer) line 10 at assignment +ERROR: record "source" has no field "first_name" +CONTEXT: PL/pgSQL function f1(integer) during initialization of execution state drop table employees; call f1(2); -----error ERROR: execute failed when parse the query: SELECT * diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index 9d5d3c730c..14b27a2152 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -613,6 +613,44 @@ End; drop table STORAGE_LARGE_TABLE_STORAGE_TABLE_000; drop table STORAGE_LARGE_CURSOR_TABLE_216; +-- test none execute error sql +set behavior_compat_options=''; +create table t_CurRowtype_Def_Case0001_1( +col1 tinyint primary key, +col2 smallint, +col3 int, +col4 bigint +); + +declare + cursor cur_CurRowtype_Def_Case0003_1 is select * from t_CurRowtype_Def_Case0001_1; + source cur_CurRowtype_Def_Case0003_1%rowtype; +begin + open cur_CurRowtype_Def_Case0003_1; + loop + fetch cur_CurRowtype_Def_Case0003_1 into source; + exit when cur_CurRowtype_Def_Case0003_1%notfound; + raise notice '% , %',source.col1,source.col5; + end loop; + close cur_CurRowtype_Def_Case0003_1; +end; +/ + +set behavior_compat_options='allow_procedure_compile_check'; +declare + cursor cur_CurRowtype_Def_Case0003_1 is select * from t_CurRowtype_Def_Case0001_1; + source cur_CurRowtype_Def_Case0003_1%rowtype; +begin + open cur_CurRowtype_Def_Case0003_1; + loop + fetch cur_CurRowtype_Def_Case0003_1 into source; + exit when cur_CurRowtype_Def_Case0003_1%notfound; + raise notice '% , %',source.col1,source.col5; + end loop; + close cur_CurRowtype_Def_Case0003_1; +end; +/ +drop table t_CurRowtype_Def_Case0001_1; --test: drop column create table int_4_2(a NUMBER, d NUMBER, b VARCHAR2(5)); @@ -1276,6 +1314,7 @@ end; / call check_compile_1(); +drop procedure check_compile_1; set behavior_compat_options=''; drop procedure check_compile; @@ -1344,6 
+1383,7 @@ alter type foo alter attribute b type text;--success fetch c3; close c3; +drop type if exists foo; ---- 不在 TRANSACTION Block里的游标声明导致 core的问题 --游标依赖row type,后续alter type drop type if exists type_cursor_bugfix_0001; -- Gitee From faf284427a9aa33a57651f895b78e20329e5c359 Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Fri, 30 Aug 2024 09:56:03 +0800 Subject: [PATCH 233/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E6=8C=89=E9=9C=80?= =?UTF-8?q?=E5=9B=9E=E6=94=BE=E5=AE=9E=E6=97=B6=E6=9E=84=E5=BB=BA=E5=86=85?= =?UTF-8?q?=E5=AD=98=E4=B8=8D=E8=B6=B3=E6=97=B6=EF=BC=8Cfailover=E9=98=B6?= =?UTF-8?q?=E6=AE=B5=E5=8D=A1=E4=BD=8F=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ondemand_extreme_rto/dispatcher.cpp | 3 +- .../ondemand_extreme_rto/page_redo.cpp | 83 +++++++++++++------ src/include/access/multi_redo_settings.h | 3 + .../access/ondemand_extreme_rto/page_redo.h | 1 + 4 files changed, 62 insertions(+), 28 deletions(-) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp index 2e308b8231..a4aa050ffe 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp @@ -445,7 +445,8 @@ static void SetOndemandXLogParseFlagValue(uint32 maxParseBufNum) { g_ondemandXLogParseMemFullValue = maxParseBufNum * ONDEMAND_FORCE_PRUNE_RATIO; g_ondemandXLogParseMemCancelPauseVaule = maxParseBufNum * ONDEMAND_DISTRIBUTE_CANCEL_RATIO; - + g_ondemandXLogParseMemCancelPauseVaulePerPipeline = + (maxParseBufNum - g_ondemandXLogParseMemFullValue) / get_batch_redo_num(); g_ondemandRealtimeBuildQueueFullValue = REALTIME_BUILD_RECORD_QUEUE_SIZE * ONDEMAND_FORCE_PRUNE_RATIO; } diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp 
b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index 2faf9087cb..ac912e0602 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -117,6 +117,7 @@ RedoItem g_hashmapPruneMark; uint32 g_ondemandXLogParseMemFullValue = 0; uint32 g_ondemandXLogParseMemCancelPauseVaule = 0; +uint32 g_ondemandXLogParseMemCancelPauseVaulePerPipeline = 0; uint32 g_ondemandRealtimeBuildQueueFullValue = 0; static const int PAGE_REDO_WORKER_ARG = 3; @@ -624,21 +625,44 @@ void BatchRedoSendMarkToPageRedoManager(RedoItem *sendMark) */ static void BatchRedoProcIfXLogParseMemFull() { - if (SS_ONDEMAND_RECOVERY_HASHMAP_FULL) { - if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { - BatchRedoSendMarkToPageRedoManager(&g_hashmapPruneMark); - } else { - BatchRedoSendMarkToPageRedoManager(&g_forceDistributeMark); + if (!SS_ONDEMAND_RECOVERY_HASHMAP_FULL) { + return; + } + + bool buildNormal = false; + if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { + buildNormal = true; + BatchRedoSendMarkToPageRedoManager(&g_hashmapPruneMark); + } else { + BatchRedoSendMarkToPageRedoManager(&g_forceDistributeMark); + } + // wait until hashmap have enough free block-records or current pipeline do not use any block-records + do { + if (pg_atomic_read_u32(&g_redoWorker->parseManager.memctl.usedblknum) < + g_ondemandXLogParseMemCancelPauseVaulePerPipeline) { + break; } - // wait until hashmap have enough free block-records or current pipeline do not use any block-records - do { - if (pg_atomic_read_u32(&g_redoWorker->parseManager.memctl.usedblknum) == 0) { - break; + + /* + * 1. send g_forceDistributeMark to pageRedoManager if we get in this loop in realtime + * build normal and now is realtime build failover + * 2. 
resend g_hashmapPruneMark to pageRedoManager if last prumeMax do not prume hashmap + * to excepted value + */ + if (unlikely(SS_ONDEMAND_REALTIME_BUILD_FAILOVER && buildNormal)) { + buildNormal = false; + BatchRedoSendMarkToPageRedoManager(&g_forceDistributeMark); + } else if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { + XLogRecPtr ckptPtr = pg_atomic_read_u64(&g_dispatcher->ckptRedoPtr); + XLogRecPtr prunePtr = + pg_atomic_read_u64(&g_dispatcher->pageLines[g_redoWorker->slotId].htabThd->nextPrunePtr); + if (XLByteEQ(prunePtr, ckptPtr)) { + BatchRedoSendMarkToPageRedoManager(&g_hashmapPruneMark); } - RedoInterruptCallBack(); - pg_usleep(100000L); // 100 ms - } while (SS_ONDEMAND_RECOVERY_HASHMAP_FULL); - } + } + RedoInterruptCallBack(); + pg_usleep(100000L); // 100 ms + } while (SS_ONDEMAND_RECOVERY_HASHMAP_FULL); } bool BatchRedoDistributeItems(void **eleArry, uint32 eleNum) @@ -849,10 +873,20 @@ static void WaitSegRedoWorkersQueueEmpty() } } +static void WaitTrxnRedoWorkersQueueEmpty() +{ + while (!SPSCBlockingQueueIsEmpty(g_dispatcher->trxnLine.managerThd->queue) || + !SPSCBlockingQueueIsEmpty(g_dispatcher->trxnQueue)) { + pg_usleep(100000L); /* 100 ms */ + RedoInterruptCallBack(); + } +} + void RedoPageManagerDistributeBlockRecord(XLogRecParseState *parsestate) { PageManagerPruneIfRealtimeBuildFailover(); WaitSegRedoWorkersQueueEmpty(); + WaitTrxnRedoWorkersQueueEmpty(); PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; HASH_SEQ_STATUS status; @@ -1157,7 +1191,6 @@ static void OndemandMergeHashMap(HTAB *srcHashmap, HTAB *dstHashmap) void PageManagerMergeHashMapInRealtimeBuild() { ondemand_htab_ctrl_t *procHtabCtrl = g_instance.comm_cxt.predo_cxt.redoItemHashCtrl[g_redoWorker->slotId]; - ondemand_htab_ctrl_t *targetHtabCtrl = g_dispatcher->pageLines[g_redoWorker->slotId].managerThd->redoItemHashCtrl; ondemand_htab_ctrl_t *nextHtabCtrlHold = (ondemand_htab_ctrl_t 
*)procHtabCtrl->nextHTabCtrl; // nextHtabCtrl for hold the next HtabCtrl ondemand_htab_ctrl_t *nextHtabCtrlFree = procHtabCtrl; // nextHtabCtrl for free space g_dispatcher->pageLines[g_redoWorker->slotId].managerThd->redoItemHashCtrl = @@ -1178,11 +1211,6 @@ void PageManagerProcLsnForwarder(RedoItem *lsnForwarder) PageManagerAddRedoItemToSegWorkers(lsnForwarder); PageManagerAddRedoItemToHashMapManager(lsnForwarder); PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - - for (uint32 i = 0; i < WorkerNumPerMng; ++i) { - AddPageRedoItem(myRedoLine->redoThd[i], lsnForwarder); - } PageManagerPruneIfRealtimeBuildFailover(); /* wait hashmapmng prune and segworker distribute segrecord to hashmap */ @@ -1436,6 +1464,7 @@ static void PageManagerPruneIfRealtimeBuildFailover() { if (SS_ONDEMAND_REALTIME_BUILD_FAILOVER && g_redoWorker->inRealtimeBuild) { PageManagerProcHashmapPrune(); + PageManagerAddRedoItemToSegWorkers(&g_forceDistributeMark); PageManagerMergeHashMapInRealtimeBuild(); g_redoWorker->inRealtimeBuild = false; } @@ -1772,7 +1801,8 @@ bool TrxnManagerDistributeItemsBeforeEnd(RedoItem *item) if (item == &g_redoEndMark) { exitFlag = true; } else if (item == (RedoItem *)&g_GlobalLsnForwarder) { - TrxnManagerPruneAndDistributeIfRealtimeBuildFailover(); + // trxn queue must be empty in ondemand realtime build failover + Assert(!(SS_ONDEMAND_REALTIME_BUILD_FAILOVER && g_redoWorker->inRealtimeBuild)); TrxnManagerProcLsnForwarder(item); } else if (item == (RedoItem *)&g_cleanupMark) { TrxnManagerProcCleanupMark(item); @@ -1809,7 +1839,6 @@ bool TrxnManagerDistributeItemsBeforeEnd(RedoItem *item) __FUNCTION__, &item->record); #endif TrxnManagerPruneIfQueueFullInRealtimeBuild(); - TrxnManagerPruneAndDistributeIfRealtimeBuildFailover(); TrxnManagerAddTrxnRecord(item, syncRecord); CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]); } @@ -1865,6 +1894,7 @@ void TrxnManagerMain() } } 
CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + TrxnManagerPruneAndDistributeIfRealtimeBuildFailover(); if (!SPSCBlockingQueueIsEmpty(g_redoWorker->queue)) { GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); RedoItem *item = (RedoItem *)SPSCBlockingQueueTop(g_redoWorker->queue); @@ -2406,11 +2436,6 @@ void RedoPageWorkerMain() SPSCBlockingQueuePop(g_redoWorker->queue); continue; } - if ((void *)redoblockstateHead == (void *)&g_GlobalLsnForwarder) { - PageWorkerProcLsnForwarder((RedoItem *)redoblockstateHead); - SPSCBlockingQueuePop(g_redoWorker->queue); - continue; - } RedoBufferInfo bufferinfo = {0}; bool notfound = false; bool updateFsm = false; @@ -2585,7 +2610,7 @@ void SendLsnFowarder() // update and read in the same thread, so no need atomic operation g_GlobalLsnForwarder.record.ReadRecPtr = g_redoWorker->lastReplayedReadRecPtr; g_GlobalLsnForwarder.record.EndRecPtr = g_redoWorker->lastReplayedEndRecPtr; - g_GlobalLsnForwarder.record.refcount = get_real_recovery_parallelism() - XLOG_READER_NUM; + g_GlobalLsnForwarder.record.refcount = get_real_recovery_parallelism() - XLOG_READER_NUM - PAGE_REDO_WORKER_NUM; g_GlobalLsnForwarder.record.isDecode = true; PutRecordToReadQueue(&g_GlobalLsnForwarder.record); } @@ -3297,6 +3322,10 @@ void SegWorkerMain() SegWorkerProcLsnForwarder((RedoItem *)redoblockstateHead); SPSCBlockingQueuePop(g_redoWorker->queue); continue; + } else if ((void *)redoblockstateHead == (void *)&g_forceDistributeMark) { + SegWorkerRedoIfRealtimeBuildFailover(); + SPSCBlockingQueuePop(g_redoWorker->queue); + continue; } Assert(GetCurrentXLogRecParseType(redoblockstateHead) == PARSE_TYPE_SEG); diff --git a/src/include/access/multi_redo_settings.h b/src/include/access/multi_redo_settings.h index 00b08f39dc..972db23f4a 100644 --- a/src/include/access/multi_redo_settings.h +++ b/src/include/access/multi_redo_settings.h @@ -61,6 +61,9 @@ static const int ONDEMAND_AUXILIARY_WORKER_NUM = 2; // segredoworker and ctrlw static 
const int MAX_EXTREME_THREAD_NUM = MAX_PARSE_WORKERS * MAX_REDO_WORKERS_PER_PARSE + MAX_PARSE_WORKERS + MAX_PARSE_WORKERS + TRXN_REDO_MANAGER_NUM + TRXN_REDO_WORKER_NUM + XLOG_READER_NUM; +#define PAGE_REDO_WORKER_NUM (g_instance.attr.attr_storage.recovery_redo_workers_per_paser_worker * \ + g_instance.attr.attr_storage.batch_redo_num) + #ifndef ENABLE_LITE_MODE static const int MAX_RECOVERY_THREAD_NUM = (MAX_EXTREME_THREAD_NUM > MOST_FAST_RECOVERY_LIMIT) ? MAX_EXTREME_THREAD_NUM : MOST_FAST_RECOVERY_LIMIT; diff --git a/src/include/access/ondemand_extreme_rto/page_redo.h b/src/include/access/ondemand_extreme_rto/page_redo.h index 2046e280af..25585c5a9d 100644 --- a/src/include/access/ondemand_extreme_rto/page_redo.h +++ b/src/include/access/ondemand_extreme_rto/page_redo.h @@ -60,6 +60,7 @@ static const uint32 ADVANCE_GLOBALLSN_INTERVAL = 1; /* unit second */ extern uint32 g_ondemandXLogParseMemFullValue; extern uint32 g_ondemandXLogParseMemCancelPauseVaule; +extern uint32 g_ondemandXLogParseMemCancelPauseVaulePerPipeline; extern uint32 g_ondemandRealtimeBuildQueueFullValue; typedef bool (*OndemandCheckPauseCB)(void); -- Gitee From 1d428758d2389521a40aab6e263173a9ebf4490e Mon Sep 17 00:00:00 2001 From: jemappellehc <386956049@qq.com> Date: Thu, 29 Aug 2024 17:30:58 +0800 Subject: [PATCH 234/347] sonic hash agg bugfix (cherry picked from commit 3e042de906cccd8a598d0c93f0ac0887b12de1a1) --- .../vecexecutor/vectorsonic/vsonichashagg.cpp | 108 ++++++++++++------ src/test/regress/expected/hw_groupingsets.out | 2 +- 2 files changed, 71 insertions(+), 39 deletions(-) diff --git a/src/gausskernel/runtime/vecexecutor/vectorsonic/vsonichashagg.cpp b/src/gausskernel/runtime/vecexecutor/vectorsonic/vsonichashagg.cpp index b0972b2c44..720928b054 100644 --- a/src/gausskernel/runtime/vecexecutor/vectorsonic/vsonichashagg.cpp +++ b/src/gausskernel/runtime/vecexecutor/vectorsonic/vsonichashagg.cpp @@ -222,6 +222,74 @@ bool isAggrefSonicEnable(Oid aggfnoid) } } +static bool 
check_sonic_hash_agg_walker(Node* node) +{ + switch (nodeTag(node)) { + case T_SubPlan: { + SubPlan* sub_plan = (SubPlan*)node; + if (sub_plan->testexpr != NULL && IsA(sub_plan->testexpr, OpExpr)) { + OpExpr* op_expr = (OpExpr*)sub_plan->testexpr; + List* op_list = op_expr->args; + ListCell* lop = NULL; + foreach (lop, op_list) { + Expr* op_arg = (Expr*)lfirst(lop); + if (IsA(op_arg, Aggref)) { + Aggref* op_aggref = (Aggref*)op_arg; + if (!isAggrefSonicEnable(op_aggref->aggfnoid)) { + return true; + } + } + } + } else { + return true; + } + break; + } + case T_OpExpr: { + OpExpr* op_expr = (OpExpr*)node; + List* op_args = op_expr->args; + ListCell* lop = NULL; + foreach (lop, op_args) { + Expr* op_arg = (Expr*)lfirst(lop); + if (IsA(op_arg, Aggref)) { + Aggref* op_aggref = (Aggref*)op_arg; + if (!isAggrefSonicEnable(op_aggref->aggfnoid)) { + return true; + } + } + } + break; + } + case T_Aggref: { + Aggref* agg_ref = (Aggref*)node; + + if (!isAggrefSonicEnable(agg_ref->aggfnoid)) { + return true; + } + + /* count(*) has no args */ + if (agg_ref->aggfnoid == COUNTOID || agg_ref->aggfnoid == ANYCOUNTOID) { + break; + } + + Expr* ref_expr = (Expr*)linitial(agg_ref->args); + /* We only support simple expression cases */ + if (!isExprSonicEnable(ref_expr)) { + return true; + } + break; + } + default: { + if (!isExprSonicEnable((Expr*) node)) { + return true; + } + return expression_tree_walker(node, (bool (*)())check_sonic_hash_agg_walker, (void*)NULL); + } + } + + return expression_tree_walker(node, (bool (*)())check_sonic_hash_agg_walker, (void*)NULL); +} + /* * @Description : Decide use Sonic Hash Agg routine or not. * @in agg : Vector Aggregation Node information. 
@@ -298,44 +366,8 @@ bool isSonicHashAggEnable(VecAgg* node) List* qual_list = node->plan.qual; foreach (lc, qual_list) { Expr* qual_expr = (Expr*)lfirst(lc); - switch (nodeTag(qual_expr)) { - case T_SubPlan: { - SubPlan* sub_plan = (SubPlan*)qual_expr; - if (sub_plan->testexpr != NULL && IsA(sub_plan->testexpr, OpExpr)) { - OpExpr* op_expr = (OpExpr*)sub_plan->testexpr; - List* op_list = op_expr->args; - ListCell* lop = NULL; - foreach (lop, op_list) { - Expr* op_arg = (Expr*)lfirst(lop); - if (IsA(op_arg, Aggref)) { - Aggref* op_aggref = (Aggref*)op_arg; - if (!isAggrefSonicEnable(op_aggref->aggfnoid)) { - return false; - } - } - } - } else { - return false; - } - break; - } - case T_OpExpr: { - OpExpr* op_expr = (OpExpr*)qual_expr; - List* op_args = op_expr->args; - ListCell* lop = NULL; - foreach (lop, op_args) { - Expr* op_arg = (Expr*)lfirst(lop); - if (IsA(op_arg, Aggref)) { - Aggref* op_aggref = (Aggref*)op_arg; - if (!isAggrefSonicEnable(op_aggref->aggfnoid)) { - return false; - } - } - } - break; - } - default: - return false; + if (check_sonic_hash_agg_walker((Node*) qual_expr)) { + return false; } } diff --git a/src/test/regress/expected/hw_groupingsets.out b/src/test/regress/expected/hw_groupingsets.out index c2303cbb8e..078c702464 100755 --- a/src/test/regress/expected/hw_groupingsets.out +++ b/src/test/regress/expected/hw_groupingsets.out @@ -2186,7 +2186,7 @@ explain (verbose on, costs off) select a, sum(c) from vec_t1 group by grouping --------------------------------------------------- Row Adapter Output: a, (sum(c)) - -> Vector Sonic Hash Aggregate + -> Vector Hash Aggregate Output: a, sum(c) Group By Key: vec_t1.a Filter: (GROUPING(vec_t1.a) = 0) -- Gitee From 9fc0c6033340e6072cdd3841605f53721ad20410 Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Thu, 29 Aug 2024 15:01:59 +0800 Subject: [PATCH 235/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E5=9C=A8=E5=9B=9E=E6=94=BE=E6=AD=A3=E5=B8=B8=E7=BB=93?= 
=?UTF-8?q?=E6=9D=9F=E6=97=B6=EF=BC=8C=E6=94=B6=E5=88=B0shutdownrequst?= =?UTF-8?q?=EF=BC=8C=E5=AF=BC=E8=87=B4=E6=8A=A5=E9=94=99StopRecoveryWorker?= =?UTF-8?q?s=20wait=20too=20long!!!?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/transam/extreme_rto/page_redo.cpp | 7 ++++++- .../access/transam/ondemand_extreme_rto/page_redo.cpp | 8 +++++++- .../access/transam/parallel_recovery/page_redo.cpp | 7 ++++++- src/gausskernel/storage/access/transam/xlog.cpp | 4 +++- 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp index 864bc5698a..4b14847609 100755 --- a/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp @@ -400,6 +400,11 @@ void HandlePageRedoInterrupts() HandlePageRedoInterruptsImpl(); } +static void LastMarkReachedBeforePageRedoExit(int code, Datum arg) +{ + LastMarkReached(); +} + void clean_smgr(uint64 &clear_redo_fd_count) { const uint64 clear_redo_fd_count_mask = 0x3FFFFF; @@ -2649,6 +2654,7 @@ void ParallelRedoThreadMain() t_thrd.page_redo_cxt.redo_worker_ptr = g_redoWorker; // regitster default interrupt call back (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + on_shmem_exit(LastMarkReachedBeforePageRedoExit, 0); SetupSignalHandlers(); InitGlobals(); @@ -2665,7 +2671,6 @@ void ParallelRedoThreadMain() ResourceManagerStop(); ereport(LOG, (errmsg("Page-redo-worker thread %u terminated, role:%u, slotId:%u, retcode %u.", g_redoWorker->id, g_redoWorker->role, g_redoWorker->slotId, retCode))); - LastMarkReached(); pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), PAGE_REDO_WORKER_EXIT); diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp 
b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index ac912e0602..ee9dcda12d 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -415,6 +415,11 @@ void HandlePageRedoInterrupts() } } +static void LastMarkReachedBeforePageRedoExit(int code, Datum arg) +{ + LastMarkReached(); +} + void ReferenceRedoItem(void *item) { RedoItem *redoItem = (RedoItem *)item; @@ -941,6 +946,7 @@ static void WaitAndTryReleaseWorkerReplayedRec(PageRedoPipeline *myRedoLine, uin break; } } + RedoInterruptCallBack(); ReleaseReplayedInParse(); } } @@ -3729,6 +3735,7 @@ void ParallelRedoThreadMain() g_redoWorker->role, g_redoWorker->slotId))); // regitster default interrupt call back (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + on_shmem_exit(LastMarkReachedBeforePageRedoExit, 0); SetupSignalHandlers(); InitGlobals(); @@ -3742,7 +3749,6 @@ void ParallelRedoThreadMain() ResourceManagerStop(); ereport(LOG, (errmsg("Page-redo-worker thread %u terminated, role:%u, slotId:%u, retcode %u.", g_redoWorker->id, g_redoWorker->role, g_redoWorker->slotId, retCode))); - LastMarkReached(); pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), PAGE_REDO_WORKER_EXIT); diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp index c70f63aa34..77ab7ed0ac 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp @@ -284,6 +284,11 @@ void HandlePageRedoInterrupts() } } +static void LastMarkReachedBeforePageRedoExit(int code, Datum arg) +{ + LastMarkReached(); +} + /* HandleRedoPageRepair * if the page crc verify failed, call the function record the bad block. 
*/ @@ -315,6 +320,7 @@ void PageRedoWorkerMain() SetupSignalHandlers(); (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + on_shmem_exit(LastMarkReachedBeforePageRedoExit, 0); if (g_instance.pid_cxt.PageRepairPID != 0) { (void)RegisterRedoPageRepairCallBack(HandleRedoPageRepair); } @@ -330,7 +336,6 @@ void PageRedoWorkerMain() StandbyReleaseAllLocks(); ResourceManagerStop(); ereport(LOG, (errmsg("Page-redo-worker thread %u terminated, retcode %d.", g_redoWorker->id, retCode))); - LastMarkReached(); pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->originId].threadState), PAGE_REDO_WORKER_EXIT); proc_exit(0); diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 6d61810da7..22af099b58 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -11931,7 +11931,9 @@ void ShutdownXLOG(int code, Datum arg) if (SS_STANDBY_FAILOVER || SS_STANDBY_PROMOTING) { ereport(LOG, (errmsg("[SS failover/SS switchover] Standby promote: skipping shutdown checkpoint"))); - } else { + } else if (SS_PRIMARY_NORMAL_REFORM && SS_CLUSTER_ONDEMAND_NOT_NORAML) { + ereport(LOG, (errmsg("[SS normal reform] Normal reform but ondemand recovery not finish in the last time: skipping shutdown checkpoint"))); + }else { if (RecoveryInProgress()) { (void)CreateRestartPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE); } else { -- Gitee From e2c1837fdb63edeb4529db8d12b0c7b55c1c5d2b Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Sat, 31 Aug 2024 19:22:34 +0800 Subject: [PATCH 236/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=89=93=E5=BC=80pro?= =?UTF-8?q?c=5Foutparam=5Foverride=E5=90=8E=EF=BC=8C=E8=A7=86=E5=9B=BE?= =?UTF-8?q?=E4=BE=9D=E8=B5=96package.function=EF=BC=8C=E6=89=A7=E8=A1=8C?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E6=97=B6=E8=BF=94=E5=9B=9E=E7=BB=93=E6=9E=9C?= =?UTF-8?q?=E4=B8=8D=E6=AD=A3=E7=A1=AE=E7=9A=84=E9=97=AE=E9=A2=98=20?= 
=?UTF-8?q?=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/runtime/executor/execQual.cpp | 20 +++++++---- .../runtime/vecexecutor/vecexpression.cpp | 12 +++++++ src/include/fmgr.h | 2 ++ src/include/vecexecutor/vecexpression.h | 3 ++ .../expected/out_param_func_overload.out | 34 +++++++++++++++++++ .../regress/sql/out_param_func_overload.sql | 25 ++++++++++++++ 6 files changed, 89 insertions(+), 7 deletions(-) diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index f7ace87578..3b3594d28f 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -2199,14 +2199,8 @@ static void tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc) } } -void set_result_for_plpgsql_language_function_with_outparam(FuncExprState *fcache, Datum *result, bool *isNull) +void set_result_for_plpgsql_language_function_with_outparam(Datum *result, bool *isNull) { - if (!IsA(fcache->xprstate.expr, FuncExpr)) { - return; - } - if (!fcache->is_plpgsql_func_with_outparam) { - return; - } HeapTupleHeader td = DatumGetHeapTupleHeader(*result); TupleDesc tupdesc; PG_TRY(); @@ -2237,6 +2231,18 @@ void set_result_for_plpgsql_language_function_with_outparam(FuncExprState *fcach pfree(nulls); } +void set_result_for_plpgsql_language_function_with_outparam(FuncExprState *fcache, Datum *result, bool *isNull) +{ + if (!IsA(fcache->xprstate.expr, FuncExpr)) { + return; + } + if (!fcache->is_plpgsql_func_with_outparam) { + return; + } + return set_result_for_plpgsql_language_function_with_outparam(result, isNull); +} + + bool ExecSetArgIsByValue(FunctionCallInfo fcinfo) { for (int i = 0; i < fcinfo->nargs; i++) { diff --git a/src/gausskernel/runtime/vecexecutor/vecexpression.cpp b/src/gausskernel/runtime/vecexecutor/vecexpression.cpp index c803c8f4c4..1b81973479 100644 --- 
a/src/gausskernel/runtime/vecexecutor/vecexpression.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecexpression.cpp @@ -1800,6 +1800,14 @@ static ScalarVector* GenericFunctionT(PG_FUNCTION_ARGS) if (!fenced) { rowcinfo->isnull = false; result = RowFunction(rowcinfo); + + if (fcinfo->is_plpgsql_language_function_with_outparam) { + bool is_null = false; + set_result_for_plpgsql_language_function_with_outparam(&result, &is_null); + if (is_null == true) { + rowcinfo->isnull = true; + } + } if (rowcinfo->isnull == false) { presult[i] = ScalarVector::DatumToScalarT(result, false); SET_NOTNULL(presultFlag[i]); @@ -2090,6 +2098,10 @@ static ScalarVector* ExecMakeVecFunctionResult( fcinfo->nargs += EXTRA_NARGS; fcinfo->isnull = false; + if (IsA(fcache->xprstate.expr, FuncExpr) && is_function_with_plpgsql_language_and_outparam(fcache->func.fn_oid)) { + fcinfo->is_plpgsql_language_function_with_outparam = true; + } + result = VecFunctionCallInvoke(fcinfo); fcinfo->nargs -= EXTRA_NARGS; pgstat_end_function_usage(&fcusage, true); diff --git a/src/include/fmgr.h b/src/include/fmgr.h index cb0d2e30a5..127b5d36d7 100644 --- a/src/include/fmgr.h +++ b/src/include/fmgr.h @@ -170,6 +170,7 @@ typedef struct FunctionCallInfoData { UDFInfoType udfInfo; StartWithFuncEvalInfo swinfo; CoercionContext ccontext; + bool is_plpgsql_language_function_with_outparam; FunctionCallInfoData() { @@ -185,6 +186,7 @@ typedef struct FunctionCallInfoData { isnull = false; can_ignore = false; ccontext = COERCION_UNKNOWN; + is_plpgsql_language_function_with_outparam = false; } } FunctionCallInfoData; diff --git a/src/include/vecexecutor/vecexpression.h b/src/include/vecexecutor/vecexpression.h index 19cad8aea2..1a66fb0a1a 100644 --- a/src/include/vecexecutor/vecexpression.h +++ b/src/include/vecexecutor/vecexpression.h @@ -34,4 +34,7 @@ GenericArgExtract ChooseExtractFun(Oid Dtype, Oid fn_oid = 0); +extern void set_result_for_plpgsql_language_function_with_outparam(Datum *result, bool *isNull); + + #endif 
/* VECEXPRESSION_H_ */ diff --git a/src/test/regress/expected/out_param_func_overload.out b/src/test/regress/expected/out_param_func_overload.out index 8c76cecb5d..dd1512266a 100644 --- a/src/test/regress/expected/out_param_func_overload.out +++ b/src/test/regress/expected/out_param_func_overload.out @@ -293,6 +293,40 @@ drop package pkg_type; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to function out_param_func_overload.func(integer) drop cascades to function out_param_func_overload.func2(integer) +CREATE OR REPLACE PACKAGE pac_test_1 AS +FUNCTION f_test_1(para1 in out int, para2 in out int, para3 in out int) RETURN int; +END pac_test_1; +/ +CREATE OR REPLACE PACKAGE BODY pac_test_1 AS +FUNCTION f_test_1(para1 in out int, para2 in out int, para3 in out int) +RETURN int IS +BEGIN +RETURN 1; +END; +END pac_test_1; +/ +create table t1(c1 int,c2 text) with (ORIENTATION=COLUMN);; +insert into t1 select a,a || 'test' from generate_series(1,10) as a; +create view v1 as select c1,c2,pac_test_1.f_test_1(c1,c1,c1) from t1; +select * from v1; + c1 | c2 | f_test_1 +----+--------+---------- + 1 | 1test | 1 + 2 | 2test | 1 + 3 | 3test | 1 + 4 | 4test | 1 + 5 | 5test | 1 + 6 | 6test | 1 + 7 | 7test | 1 + 8 | 8test | 1 + 9 | 9test | 1 + 10 | 10test | 1 +(10 rows) + +drop view v1; +drop package pac_test_1; +NOTICE: drop cascades to function out_param_func_overload.f_test_1(integer,integer,integer) +drop table t1; --clean reset behavior_compat_options; drop schema out_param_func_overload cascade; diff --git a/src/test/regress/sql/out_param_func_overload.sql b/src/test/regress/sql/out_param_func_overload.sql index 06e4480318..1710d96dbc 100644 --- a/src/test/regress/sql/out_param_func_overload.sql +++ b/src/test/regress/sql/out_param_func_overload.sql @@ -238,6 +238,31 @@ END; drop package pkg_type; +CREATE OR REPLACE PACKAGE pac_test_1 AS +FUNCTION f_test_1(para1 in out int, para2 in out int, para3 in out int) RETURN int; +END pac_test_1; +/ + +CREATE OR REPLACE 
PACKAGE BODY pac_test_1 AS +FUNCTION f_test_1(para1 in out int, para2 in out int, para3 in out int) +RETURN int IS +BEGIN +RETURN 1; +END; +END pac_test_1; +/ + +create table t1(c1 int,c2 text) with (ORIENTATION=COLUMN);; +insert into t1 select a,a || 'test' from generate_series(1,10) as a; + +create view v1 as select c1,c2,pac_test_1.f_test_1(c1,c1,c1) from t1; +select * from v1; + +drop view v1; +drop package pac_test_1; +drop table t1; + + --clean reset behavior_compat_options; -- Gitee From 5b3f49a811877463489fc34044082d6602aa5c76 Mon Sep 17 00:00:00 2001 From: cchen676 Date: Mon, 2 Sep 2024 14:15:29 +0800 Subject: [PATCH 237/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=8C=89=E9=9C=80?= =?UTF-8?q?=E5=9B=9E=E6=94=BEredo=E9=98=B6=E6=AE=B5=E5=8F=AF=E8=83=BD?= =?UTF-8?q?=E5=8F=91=E7=94=9F=E7=9A=84segmentcheckfailed=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/buffer/bufmgr.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index 196ef801ad..c31b7b4103 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -2302,6 +2302,7 @@ Buffer ReadBuffer_common_for_dms(ReadBufferMode readmode, BufferDesc* buf_desc, static inline void BufferDescSetPBLK(BufferDesc *buf, const XLogPhyBlock *pblk) { if (pblk != NULL) { + Assert(PhyBlockIsValid(*pblk)); buf->extra->seg_fileno = pblk->relNode; buf->extra->seg_blockno = pblk->block; } @@ -3297,6 +3298,14 @@ retry_new_buffer: } } + /* set Physical segment file. 
*/ + if (ENABLE_DMS && pblk != NULL) { + Assert(PhyBlockIsValid(*pblk)); + buf->extra->seg_fileno = pblk->relNode; + buf->extra->seg_blockno = pblk->block; + MarkReadPblk(buf->buf_id, pblk); + } + return buf; } -- Gitee From 51f6f536e57f2b9be12de90a6f70ac8516103f16 Mon Sep 17 00:00:00 2001 From: dongning12 Date: Mon, 12 Aug 2024 15:50:00 +0800 Subject: [PATCH 238/347] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91dms=E7=9A=84=E7=BA=BF=E7=A8=8B=E6=89=93?= =?UTF-8?q?=E5=8D=B0error=E6=97=A5=E5=BF=97=E9=9C=80=E8=A6=81=E4=BD=BF?= =?UTF-8?q?=E7=94=A8try-catch=E7=BB=93=E6=9E=84=EF=BC=8C=E5=90=A6=E5=88=99?= =?UTF-8?q?=E7=BA=BF=E7=A8=8B=E4=BC=9A=E9=80=80=E5=87=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ddes/adapter/ss_dms_callback.cpp | 78 +++++++++++-------- 1 file changed, 47 insertions(+), 31 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index a0009f049e..46e0c18753 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -52,6 +52,8 @@ #include "storage/buf/bufmgr.h" #include "storage/ipc.h" +static void ReleaseResource(); + /* * Wake up startup process to replay WAL, or to notice that * failover has been requested. 
@@ -495,41 +497,55 @@ static int SetPrimaryIdOnStandby(int primary_id, unsigned long long list_stable) { char* type_string = NULL; type_string = SSGetLogHeaderTypeStr(); + int ret = DMS_SUCCESS; - for (int ntries = 0;; ntries++) { - SSReadControlFile(REFORM_CTRL_PAGE); /* need to double check */ - if (g_instance.dms_cxt.SSReformerControl.primaryInstId == primary_id && - g_instance.dms_cxt.SSReformerControl.list_stable == list_stable) { - ereport(LOG, (errmodule(MOD_DMS), - errmsg("%s Reform success, this is a standby:%d confirming new primary:%d, list_stable:%llu, " - "confirm ntries=%d.", type_string, SS_MY_INST_ID, primary_id, list_stable, ntries))); - return DMS_SUCCESS; - } else { - if (dms_reform_failed()) { - ereport(ERROR, - (errmodule(MOD_DMS), errmsg("%s Failed to confirm new primary: %d, list_stable:%llu, " - "control file indicates primary is %d, list_stable%llu; dms reform failed.", - type_string, (int)primary_id, list_stable, - g_instance.dms_cxt.SSReformerControl.primaryInstId, - g_instance.dms_cxt.SSReformerControl.list_stable))); - return DMS_ERROR; - } - if (ntries >= WAIT_REFORM_CTRL_REFRESH_TRIES) { - ereport(ERROR, - (errmodule(MOD_DMS), errmsg("%s Failed to confirm new primary: %d, list_stable:%llu, " - " control file indicates primary is %d, list_stable%llu; wait timeout.", - type_string, (int)primary_id, list_stable, - g_instance.dms_cxt.SSReformerControl.primaryInstId, - g_instance.dms_cxt.SSReformerControl.list_stable))); - return DMS_ERROR; + uint32 saveInterruptHoldoffCount = t_thrd.int_cxt.InterruptHoldoffCount; + PG_TRY(); + { + for (int ntries = 0;; ntries++) { + SSReadControlFile(REFORM_CTRL_PAGE); /* need to double check */ + if (g_instance.dms_cxt.SSReformerControl.primaryInstId == primary_id && + g_instance.dms_cxt.SSReformerControl.list_stable == list_stable) { + ereport(LOG, (errmodule(MOD_DMS), + errmsg("%s Reform success, this is a standby:%d confirming new primary:%d, list_stable:%llu, " + "confirm ntries=%d.", type_string, 
SS_MY_INST_ID, primary_id, list_stable, ntries))); + ret = DMS_SUCCESS; + break; + } else { + if (dms_reform_failed()) { + ereport(ERROR, + (errmodule(MOD_DMS), errmsg("%s Failed to confirm new primary: %d, list_stable:%llu, " + "control file indicates primary is %d, list_stable%llu; dms reform failed.", + type_string, (int)primary_id, list_stable, + g_instance.dms_cxt.SSReformerControl.primaryInstId, + g_instance.dms_cxt.SSReformerControl.list_stable))); + ret = DMS_ERROR; + break; + } + if (ntries >= WAIT_REFORM_CTRL_REFRESH_TRIES) { + ereport(ERROR, + (errmodule(MOD_DMS), errmsg("%s Failed to confirm new primary: %d, list_stable:%llu, " + " control file indicates primary is %d, list_stable%llu; wait timeout.", + type_string, (int)primary_id, list_stable, + g_instance.dms_cxt.SSReformerControl.primaryInstId, + g_instance.dms_cxt.SSReformerControl.list_stable))); + ret = DMS_ERROR; + break; + } } - } - CHECK_FOR_INTERRUPTS(); - pg_usleep(REFORM_WAIT_TIME); /* wait 0.01 sec, then retry */ + CHECK_FOR_INTERRUPTS(); + pg_usleep(REFORM_WAIT_TIME); /* wait 0.01 sec, then retry */ + } } - - return DMS_ERROR; + PG_CATCH(); + { + t_thrd.int_cxt.InterruptHoldoffCount = saveInterruptHoldoffCount; + ReleaseResource(); + ret = DMS_ERROR; + } + PG_END_TRY(); + return ret; } /* called on both new primary and all standby nodes to refresh status */ -- Gitee From b25189e014b7fb7542aa5b50edeb2bf0dcc3d10b Mon Sep 17 00:00:00 2001 From: shijuzheng1997 Date: Mon, 26 Aug 2024 19:42:22 +0800 Subject: [PATCH 239/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8DparseNodeString()=20e?= =?UTF-8?q?rror=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/readfuncs.cpp | 32 ++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index bb52f82a63..7df5fdb85d 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ 
b/src/common/backend/nodes/readfuncs.cpp @@ -4189,6 +4189,34 @@ static SortGroup* _readSortGroup(SortGroup* local_node) READ_DONE(); } +static SortBy* _readSortBy(SortBy* local_node) +{ + READ_LOCALS_NULL(SortBy); + READ_TEMP_LOCALS(); + + READ_NODE_FIELD(node); + READ_ENUM_FIELD(sortby_dir, SortByDir); + READ_ENUM_FIELD(sortby_nulls, SortByNulls); + READ_NODE_FIELD(useOp); + READ_INT_FIELD(location); + + READ_DONE(); +} + +static A_Const* _readAConst(A_Const* local_node) +{ + READ_LOCALS_NULL(A_Const); + READ_TEMP_LOCALS(); + + token = pg_strtok(&length); + Value *ptr = (Value*)nodeRead(NULL, 0); + errno_t err = memcpy_s(&local_node->val, sizeof(Value), ptr, sizeof(Value)); + securec_check(err, "\0", "\0"); + READ_INT_FIELD(location); + + READ_DONE(); +} + static Unique* _readUnique(Unique* local_node) { READ_LOCALS_NULL(Unique); @@ -6765,6 +6793,10 @@ Node* parseNodeString(void) return_value = _readSort(NULL); } else if (MATCH("SORTGROUP", 9)) { return_value = _readSortGroup(NULL); + } else if (MATCH("SORTBY", 6)) { + return_value = _readSortBy(NULL); + } else if (MATCH("A_CONST", 7)) { + return_value = _readAConst(NULL); } else if (MATCH("UNIQUE", 6)) { return_value = _readUnique(NULL); } else if (MATCH("PLANNEDSTMT", 11)) { -- Gitee From 9045a72a96518ac93512ee304b6410a6a47475ff Mon Sep 17 00:00:00 2001 From: KeKe Date: Thu, 29 Aug 2024 20:41:25 +0800 Subject: [PATCH 240/347] =?UTF-8?q?cherry=20pick=2077d885f=20from=20https:?= =?UTF-8?q?//gitee.com/wangxingmiao/openGauss-server/pulls/6162=20?= =?UTF-8?q?=E6=9E=81=E7=AE=80=E5=AE=89=E8=A3=85demo=20database=E8=BE=93?= =?UTF-8?q?=E5=85=A5yes=E6=8A=A5=E9=94=99=E9=97=AE=E9=A2=98=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- simpleInstall/install.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/simpleInstall/install.sh b/simpleInstall/install.sh index f256cbd70a..ef6bffe9e4 100644 --- a/simpleInstall/install.sh +++ 
b/simpleInstall/install.sh @@ -363,11 +363,11 @@ function fn_install_demoDB() then read -p "Would you like to create a demo database (yes/no)? " input fi - if [ $input == "yes" ] + if [ "$input"X == "yes"X ] then fn_load_demoDB 1>load.log 2>&1 fn_check_demoDB - elif [ $input == "no" ] + elif [ "$input"X == "no"X ] then return 2 else -- Gitee From a83b2231e9b510b35db29e3fcfc9ceccd5d0ad68 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 2 Sep 2024 15:52:27 +0800 Subject: [PATCH 241/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=88=9B=E5=BB=BAeve?= =?UTF-8?q?nt=E6=97=B6=E5=88=9D=E5=A7=8B=E4=B8=BAdisable=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/gram.y | 2 +- src/test/regress/expected/event.out | 13 +++++++++++++ src/test/regress/sql/event.sql | 4 ++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index d63e9923cd..e1202bfce6 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -15479,7 +15479,7 @@ callfunc_args: func_arg_expr n->end_time_expr = NULL; n->interval_time = NULL; n->complete_preserve = $12; - n->event_status = (EventStatus)$12; + n->event_status = (EventStatus)$13; n->event_comment_str = $14; n->event_query_str = $16; $$ = (Node *)n; diff --git a/src/test/regress/expected/event.out b/src/test/regress/expected/event.out index 809a9f2d4a..dab25c0c63 100644 --- a/src/test/regress/expected/event.out +++ b/src/test/regress/expected/event.out @@ -1015,6 +1015,19 @@ set role priv_c password 'event_123'; --fail Non-administrator users do not have the permission drop event if exists priv_e_a; ERROR: only event's owner have the permission to operate object "priv_e_a" +create event if not exists ee11 on schedule at now() + interval 1 second do update t1 set a = 0 where a 
= 1; +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +select job_name, enable, failure_msg from pg_job where job_name = 'ee11'; + job_name | enable | failure_msg +----------+--------+------------------------------------------- + ee11 | t | relation "t1" does not exist on datanode1 +(1 row) + \c event_b grant usage on schema priv_a to priv_c; set role priv_c password 'event_123'; diff --git a/src/test/regress/sql/event.sql b/src/test/regress/sql/event.sql index 1fa4122cac..93a28fabe2 100644 --- a/src/test/regress/sql/event.sql +++ b/src/test/regress/sql/event.sql @@ -440,6 +440,10 @@ set role priv_c password 'event_123'; --fail Non-administrator users do not have the permission drop event if exists priv_e_a; +create event if not exists ee11 on schedule at now() + interval 1 second do update t1 set a = 0 where a = 1; +select pg_sleep(2); +select job_name, enable, failure_msg from pg_job where job_name = 'ee11'; + \c event_b grant usage on schema priv_a to priv_c; set role priv_c password 'event_123'; -- Gitee From f39e926d7cf494a73332dac4d417606642d7dd12 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Mon, 26 Aug 2024 17:40:37 +0800 Subject: [PATCH 242/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=AD=98=E5=82=A8?= =?UTF-8?q?=E8=BF=87=E7=A8=8B=E6=89=A7=E8=A1=8Ccommit=E6=8A=A5=E9=94=99?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=20=EF=BC=88cherry=20picked=20commit?= =?UTF-8?q?=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_exec.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index 52da7b5af0..f3cbd2f333 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -7727,7 +7727,7 @@ static int exec_stmt_execsql(PLpgSQL_execstate* estate, PLpgSQL_stmt_execsql* st gsplsql_report_query(expr); #ifndef ENABLE_MULTIPLE_NODES ListCell* l 
= NULL; - bool isforbid = true; + bool isforbid = false; bool savedisAllowCommitRollback = false; bool needResetErrMsg = false; foreach (l, SPI_plan_get_plan_sources(expr->plan)) { -- Gitee From 0affe97dabb63e49e2b09f8ba9e39d19b28db733 Mon Sep 17 00:00:00 2001 From: wofanzheng <2399541971@qq.com> Date: Tue, 3 Sep 2024 10:35:19 +0800 Subject: [PATCH 243/347] =?UTF-8?q?=E8=B0=83=E6=95=B4=E5=8E=8B=E7=BC=A9?= =?UTF-8?q?=E8=A1=A8=E5=88=B7=E8=84=8F=E9=80=BB=E8=BE=91=E5=92=8C=E6=99=AE?= =?UTF-8?q?=E9=80=9A=E8=A1=A8=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/smgr/md.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/gausskernel/storage/smgr/md.cpp b/src/gausskernel/storage/smgr/md.cpp index e2e64375c2..4e57e76f0c 100644 --- a/src/gausskernel/storage/smgr/md.cpp +++ b/src/gausskernel/storage/smgr/md.cpp @@ -2157,10 +2157,6 @@ ExtentLocation StorageConvert(SMgrRelation sRel, ForkNumber forknum, BlockNumber } if (v != NULL) { fd = v->mdfd_vfd; - } else { - RelFileNode node = sRel->smgr_rnode.node; - ereport(ERROR, (errmsg("could not find valid location: [%u/%u/%u/%u]", node.spcNode, node.dbNode, node.relNode, - logicBlockNumber))); } return {.fd = fd, .relFileNode = sRel->smgr_rnode.node, -- Gitee From b8a8381bf36b6e12a7c0f25ac440008803d9509c Mon Sep 17 00:00:00 2001 From: laishenghao Date: Thu, 15 Aug 2024 15:26:05 +0800 Subject: [PATCH 244/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3samplescan=E8=A1=8C?= =?UTF-8?q?=E6=95=B0=E4=BC=B0=E7=AE=97=E5=B7=AE=E8=B7=9D=E5=A4=A7=EF=BC=8C?= =?UTF-8?q?=E6=89=A7=E8=A1=8C=E8=AE=A1=E5=88=92=E9=80=89=E6=8B=A9=E9=94=99?= =?UTF-8?q?=E8=AF=AF=E5=AF=BC=E8=87=B4=E7=9A=84=E6=80=A7=E8=83=BD=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=EF=BC=88cherry=20picked=20commit=20from=2084f04a5?= =?UTF-8?q?=EF=BC=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/path/costsize.cpp | 14 +-- 
src/test/regress/expected/tablesample_3.out | 103 +++++++++++++++----- src/test/regress/sql/tablesample_3.sql | 19 ++++ 3 files changed, 107 insertions(+), 29 deletions(-) diff --git a/src/gausskernel/optimizer/path/costsize.cpp b/src/gausskernel/optimizer/path/costsize.cpp index caa2c27c08..e1edaba99f 100755 --- a/src/gausskernel/optimizer/path/costsize.cpp +++ b/src/gausskernel/optimizer/path/costsize.cpp @@ -6230,14 +6230,14 @@ void finalize_dml_cost(ModifyTable* plan) * Parameters: * @in pctnode: node of percent args. * - * Return: float4 + * Return: float8 */ -static float4 get_samplefract(Node* pctnode) +static float8 get_samplefract(Node* pctnode) { - float4 samplefract; + float8 samplefract; if (IsA(pctnode, Const) && !((Const*)pctnode)->constisnull) { - samplefract = DatumGetFloat4(((Const*)pctnode)->constvalue); + samplefract = DatumGetFloat8(((Const*)pctnode)->constvalue); if (samplefract >= 0.0 && samplefract <= 100.0 && !isnan(samplefract)) { samplefract /= 100.0f; } else { @@ -6265,7 +6265,7 @@ static float4 get_samplefract(Node* pctnode) void system_samplescangetsamplesize(PlannerInfo* root, RelOptInfo* baserel, List* paramexprs) { Node* pctnode = NULL; - float4 samplefract; + float8 samplefract; /* Try to extract an estimate for the sample percentage */ pctnode = (Node*)linitial(paramexprs); @@ -6292,7 +6292,7 @@ void system_samplescangetsamplesize(PlannerInfo* root, RelOptInfo* baserel, List void bernoulli_samplescangetsamplesize(PlannerInfo* root, RelOptInfo* baserel, List* paramexprs) { Node* pctnode = NULL; - float4 samplefract; + float8 samplefract; /* Try to extract an estimate for the sample percentage */ pctnode = (Node*)linitial(paramexprs); @@ -6325,7 +6325,7 @@ void hybrid_samplescangetsamplesize(PlannerInfo* root, RelOptInfo* baserel, List foreach (lc, paramexprs) { Node* paramnode = (Node*)lfirst(lc); Node* pctnode = estimate_expression_value(root, paramnode); - float4 samplefract = 0.0; + float8 samplefract = 0.0; if (likely(pctnode)) 
{ samplefract = get_samplefract(pctnode); } else { diff --git a/src/test/regress/expected/tablesample_3.out b/src/test/regress/expected/tablesample_3.out index a592ef2b04..9adf72f124 100755 --- a/src/test/regress/expected/tablesample_3.out +++ b/src/test/regress/expected/tablesample_3.out @@ -20,15 +20,16 @@ select count(*) from ((select * from test_tablesample tablesample SYSTEM(20) REP (1 row) explain (costs off) select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left join test_tablesample2 tablesample BERNOULLI(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id where test_tablesample2.id is NULL; - QUERY PLAN ---------------------------------------------------------------------------------------- - Nested Loop Anti Join - Join Filter: (test_tablesample.id = test_tablesample2.id) + QUERY PLAN +--------------------------------------------------------------------------------------------- + Hash Anti Join + Hash Cond: (test_tablesample.id = test_tablesample2.id) -> Sample Scan on test_tablesample Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) - -> Sample Scan on test_tablesample2 - Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) -(6 rows) + -> Hash + -> Sample Scan on test_tablesample2 + Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) +(7 rows) select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left join test_tablesample2 tablesample BERNOULLI(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id where test_tablesample2.id is NULL; id | name | salary | id | name | salary @@ -37,15 +38,19 @@ select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left j set enable_hashjoin to off; explain (costs off) select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left join test_tablesample2 tablesample BERNOULLI(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id 
where test_tablesample2.id is NULL; - QUERY PLAN ---------------------------------------------------------------------------------------- - Nested Loop Anti Join - Join Filter: (test_tablesample.id = test_tablesample2.id) - -> Sample Scan on test_tablesample - Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) - -> Sample Scan on test_tablesample2 - Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) -(6 rows) + QUERY PLAN +--------------------------------------------------------------------------------------------- + Merge Anti Join + Merge Cond: (test_tablesample.id = test_tablesample2.id) + -> Sort + Sort Key: test_tablesample.id + -> Sample Scan on test_tablesample + Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) + -> Sort + Sort Key: test_tablesample2.id + -> Sample Scan on test_tablesample2 + Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) +(10 rows) select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left join test_tablesample2 tablesample BERNOULLI(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id where test_tablesample2.id is NULL; id | name | salary | id | name | salary @@ -54,15 +59,16 @@ select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left j set enable_mergejoin to off; explain (costs off) select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left join test_tablesample2 tablesample BERNOULLI(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id where test_tablesample2.id is NULL; - QUERY PLAN ---------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------- Nested Loop Anti Join Join Filter: (test_tablesample.id = test_tablesample2.id) -> Sample Scan on test_tablesample Sampling: bernoulli (50::double precision) 
REPEATABLE (200::double precision) - -> Sample Scan on test_tablesample2 - Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) -(6 rows) + -> Materialize + -> Sample Scan on test_tablesample2 + Sampling: bernoulli (50::double precision) REPEATABLE (200::double precision) +(7 rows) select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left join test_tablesample2 tablesample BERNOULLI(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id where test_tablesample2.id is NULL; id | name | salary | id | name | salary @@ -315,8 +321,61 @@ select * from test_tablesample tablesample SYSTEM(50) REPEATABLE (200) left join ----+------+--------+----+------+-------- (0 rows) +-- test rows estimation of samplescan +set enable_hashjoin to on; +create table ss_rows_t1 (a int); +create table ss_rows_t2 (b int); +create table ss_rows_t3 (c int); +insert into ss_rows_t1 values (generate_series(1, 20000)); +insert into ss_rows_t2 values (generate_series(1, 20000)); +insert into ss_rows_t3 values (generate_series(1, 20000)); +explain select a from ss_rows_t1 tablesample system (99.999999); +--?.* +--?.* +--? Sample Scan on ss_rows_t1 (cost=.* rows=\d{5} width=4) + Sampling: system (99.999999::double precision) +(2 rows) + +explain select b from ss_rows_t2 tablesample bernoulli (99.999999); +--?.* +--?.* +--? Sample Scan on ss_rows_t2 (cost=.* rows=\d{5} width=4) + Sampling: bernoulli (99.999999::double precision) +(2 rows) + +explain select c from ss_rows_t3 tablesample hybrid (99.999999,99.999999); +--?.* +--?.* +--? Sample Scan on ss_rows_t3 (cost=.* rows=\d{5} width=4) + Sampling: hybrid (99.999999::double precision, 99.999999::double precision) +(2 rows) + +explain select a, b, c from + ss_rows_t1 tablesample system (99.999999) repeatable (325), + ss_rows_t2 tablesample bernoulli (99.999999) repeatable (0), + ss_rows_t3 tablesample hybrid (99.999999,99.999999) repeatable (510) + where a = b and b = c; +--?.* +--?.* +--? 
Hash Join (.* width=12) + Hash Cond: (ss_rows_t1.a = ss_rows_t3.c) +--? -> Hash Join (.* width=8) + Hash Cond: (ss_rows_t1.a = ss_rows_t2.b) +--? -> Sample Scan on ss_rows_t1 (cost=.* rows=\d{5} width=4) + Sampling: system (99.999999::double precision) REPEATABLE (325::double precision) +--? -> Hash (cost=.* rows=\d{5} width=4) +--? -> Sample Scan on ss_rows_t2 (cost=.* rows=\d{5} width=4) + Sampling: bernoulli (99.999999::double precision) REPEATABLE (0::double precision) +--? -> Hash (cost=.* rows=\d{5} width=4) +--? -> Sample Scan on ss_rows_t3 (cost=.* rows=\d{5} width=4) + Sampling: hybrid (99.999999::double precision, 99.999999::double precision) REPEATABLE (510::double precision) +(12 rows) + reset search_path; drop schema tablesample_schema4 cascade; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 5 other objects DETAIL: drop cascades to table tablesample_schema4.test_tablesample drop cascades to table tablesample_schema4.test_tablesample2 +drop cascades to table tablesample_schema4.ss_rows_t1 +drop cascades to table tablesample_schema4.ss_rows_t2 +drop cascades to table tablesample_schema4.ss_rows_t3 diff --git a/src/test/regress/sql/tablesample_3.sql b/src/test/regress/sql/tablesample_3.sql index 696fef561e..f2794ec18e 100755 --- a/src/test/regress/sql/tablesample_3.sql +++ b/src/test/regress/sql/tablesample_3.sql @@ -95,5 +95,24 @@ select * from test_tablesample tablesample BERNOULLI(50) REPEATABLE (200) left j explain (costs off) select * from test_tablesample tablesample SYSTEM(50) REPEATABLE (200) left join test_tablesample2 tablesample SYSTEM(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id where test_tablesample2.id is NULL; select * from test_tablesample tablesample SYSTEM(50) REPEATABLE (200) left join test_tablesample2 tablesample SYSTEM(50) REPEATABLE (200) on test_tablesample.id=test_tablesample2.id where test_tablesample2.id is NULL; +-- test rows estimation of samplescan +set enable_hashjoin to on; +create 
table ss_rows_t1 (a int); +create table ss_rows_t2 (b int); +create table ss_rows_t3 (c int); +insert into ss_rows_t1 values (generate_series(1, 20000)); +insert into ss_rows_t2 values (generate_series(1, 20000)); +insert into ss_rows_t3 values (generate_series(1, 20000)); + +explain select a from ss_rows_t1 tablesample system (99.999999); +explain select b from ss_rows_t2 tablesample bernoulli (99.999999); +explain select c from ss_rows_t3 tablesample hybrid (99.999999,99.999999); + +explain select a, b, c from + ss_rows_t1 tablesample system (99.999999) repeatable (325), + ss_rows_t2 tablesample bernoulli (99.999999) repeatable (0), + ss_rows_t3 tablesample hybrid (99.999999,99.999999) repeatable (510) + where a = b and b = c; + reset search_path; drop schema tablesample_schema4 cascade; -- Gitee From 6f1014f4a6bb2c6e6783ef0ca81c1c2547437ebe Mon Sep 17 00:00:00 2001 From: cchen676 Date: Mon, 2 Sep 2024 16:58:03 +0800 Subject: [PATCH 245/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=BC=80=E5=90=AF?= =?UTF-8?q?=E5=A4=87=E6=9C=BA=E5=86=99=E8=BD=AC=E5=8F=91=EF=BC=8Cinsert=20?= =?UTF-8?q?into=E5=88=86=E5=8C=BA=E8=A1=A8=E5=9C=A8=E4=B8=BB=E6=9C=BA?= =?UTF-8?q?=E4=BE=A7=E7=AD=89=E9=94=81=E8=B6=85=E6=97=B6=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/process/tcop/postgres.cpp | 27 ++++++++++--------- .../storage/replication/libpqsw.cpp | 13 +++++---- src/include/replication/libpqsw.h | 4 +++ 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp index b364d40f8b..e9ac1d9145 100755 --- a/src/gausskernel/process/tcop/postgres.cpp +++ b/src/gausskernel/process/tcop/postgres.cpp @@ -2726,23 +2726,11 @@ static void exec_simple_query(const char* query_string, MessageType messageType, querystringForLibpqsw = query_string; } // create table as / select into / insert into - if 
(nodeTag(parsetree) == T_CreateTableAsStmt + if (NODETAG_IS_WRITE(parsetree) || ((nodeTag(parsetree) == T_SelectStmt) && ((SelectStmt*)parsetree)->intoClause != NULL) ) { libpqsw_set_redirect(true); } - - } - - if (ENABLE_REMOTE_EXECUTE && libpqsw_get_redirect()) { - if (libpqsw_process_query_message(commandTag, NULL, querystringForLibpqsw, is_multistmt, lnext(parsetree_item) == NULL)) { - libpqsw_trace_q_msg(commandTag, querystringForLibpqsw); - if (snapshot_set) { - PopActiveSnapshot(); - } - finish_xact_command(); - continue; - } } BeginCommand(commandTag, dest); @@ -2782,6 +2770,19 @@ static void exec_simple_query(const char* query_string, MessageType messageType, snapshot_set = true; } + if (ENABLE_REMOTE_EXECUTE && libpqsw_get_redirect()) { + if (libpqsw_process_query_message(commandTag, NULL, querystringForLibpqsw, is_multistmt, + lnext(parsetree_item) == NULL)) { + libpqsw_trace_q_msg(commandTag, querystringForLibpqsw); + if (snapshot_set) { + PopActiveSnapshot(); + } + CommandCounterIncrement(); + finish_xact_command(); + continue; + } + } + /* * Before going into planner, set default work mode. 
*/ diff --git a/src/gausskernel/storage/replication/libpqsw.cpp b/src/gausskernel/storage/replication/libpqsw.cpp index a5da8c3178..689c37ff07 100644 --- a/src/gausskernel/storage/replication/libpqsw.cpp +++ b/src/gausskernel/storage/replication/libpqsw.cpp @@ -962,6 +962,9 @@ static bool libpqsw_need_localexec_forSimpleQuery(const char *commandTag, List * return ret; } } + } else if (libpqsw_get_redirect() && libpqsw_get_transaction()) { + get_redirect_manager()->ss_standby_state |= SS_STANDBY_REQ_WRITE_REDIRECT; + return ret; } libpqsw_set_end(false); @@ -1144,11 +1147,6 @@ bool libpqsw_process_query_message(const char* commandTag, List* query_list, con } } - if (get_sw_cxt()->streamConn->xactStatus == PQTRANS_INERROR) { - libpqsw_disconnect(true); - AbortCurrentTransaction(); - } - // because we are not skip Q message process, so send_ready_for_query will be true after transfer. // but after transter, master will send Z message for front, so we not need to this flag. if (get_redirect_manager()->state.client_enable_ce || libpqsw_end_command(commandTag) || @@ -1163,6 +1161,11 @@ bool libpqsw_process_query_message(const char* commandTag, List* query_list, con SetTxnInfoForSSLibpqsw(get_redirect_manager()->ss_standby_sxid, get_redirect_manager()->ss_standby_scid); get_redirect_manager()->ss_standby_state &= ~(SS_STANDBY_RES_OK_REDIRECT | SS_STANDBY_REQ_WRITE_REDIRECT); } + + if (get_sw_cxt()->streamConn->xactStatus == PQTRANS_INERROR) { + libpqsw_disconnect(true); + AbortCurrentTransaction(); + } } else { // we need send_ready_for_query for init. 
libpqsw_set_end(true); diff --git a/src/include/replication/libpqsw.h b/src/include/replication/libpqsw.h index 7eb4402169..5b102d0627 100644 --- a/src/include/replication/libpqsw.h +++ b/src/include/replication/libpqsw.h @@ -40,6 +40,10 @@ enum PhaseType { LIBPQ_SW_BIND }; +#define NODETAG_IS_WRITE(a) \ + (nodeTag(a) == T_CreateTableAsStmt || nodeTag(a) == T_InsertStmt || nodeTag(a) == T_DeleteStmt || \ + nodeTag(a) == T_UpdateStmt || nodeTag(a) == T_MergeStmt) + void DestroyStringInfo(StringInfo str); /* process msg from backend */ bool libpqsw_process_message(int qtype, const StringInfo msg); -- Gitee From 7d862324c49e522f3180f2ca6cdbf679db28e530 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=92=9F=E6=89=BF=E5=BF=97?= <842536125@qq.com> Date: Mon, 2 Sep 2024 16:30:02 +0800 Subject: [PATCH 246/347] Changes to be committed: modified: data.cpp Changes to be committed: modified: data.cpp Changes to be committed: modified: data.cpp Changes to be committed: modified: lib/page_compression/PageCompression.cpp Changes to be committed: modified: bin/pg_probackup/data.cpp modified: lib/page_compression/PageCompression.cpp --- src/lib/page_compression/PageCompression.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/lib/page_compression/PageCompression.cpp b/src/lib/page_compression/PageCompression.cpp index 9718795a3b..7160868b0f 100644 --- a/src/lib/page_compression/PageCompression.cpp +++ b/src/lib/page_compression/PageCompression.cpp @@ -301,11 +301,6 @@ size_t CalRealWriteSize(char *buffer) return BLCKSZ; } - /* check the assignment made during backup */ - if (phdr->pd_lower & COMP_ASIGNMENT) { - return BLCKSZ; - } - size_t compressedBufferSize; uint8 pagetype = PageGetPageLayoutVersion(buffer); if (pagetype == PG_UHEAP_PAGE_LAYOUT_VERSION) { -- Gitee From c1b6c3f679051a84f9660b16b10f60b945eca064 Mon Sep 17 00:00:00 2001 From: Eureka Date: Mon, 2 Sep 2024 19:37:39 +0800 Subject: [PATCH 247/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=88=97=E5=AD=98?= 
=?UTF-8?q?=E8=A1=A8=20select=20(t).*=20from=20t;=E8=AF=AD=E5=8F=A5?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E4=B8=8D=E8=AF=86=E5=88=AB=E7=9A=84=E9=94=99?= =?UTF-8?q?=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_target.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/common/backend/parser/parse_target.cpp b/src/common/backend/parser/parse_target.cpp index 5c3f683647..69d0ed13c4 100644 --- a/src/common/backend/parser/parse_target.cpp +++ b/src/common/backend/parser/parse_target.cpp @@ -1288,7 +1288,13 @@ static List* ExpandRowReference(ParseState* pstate, Node* expr, bool targetlist) if (IsA(expr, Var) && ((Var*)expr)->vartype == RECORDOID) { tupleDesc = expandRecordVariable(pstate, (Var*)expr, 0); } else if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) { - tupleDesc = lookup_rowtype_tupdesc_copy(exprType(expr), exprTypmod(expr)); + if (IsA(expr, RowExpr) && ((RowExpr*)expr)->row_typeid == RECORDOID) { + RowExpr* rowexpr = (RowExpr*)expr; + tupleDesc = ExecTypeFromExprList(rowexpr->args, rowexpr->colnames); + BlessTupleDesc(tupleDesc); + } else { + tupleDesc = lookup_rowtype_tupdesc_copy(exprType(expr), exprTypmod(expr)); + } } if (unlikely(tupleDesc == NULL)) { -- Gitee From ff88b05008f9d99f2311c9c3295eb9355428e93b Mon Sep 17 00:00:00 2001 From: cchen676 Date: Fri, 23 Aug 2024 14:54:30 +0800 Subject: [PATCH 248/347] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E4=B8=BB=E6=9C=BA=E9=80=9A=E8=BF=87=E5=B9=BF=E6=92=AD=E5=BD=A2?= =?UTF-8?q?=E5=BC=8F=E6=94=B6=E9=9B=86=E5=A4=87=E6=9C=BA=E7=9A=84oldestxmi?= =?UTF-8?q?n=E7=9A=84=E5=BC=80=E5=85=B3=E5=92=8C=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/cluster_guc.conf | 1 + .../backend/utils/misc/guc/guc_storage.cpp | 11 +++ .../ddes/adapter/ss_dms_auxiliary.cpp | 4 + 
.../ddes/adapter/ss_dms_callback.cpp | 24 +++-- .../ddes/adapter/ss_transaction.cpp | 94 ++++++++++++++++++- src/gausskernel/ddes/adapter/ss_xmin.cpp | 4 + .../process/postmaster/postmaster.cpp | 4 +- .../process/threadpool/knl_instance.cpp | 1 + src/gausskernel/storage/ipc/procarray.cpp | 19 +++- src/include/ddes/dms/ss_common_attr.h | 19 ++-- src/include/ddes/dms/ss_transaction.h | 4 +- .../knl/knl_guc/knl_instance_attr_storage.h | 1 + src/include/knl/knl_instance.h | 1 + .../regress/output/recovery_2pc_tools.source | 1 + 14 files changed, 166 insertions(+), 22 deletions(-) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 7270e459fd..fc3e2166b4 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -810,6 +810,7 @@ light_comm|bool|0,0|NULL|NULL| enable_proc_coverage|bool|0,0|NULL|NULL| ignore_standby_lsn_window|int|0,2147483647|ms|NULL| ignore_feedback_xmin_window|int|0,2147483647|ms|NULL| +ss_enable_bcast_getoldestxmin|bool|0,0|NULL|NULL| ss_enable_bcast_snapshot|bool|0,0|NULL|NULL| ss_txnstatus_cache_size|int|0,524288|NULL|NULL| subscription_conflict_resolution|enum|error,apply_remote,keep_local|NULL|NULL| diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index ed30d27213..2011581669 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -1292,6 +1292,17 @@ static void InitStorageConfigureNamesBool() NULL}, #endif + {{"ss_enable_bcast_getoldestxmin", + PGC_POSTMASTER, + NODE_SINGLENODE, + SHARED_STORAGE_OPTIONS, + gettext_noop("Enable broadcast to get oldest xmin by primay."), + NULL}, + &g_instance.attr.attr_storage.dms_attr.enable_bcast_getoldestxmin, + false, + NULL, + NULL, + NULL}, {{"ss_enable_bcast_snapshot", PGC_POSTMASTER, NODE_SINGLENODE, diff --git a/src/gausskernel/ddes/adapter/ss_dms_auxiliary.cpp b/src/gausskernel/ddes/adapter/ss_dms_auxiliary.cpp 
index be335f4e03..9ded9efad0 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_auxiliary.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_auxiliary.cpp @@ -110,6 +110,10 @@ void SSInitXminInfo() return; } + if (ENABLE_SS_BCAST_GETOLDESTXMIN) { + return; + } + ss_xmin_info_t *xmin_info = &g_instance.dms_cxt.SSXminInfo; if (xmin_info->snap_cache != NULL) { return; diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 46e0c18753..164b104d9b 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -226,10 +226,6 @@ static int CBGetSnapshotData(void *db_handle, dms_opengauss_txn_snapshot_t *txn_ return DMS_ERROR; } - if (!SSCanFetchLocalSnapshotTxnRelatedInfo()) { - return DMS_ERROR; - } - int retCode = DMS_ERROR; SnapshotData snapshot = {SNAPSHOT_MVCC}; uint32 saveInterruptHoldoffCount = t_thrd.int_cxt.InterruptHoldoffCount; @@ -242,10 +238,12 @@ static int CBGetSnapshotData(void *db_handle, dms_opengauss_txn_snapshot_t *txn_ txn_snapshot->xmax = snapshot.xmax; txn_snapshot->snapshotcsn = snapshot.snapshotcsn; txn_snapshot->localxmin = u_sess->utils_cxt.RecentGlobalXmin; - if (RecordSnapshotBeforeSend(inst_id, txn_snapshot->xmin)) { - retCode = DMS_SUCCESS; + if (!ENABLE_SS_BCAST_GETOLDESTXMIN) { + if (RecordSnapshotBeforeSend(inst_id, txn_snapshot->xmin)) { + retCode = DMS_SUCCESS; + } } else { - retCode = DMS_ERROR; + retCode = DMS_SUCCESS; } } } @@ -1255,6 +1253,9 @@ static int32 CBProcessBroadcast(void *db_handle, dms_broadcast_context_t *broad_ PG_TRY(); { switch (bcast_op) { + case BCAST_GET_XMIN: + ret = SSGetOldestXmin(data, len, output_msg, output_msg_len); + break; case BCAST_SI: ret = SSProcessSharedInvalMsg(data, len); break; @@ -1286,7 +1287,7 @@ static int32 CBProcessBroadcast(void *db_handle, dms_broadcast_context_t *broad_ ret = SSCheckDbBackends(data, len, output_msg, output_msg_len); break; case BCAST_SEND_SNAPSHOT: - ret = 
SSUpdateLatestSnapshotOfStandby(data, len); + ret = SSUpdateLatestSnapshotOfStandby(data, len, output_msg, output_msg_len); break; case BCAST_RELOAD_REFORM_CTRL_PAGE: ret = SSReloadReformCtrlPage(len); @@ -1317,6 +1318,9 @@ static int32 CBProcessBroadcastAck(void *db_handle, dms_broadcast_context_t *bro SSBroadcastOpAck bcast_op = *(SSBroadcastOpAck *)data; switch (bcast_op) { + case BCAST_GET_XMIN_ACK: + ret = SSGetOldestXminAck((SSBroadcastXminAck *)data); + break; case BCAST_CHECK_DB_BACKENDS_ACK: ret = SSCheckDbBackendsAck(data, len); break; @@ -1959,7 +1963,9 @@ static void CBReformStartNotify(void *db_handle, dms_reform_start_context_t *rs_ reform_info->bitmap_nodes = rs_cxt->bitmap_participated; reform_info->bitmap_reconnect = rs_cxt->bitmap_reconnect; reform_info->dms_role = rs_cxt->role; - SSXminInfoPrepare(); + if (!ENABLE_SS_BCAST_GETOLDESTXMIN) { + SSXminInfoPrepare(); + } reform_info->reform_ver = reform_info->reform_start_time; reform_info->in_reform = true; char reform_type_str[reform_type_str_len] = {0}; diff --git a/src/gausskernel/ddes/adapter/ss_transaction.cpp b/src/gausskernel/ddes/adapter/ss_transaction.cpp index 52758ec82f..a52b345518 100644 --- a/src/gausskernel/ddes/adapter/ss_transaction.cpp +++ b/src/gausskernel/ddes/adapter/ss_transaction.cpp @@ -151,13 +151,20 @@ Snapshot SSGetSnapshotData(Snapshot snapshot) return snapshot; } -int SSUpdateLatestSnapshotOfStandby(char *data, uint32 len) +int SSUpdateLatestSnapshotOfStandby(char *data, uint32 len, char *output_msg, uint32 *output_msg_len) { if (unlikely(len != sizeof(SSBroadcastSnapshot))) { ereport(DEBUG1, (errmsg("invalid broadcast set snapshot message"))); return DMS_ERROR; } + if (ENABLE_SS_BCAST_GETOLDESTXMIN && output_msg != NULL && output_msg_len != NULL) { + SSBroadcastXminAck* getXminReq = (SSBroadcastXminAck *)output_msg; + getXminReq->type = BCAST_GET_XMIN_ACK; + GetOldestGlobalProcXmin(&(getXminReq->xmin)); + *output_msg_len = sizeof(SSBroadcastXminAck); + } + 
SSBroadcastSnapshot *received_data = (SSBroadcastSnapshot *)data; if (TransactionIdPrecedes(received_data->xmax, g_instance.dms_cxt.latest_snapshot_xmax)) { ereport(WARNING, (errmsg("Receive oldest one, can't update:%lu/%lu", received_data->xmin, @@ -1047,3 +1054,88 @@ bool SSCanFetchLocalSnapshotTxnRelatedInfo() return false; } + +int SSGetOldestXmin(char *data, uint32 len, char *output_msg, uint32 *output_msg_len) +{ + if (unlikely(len != sizeof(SSBroadcastXmin))) { + ereport(DEBUG1, (errmsg("invalid broadcast xmin message"))); + return DMS_ERROR; + } + + SSBroadcastXminAck* getXminReq = (SSBroadcastXminAck *)output_msg; + getXminReq->type = BCAST_GET_XMIN_ACK; + getXminReq->xmin = InvalidTransactionId; + GetOldestGlobalProcXmin(&(getXminReq->xmin)); + *output_msg_len = sizeof(SSBroadcastXminAck); + return DMS_SUCCESS; +} + +/* Calbulate the oldest xmin during broadcast xmin ack */ +int SSGetOldestXminAck(SSBroadcastXminAck *ack_data) +{ + TransactionId xmin_ack = pg_atomic_read_u64(&g_instance.dms_cxt.xminAck); + if (TransactionIdIsValid(ack_data->xmin) && TransactionIdIsNormal(ack_data->xmin) && + TransactionIdPrecedes(ack_data->xmin, xmin_ack)) { + pg_atomic_write_u64(&g_instance.dms_cxt.xminAck, ack_data->xmin); + } + return DMS_SUCCESS; +} + +bool SSGetOldestXminFromAllStandby(TransactionId xmin, TransactionId xmax, CommitSeqNo csn) +{ + dms_context_t dms_ctx; + InitDmsContext(&dms_ctx); + SSBroadcastXminAck xmin_bcast_ack; + unsigned int len_of_ack = sizeof(SSBroadcastXminAck); + SSBroadcastSnapshot latest_snapshot; + dms_broadcast_info_t dms_broad_info; + SSBroadcastXmin xmin_data; + int ret; + if (ENABLE_SS_BCAST_SNAPSHOT) { + latest_snapshot.xmin = xmin; + latest_snapshot.xmax = xmax; + latest_snapshot.csn = csn; + latest_snapshot.type = BCAST_SEND_SNAPSHOT; + dms_broad_info = { + .data = (char *)&latest_snapshot, + .len = sizeof(SSBroadcastSnapshot), + .output = (char *)&xmin_bcast_ack, + .output_len = &len_of_ack, + .scope = DMS_BROADCAST_ONLINE_LIST, 
+ .inst_map = 0, + .timeout = SS_BROADCAST_WAIT_ONE_SECOND, + .handle_recv_msg = (unsigned char)true, + .check_session_kill = (unsigned char)true + }; + } else { + xmin_data.type = BCAST_GET_XMIN; + xmin_data.xmin = InvalidTransactionId; + dms_broad_info = { + .data = (char *)&xmin_data, + .len = sizeof(SSBroadcastXmin), + .output = (char *)&xmin_bcast_ack, + .output_len = &len_of_ack, + .scope = DMS_BROADCAST_ONLINE_LIST, + .inst_map = 0, + .timeout = SS_BROADCAST_WAIT_ONE_SECOND, + .handle_recv_msg = (unsigned char)true, + .check_session_kill = (unsigned char)true + }; + } + + pg_atomic_write_u64(&g_instance.dms_cxt.xminAck, MaxTransactionId); + + bool bcast_snapshot = ENABLE_SS_BCAST_SNAPSHOT; + do { + ret = dms_broadcast_msg(&dms_ctx, &dms_broad_info); + if (ret == DMS_SUCCESS) { + return true; + } + + if (bcast_snapshot) { + pg_usleep(5000L); + } else { + return false; + } + } while (ret != DMS_SUCCESS); +} \ No newline at end of file diff --git a/src/gausskernel/ddes/adapter/ss_xmin.cpp b/src/gausskernel/ddes/adapter/ss_xmin.cpp index 7b4ce8e540..2a7b461221 100644 --- a/src/gausskernel/ddes/adapter/ss_xmin.cpp +++ b/src/gausskernel/ddes/adapter/ss_xmin.cpp @@ -218,6 +218,10 @@ void SSUpdateNodeOldestXmin(uint8 inst_id, unsigned long long oldest_xmin) void SSSyncOldestXminWhenReform(uint8 reformer_id) { + if (ENABLE_SS_BCAST_GETOLDESTXMIN) { + return; + } + ss_xmin_info_t *xmin_info = &g_instance.dms_cxt.SSXminInfo; ss_reform_info_t *reform_info = &g_instance.dms_cxt.SSReformInfo; diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index e56a4976bc..1919e13b16 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -3109,7 +3109,9 @@ int PostmasterMain(int argc, char* argv[]) if (g_instance.attr.attr_storage.dms_attr.enable_dms) { /* need to initialize before STARTUP */ DMSInit(); - g_instance.pid_cxt.DmsAuxiliaryPID = 
initialize_util_thread(DMS_AUXILIARY_THREAD); + if (!ENABLE_SS_BCAST_GETOLDESTXMIN) { + g_instance.pid_cxt.DmsAuxiliaryPID = initialize_util_thread(DMS_AUXILIARY_THREAD); + } } } diff --git a/src/gausskernel/process/threadpool/knl_instance.cpp b/src/gausskernel/process/threadpool/knl_instance.cpp index 30da9b02cd..b04fa18522 100755 --- a/src/gausskernel/process/threadpool/knl_instance.cpp +++ b/src/gausskernel/process/threadpool/knl_instance.cpp @@ -184,6 +184,7 @@ static void knl_g_dms_init(knl_g_dms_context *dms_cxt) { Assert(dms_cxt != NULL); dms_cxt->dmsProcSid = 0; + dms_cxt->xminAck = 0; dms_cxt->SSReformerControl.list_stable = 0; dms_cxt->SSReformerControl.primaryInstId = -1; dms_cxt->SSReformInfo.in_reform = false; diff --git a/src/gausskernel/storage/ipc/procarray.cpp b/src/gausskernel/storage/ipc/procarray.cpp index 4f012ce0a9..cf63dac2aa 100755 --- a/src/gausskernel/storage/ipc/procarray.cpp +++ b/src/gausskernel/storage/ipc/procarray.cpp @@ -5146,9 +5146,22 @@ void CalculateLocalLatestSnapshot(bool forceCalc) globalxmin = xmin; if (ENABLE_DMS && SS_PRIMARY_MODE) { - SSUpdateNodeOldestXmin(SS_MY_INST_ID, globalxmin); - globalxmin = SSGetGlobalOldestXmin(globalxmin); - if (ENABLE_SS_BCAST_SNAPSHOT) { + if (ENABLE_SS_BCAST_GETOLDESTXMIN) { + if (SSGetOldestXminFromAllStandby(xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo)) { + TransactionId ss_oldest_xmin = pg_atomic_read_u64(&g_instance.dms_cxt.xminAck); + if (TransactionIdIsValid(ss_oldest_xmin) && TransactionIdIsNormal(ss_oldest_xmin) && + TransactionIdPrecedes(ss_oldest_xmin, globalxmin)) { + globalxmin = ss_oldest_xmin; + } + } else { + globalxmin = t_thrd.xact_cxt.ShmemVariableCache->recentGlobalXmin; + } + } else { + SSUpdateNodeOldestXmin(SS_MY_INST_ID, globalxmin); + globalxmin = SSGetGlobalOldestXmin(globalxmin); + } + + if (ENABLE_SS_BCAST_SNAPSHOT && !ENABLE_SS_BCAST_GETOLDESTXMIN) { SSSendLatestSnapshotToStandby(xmin, xmax, t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo); 
} } diff --git a/src/include/ddes/dms/ss_common_attr.h b/src/include/ddes/dms/ss_common_attr.h index c3807927a2..7490983b3c 100644 --- a/src/include/ddes/dms/ss_common_attr.h +++ b/src/include/ddes/dms/ss_common_attr.h @@ -1,4 +1,3 @@ - /* * Copyright (c) 2020 Huawei Technologies Co.,Ltd. * @@ -13,9 +12,9 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. * --------------------------------------------------------------------------------------- - * + * * ss_common_attr.h - * + * * IDENTIFICATION * src/include/ddes/dms/ss_common_attr.h * @@ -36,12 +35,18 @@ #define ENABLE_VERIFY_PAGE_VERSION false #define ENABLE_SS_TXNSTATUS_CACHE false #define ENABLE_SS_BCAST_SNAPSHOT false +#define ENABLE_SS_BCAST_GETOLDESTXMIN false #define SS_SINGLE_CLUSTER false #else #define ENABLE_DMS (g_instance.attr.attr_storage.dms_attr.enable_dms && !IsInitdb) #define ENABLE_VERIFY_PAGE_VERSION (g_instance.attr.attr_storage.dms_attr.enable_verify_page) #define ENABLE_SS_TXNSTATUS_CACHE (ENABLE_DMS && g_instance.attr.attr_storage.dms_attr.txnstatus_cache_size > 0) -#define ENABLE_SS_BCAST_SNAPSHOT (ENABLE_DMS && g_instance.attr.attr_storage.dms_attr.enable_bcast_snapshot) +#define ENABLE_SS_BCAST_SNAPSHOT \ + (ENABLE_DMS && g_instance.attr.attr_storage.dms_attr.enable_bcast_snapshot && \ + !g_instance.attr.attr_storage.ss_enable_dorado) +#define ENABLE_SS_BCAST_GETOLDESTXMIN \ + (g_instance.attr.attr_storage.dms_attr.enable_bcast_getoldestxmin && \ + !g_instance.attr.attr_storage.ss_enable_dorado) #define SS_SINGLE_CLUSTER (ENABLE_DMS && !g_instance.attr.attr_storage.ss_enable_dorado) #endif @@ -161,7 +166,8 @@ #define SS_AM_WORKER (t_thrd.role == WORKER || t_thrd.role == THREADPOOL_WORKER || t_thrd.role == STREAM_WORKER) typedef enum SSBroadcastOp { - BCAST_CANCEL_TRX_FOR_SWITCHOVER = 0, + BCAST_GET_XMIN = 0, + BCAST_CANCEL_TRX_FOR_SWITCHOVER, BCAST_SI, BCAST_SEGDROPTL, BCAST_DROP_REL_ALL_BUFFER, @@ -179,7 +185,8 @@ typedef enum SSBroadcastOp { } 
SSBroadcastOp; typedef enum SSBroadcastOpAck { - BCAST_CANCEL_TRX_ACK = 0, + BCAST_GET_XMIN_ACK = 0, + BCAST_CANCEL_TRX_ACK, BCAST_CHECK_DB_BACKENDS_ACK, BCAST_ACK_END } SSBroadcastOpAck; diff --git a/src/include/ddes/dms/ss_transaction.h b/src/include/ddes/dms/ss_transaction.h index ba6d9f85e5..656f78e23c 100644 --- a/src/include/ddes/dms/ss_transaction.h +++ b/src/include/ddes/dms/ss_transaction.h @@ -100,7 +100,7 @@ CommitSeqNo SSTransactionIdGetCommitSeqNo(TransactionId transactionId, bool isCo void SSTransactionIdDidCommit(TransactionId transactionId, bool *ret_did_commit); void SSTransactionIdIsInProgress(TransactionId transactionId, bool *in_progress); TransactionId SSMultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask, uint16 t_infomask2); -bool SSGetOldestXminFromAllStandby(); +bool SSGetOldestXminFromAllStandby(TransactionId xmin, TransactionId xmax, CommitSeqNo csn); int SSGetOldestXmin(char *data, uint32 len, char *output_msg, uint32 *output_msg_len); int SSGetOldestXminAck(SSBroadcastXminAck *ack_data); void SSIsPageHitDms(RelFileNode& node, BlockNumber page, int pagesNum, uint64 *pageMap, int *bitCount); @@ -121,7 +121,7 @@ int SSCheckDbBackendsAck(char *data, unsigned int len); bool SSCheckDbBackendsFromAllStandby(Oid dbid); void SSStandbyUpdateRedirectInfo(); void SSSendLatestSnapshotToStandby(TransactionId xmin, TransactionId xmax, CommitSeqNo csn); -int SSUpdateLatestSnapshotOfStandby(char *data, uint32 len); +int SSUpdateLatestSnapshotOfStandby(char *data, uint32 len, char *output_msg, uint32 *output_msg_len); int SSReloadReformCtrlPage(uint32 len); void SSRequestAllStandbyReloadReformCtrlPage(); bool SSCanFetchLocalSnapshotTxnRelatedInfo(); diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h index 267846f93f..fcf5f94915 100755 --- a/src/include/knl/knl_guc/knl_instance_attr_storage.h +++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h @@ -126,6 +126,7 @@ typedef 
struct knl_instance_attr_dms { int32 sslog_max_file_size; //Unit:KB int parallel_thread_num; int32 txnstatus_cache_size; + bool enable_bcast_getoldestxmin; bool enable_bcast_snapshot; char* work_thread_pool_attr; int32 work_thread_pool_max_cnt; diff --git a/src/include/knl/knl_instance.h b/src/include/knl/knl_instance.h index 3441388dda..a1e0835653 100755 --- a/src/include/knl/knl_instance.h +++ b/src/include/knl/knl_instance.h @@ -1275,6 +1275,7 @@ typedef struct knl_g_datadir_context { typedef struct knl_g_dms_context { uint32 dmsProcSid; + uint64 xminAck; dms_status_t dms_status; ClusterNodeState SSClusterState; ss_reformer_ctrl_t SSReformerControl; // saved in disk; saved by primary diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source index 1c591ae772..4b77ad67b9 100644 --- a/src/test/regress/output/recovery_2pc_tools.source +++ b/src/test/regress/output/recovery_2pc_tools.source @@ -650,6 +650,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c ss_dss_data_vg_name | string | | | ss_dss_xlog_vg_name | string | | | ss_enable_aio | bool | | | + ss_enable_bcast_getoldestxmin | bool | | | ss_enable_bcast_snapshot | bool | | | ss_enable_catalog_centralized | bool | | | ss_enable_dynamic_trace | bool | | | -- Gitee From c797d0f781fc7bd28ad361a998eaf71486405bd1 Mon Sep 17 00:00:00 2001 From: lyoursly Date: Tue, 27 Aug 2024 17:26:39 +0800 Subject: [PATCH 249/347] =?UTF-8?q?=E6=8F=90=E4=BA=A4=E7=A1=AC=E4=BB=B6?= =?UTF-8?q?=E5=AF=86=E7=A0=81=E6=A8=A1=E5=9D=97=E6=8E=A5=E5=8F=A3=E5=BA=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- GNUmakefile.in | 4 + contrib/common_cipher/Makefile | 14 + contrib/common_cipher/README.md | 18 + contrib/common_cipher/common_algo.cpp | 300 +++++++ contrib/common_cipher/common_algo.h | 124 +++ contrib/common_cipher/common_cipher.cpp | 538 ++++++++++++ contrib/common_cipher/common_cipher.h | 275 
++++++ contrib/common_cipher/common_err.cpp | 126 +++ contrib/common_cipher/common_err.h | 89 ++ .../common_internal_interfaces.cpp | 796 ++++++++++++++++++ .../common_internal_interfaces.h | 127 +++ contrib/common_cipher/common_utils.cpp | 155 ++++ contrib/common_cipher/common_utils.h | 52 ++ contrib/common_cipher/test.cpp | 321 +++++++ 14 files changed, 2939 insertions(+) create mode 100755 contrib/common_cipher/Makefile create mode 100644 contrib/common_cipher/README.md create mode 100755 contrib/common_cipher/common_algo.cpp create mode 100755 contrib/common_cipher/common_algo.h create mode 100755 contrib/common_cipher/common_cipher.cpp create mode 100755 contrib/common_cipher/common_cipher.h create mode 100755 contrib/common_cipher/common_err.cpp create mode 100755 contrib/common_cipher/common_err.h create mode 100755 contrib/common_cipher/common_internal_interfaces.cpp create mode 100755 contrib/common_cipher/common_internal_interfaces.h create mode 100755 contrib/common_cipher/common_utils.cpp create mode 100755 contrib/common_cipher/common_utils.h create mode 100755 contrib/common_cipher/test.cpp diff --git a/GNUmakefile.in b/GNUmakefile.in index cc5a7b9b0e..47a30ae1ff 100644 --- a/GNUmakefile.in +++ b/GNUmakefile.in @@ -63,6 +63,7 @@ install: $(MAKE) -C $(root_builddir)/distribute/kernel/extension/dimsearch $@ $(MAKE) -C contrib/security_plugin $@ $(MAKE) -C $(root_builddir)/distribute/kernel/extension/tsdb $@ + $(MAKE) -C contrib/common_cipher $@ +@echo "openGauss installation complete." else ifeq ($(enable_privategauss), yes) @@ -75,6 +76,7 @@ install: $(MAKE) -C contrib/hstore $@ $(MAKE) -C $(root_builddir)/privategauss/kernel/extension/packages $@ $(MAKE) -C $(root_builddir)/contrib/gsredistribute $@ + $(MAKE) -C contrib/common_cipher $@ +@echo "openGauss installation complete." 
else install: @@ -84,6 +86,7 @@ install: $(MAKE) -C contrib/postgres_fdw $@ $(MAKE) -C contrib/hstore $@ $(MAKE) -C $(root_builddir)/privategauss/kernel/extension/packages $@ + $(MAKE) -C contrib/common_cipher $@ +@echo "openGauss installation complete." endif else @@ -95,6 +98,7 @@ install: $(MAKE) -C contrib/hstore $@ $(MAKE) -C contrib/dblink $@ $(MAKE) -C contrib/ndpplugin $@ + $(MAKE) -C contrib/common_cipher $@ @if test -d contrib/spq_plugin; then $(MAKE) -C contrib/spq_plugin $@; fi @if test -d contrib/dolphin; then $(MAKE) -C contrib/dolphin $@; fi @if test -d contrib/age; then $(MAKE) -C contrib/age $@; fi diff --git a/contrib/common_cipher/Makefile b/contrib/common_cipher/Makefile new file mode 100755 index 0000000000..1fed3612d7 --- /dev/null +++ b/contrib/common_cipher/Makefile @@ -0,0 +1,14 @@ +# contrib/common_cipher/Makefile + +SRCS = common_err.cpp common_utils.cpp common_algo.cpp common_internal_interfaces.cpp common_cipher.cpp + +MODULE_big = common_cipher +OBJS = $(SRCS:.cpp=.o) + +subdir = contrib/common_cipher +top_builddir = ../.. 
+include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk + +override CPPFLAGS := -fPIC -shared +override CFLAGS := -fPIC -shared diff --git a/contrib/common_cipher/README.md b/contrib/common_cipher/README.md new file mode 100644 index 0000000000..67efc64ac6 --- /dev/null +++ b/contrib/common_cipher/README.md @@ -0,0 +1,18 @@ +动态库使用说明: +a.硬件动态库中目前适配的三种硬件信息如下: +三未信安密钥管理系统,型号SYT1306 +江南天安密钥管理系统,型号SJJ1988 +光电安辰PCI-E密码卡,型号TOEC-GMPCIE01 + +b.硬件动态库使用需要配置的入参 +指定所使用的硬件,必须项,取值范围["GDACCARD" "JNTAKMS" "SWXAKMS"]。 +指定具体硬件提供so所在路径,必须项。 +硬件配置文件所在路径:密钥管理系统需要配置此变量,指定kms的配置文件路径,可选项。 +江南天安配置文件只需要传入路径,三未信安需要带配置文件名称。 +配置示例: +MODLUE_TYPE=GDACCARD,MODLUE_LIB_PATH=/home/lib/libsdf.so +MODLUE_TYPE=JNTAKMS,MODLUE_LIB_PATH=/home/lib/libsdf.so,MODULE_CONFIG_FILE_PATH=/home/etc/ +MODLUE_TYPE=SWXAKMS,MODLUE_LIB_PATH=/home/lib/libsdf.so,MODULE_CONFIG_FILE_PATH=/home/etc/xxx.ini + +使用具体的接口详见:common_cipher.h + diff --git a/contrib/common_cipher/common_algo.cpp b/contrib/common_cipher/common_algo.cpp new file mode 100755 index 0000000000..65ac296d6f --- /dev/null +++ b/contrib/common_cipher/common_algo.cpp @@ -0,0 +1,300 @@ +#include "common_algo.h" +#include "common_cipher.h" +#include "common_err.h" +#include "common_utils.h" + +#define SUPPORTED 1 +#define UNSUPPORTED 0 + + +static void set_gdac_supported_feature(SupportedFeature *supported_feature) +{ + memcpy(supported_feature->provider_name, MODULE_GDAC_CARD_STR, strlen(MODULE_GDAC_CARD_STR)); + + /*光电安辰提供了扩展的生成内部KEK接口,可以支持索引*/ + supported_feature->key_type = KEY_TYPE_NAMEORIDX; + + supported_feature->supported_symm[MODULE_AES_128_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_AES_256_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_SM4_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_HMAC_SHA256] = SUPPORTED; + supported_feature->supported_symm[MODULE_HMAC_SM3] = SUPPORTED; + supported_feature->supported_symm[MODULE_DETERMINISTIC_KEY] = SUPPORTED; + + 
supported_feature->supported_digest[MODULE_SHA256] = SUPPORTED; + supported_feature->supported_digest[MODULE_SM3] = SUPPORTED; +} + +static void set_swxa_supported_feature(SupportedFeature *supported_feature) +{ + memcpy(supported_feature->provider_name, MODULE_SWXA_KMS_STR, strlen(MODULE_SWXA_KMS_STR)); + + /*三未信安提供了生成密钥密文的接口,可以支持密钥密文。*/ + supported_feature->key_type = KEY_TYPE_CIPHERTEXT; + + supported_feature->supported_symm[MODULE_AES_128_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_AES_256_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_SM4_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_AES_128_CTR] = SUPPORTED; + supported_feature->supported_symm[MODULE_AES_256_CTR] = SUPPORTED; + supported_feature->supported_symm[MODULE_SM4_CTR] = SUPPORTED; + supported_feature->supported_symm[MODULE_HMAC_SHA256] = SUPPORTED; + supported_feature->supported_symm[MODULE_HMAC_SM3] = SUPPORTED; + supported_feature->supported_symm[MODULE_DETERMINISTIC_KEY] = SUPPORTED; + + supported_feature->supported_digest[MODULE_SHA256] = SUPPORTED; + supported_feature->supported_digest[MODULE_SM3] = SUPPORTED; +} + + +static void set_jnta_supported_feature(SupportedFeature *supported_feature) +{ + memcpy(supported_feature->provider_name, MODULE_JNTA_KMS_STR, strlen(MODULE_JNTA_KMS_STR)); + + /*江南天安提供了生成密钥密文和导入密钥密文到指定索引的接口,可以支持密钥索引。*/ + supported_feature->key_type = KEY_TYPE_NAMEORIDX; + + supported_feature->supported_symm[MODULE_AES_128_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_AES_256_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_SM4_CBC] = SUPPORTED; + supported_feature->supported_symm[MODULE_HMAC_SHA256] = SUPPORTED; + supported_feature->supported_symm[MODULE_HMAC_SM3] = SUPPORTED; + supported_feature->supported_symm[MODULE_DETERMINISTIC_KEY] = SUPPORTED; + + supported_feature->supported_digest[MODULE_SHA256] = SUPPORTED; + supported_feature->supported_digest[MODULE_SM3] = SUPPORTED; +} + +int 
get_supported_feature(ModuleType type, SupportedFeature *supported_feature) +{ + if (supported_feature == NULL) { + return CRYPTO_MOD_PARAM_INVALID_ERR; + } + + memset(supported_feature->provider_name, 0x0, MAX_PROVIDER_NAME_LEN); + memset(supported_feature->supported_symm, UNSUPPORTED, sizeof(supported_feature->supported_symm)); + memset(supported_feature->supported_digest, UNSUPPORTED, sizeof(supported_feature->supported_digest)); + supported_feature->key_type = KEY_TYPE_INVALID; + + switch (type) { + case MODULE_GDAC_CARD_TYPE: + set_gdac_supported_feature(supported_feature); + break; + case MODULE_JNTA_KMS_TYPE: + set_jnta_supported_feature(supported_feature); + break; + case MODULE_SWXA_KMS_TYPE: + set_swxa_supported_feature(supported_feature); + break; + default: + return CRYPTO_MOD_TYPE_INVALID_ERR; + } + + return CRYPT_MOD_OK; +} + +static int get_gdac_symm_algo_type(ModuleSymmKeyAlgo symmalgotype, unsigned int* realtype) +{ + switch (symmalgotype) { + case MODULE_AES_128_CBC: + *realtype = GDAC_AES128_CBC; + break; + case MODULE_AES_256_CBC: + *realtype = GDAC_AES256_CBC; + break; + case MODULE_SM4_CBC: + *realtype = GDAC_SM4_CBC; + break; + case MODULE_HMAC_SHA256: + *realtype = GDAC_HMAC_SHA256; + break; + case MODULE_HMAC_SM3: + *realtype = GDAC_HMAC_SM3; + break; + default: + return CRYPTO_MOD_UNSUPPORTED_SYMM_TYPE_ERR; + } + + return CRYPT_MOD_OK; +} + +static int get_swxa_symm_algo_type(ModuleSymmKeyAlgo symmalgotype, unsigned int* realtype) +{ + switch (symmalgotype) { + case MODULE_AES_128_CBC: + case MODULE_AES_256_CBC: + *realtype = SWXA_AES_CBC; + break; + case MODULE_AES_128_CTR: + case MODULE_AES_256_CTR: + *realtype = SWXA_AES_CTR; + break; + case MODULE_SM4_CBC: + *realtype = SWXA_SMS4_CBC; + break; + case MODULE_SM4_CTR: + *realtype = SWXA_SMS4_CTR; + break; + case MODULE_HMAC_SHA256: + *realtype = SWXA_SHA256; + break; + case MODULE_HMAC_SM3: + *realtype = SWXA_SM3; + break; + default: + return CRYPTO_MOD_UNSUPPORTED_SYMM_TYPE_ERR; + } + + 
return CRYPT_MOD_OK; +} + +static int get_jnta_symm_algo_type(ModuleSymmKeyAlgo symmalgotype, unsigned int* realtype, unsigned int* realmode) +{ + switch (symmalgotype) { + case MODULE_AES_128_CBC: + *realtype = TA_AES128; + *realmode = TA_CBC; + break; + case MODULE_AES_256_CBC: + *realtype = TA_AES256; + *realmode = TA_CBC; + break; + case MODULE_SM4_CBC: + *realtype = TA_SM4; + *realmode = TA_CBC; + break; + case MODULE_HMAC_SHA256: + *realtype = TA_HMAC_SHA256; + break; + case MODULE_HMAC_SM3: + *realtype = TA_HMAC_SM3; + break; + default: + return CRYPTO_MOD_UNSUPPORTED_SYMM_TYPE_ERR; + } + + return CRYPT_MOD_OK; +} + +int get_real_symm_algo_type(ModuleType moduletype, ModuleSymmKeyAlgo symmalgotype, unsigned int* realtype, unsigned int* realmode) +{ + /*严格加解密,不属于硬件密码模块内部算法类型,在库中直接自行处理*/ + if (symmalgotype == MODULE_DETERMINISTIC_KEY) { + *realtype = MODULE_DETERMINISTIC_KEY; + + return CRYPT_MOD_OK; + } + + switch (moduletype) { + case MODULE_GDAC_CARD_TYPE: + return get_gdac_symm_algo_type(symmalgotype, realtype); + case MODULE_JNTA_KMS_TYPE: + return get_jnta_symm_algo_type(symmalgotype, realtype, realmode); + case MODULE_SWXA_KMS_TYPE: + return get_swxa_symm_algo_type(symmalgotype, realtype); + default: + return CRYPTO_MOD_TYPE_INVALID_ERR; + } + + return CRYPT_MOD_OK; +} + +void transform_jnta_algo_type(unsigned int type, unsigned int mode, unsigned int *standardtype) +{ + switch (type) { + case TA_AES128: + case TA_AES256: + if (mode == TA_CBC) { + *standardtype = TA_AES_CBC; + } + break; + case TA_SM4: + if (mode == TA_CBC) { + *standardtype = TA_SM4_CBC; + } + break; + default: + break; + } +} + +static int get_gdac_digest_algo_type(ModuleDigestAlgo type, unsigned int* realtype) +{ + switch (type) { + case MODULE_SHA256: + *realtype = GDAC_SHA256; + break; + case MODULE_SM3: + *realtype = GDAC_SM3; + break; + default: + return CRYPTO_MOD_UNSUPPORTED_DIGEST_TYPE_ERR; + } + + return CRYPT_MOD_OK; +} + +static int 
get_swxa_digest_algo_type(ModuleDigestAlgo type, unsigned int* realtype) +{ + switch (type) { + case MODULE_SHA256: + *realtype = SWXA_SHA256; + break; + case MODULE_SM3: + *realtype = SWXA_SM3; + break; + default: + return CRYPTO_MOD_UNSUPPORTED_DIGEST_TYPE_ERR; + } + + return CRYPT_MOD_OK; +} + +static int get_jnta_digest_algo_type(ModuleDigestAlgo type, unsigned int* realtype) +{ + switch (type) { + case MODULE_SHA256: + *realtype = TA_SHA256; + break; + case MODULE_SM3: + *realtype = TA_SM3; + break; + default: + return CRYPTO_MOD_UNSUPPORTED_DIGEST_TYPE_ERR; + } + + return CRYPT_MOD_OK; +} + +int get_real_digest_algo_type(ModuleType moduletype, ModuleDigestAlgo type, unsigned int* realtype) +{ + switch (moduletype) { + case MODULE_GDAC_CARD_TYPE: + return get_gdac_digest_algo_type(type, realtype); + case MODULE_JNTA_KMS_TYPE: + return get_jnta_digest_algo_type(type, realtype); + case MODULE_SWXA_KMS_TYPE: + return get_swxa_digest_algo_type(type, realtype); + default: + return CRYPTO_MOD_TYPE_INVALID_ERR; + } + + return CRYPT_MOD_OK; +} + +int get_key_len_by_algo_type(ModuleSymmKeyAlgo type) +{ + switch (type) { + case MODULE_AES_128_CBC: + case MODULE_AES_128_CTR: + case MODULE_SM4_CBC: + case MODULE_SM4_CTR: + return INTERNAL_KEY_128_BITS; + case MODULE_AES_256_CBC: + case MODULE_AES_256_CTR: + return INTERNAL_KEY_256_BITS; + default: + return INTERNAL_KEY_128_BITS; + } + +} diff --git a/contrib/common_cipher/common_algo.h b/contrib/common_cipher/common_algo.h new file mode 100755 index 0000000000..9b88e40300 --- /dev/null +++ b/contrib/common_cipher/common_algo.h @@ -0,0 +1,124 @@ +#ifndef COMMON_ALGO_H +#define COMMON_ALGO_H + +#include "common_cipher.h" +#include "common_utils.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*密钥索引最大值1024,最小值1*/ + +#define INTERNAL_MIN_INDEX_NUM 1 +#define INTERNAL_MAX_INDEX_NUM 1024 + +/*光电安辰相关宏定义*/ + +/*光电安辰卡内密钥类型*/ +#define GDAC_KEK_TYPE 9 + +/*光电安辰卡内密钥状态*/ +#define GDAC_KEY_EXIST 0x100000 +#define GDAC_KEY_NOT_EXIST 0 
+ + +/*对称算法模式*/ +#define GDAC_MODE_CBC 0x00000002 + +/*对称算法类型*/ +#define GDAC_SM4 0x00000400 +#define GDAC_AES128 0x00004000 +#define GDAC_AES256 0x00008000 + +/*对称算法及模式*/ +#define GDAC_SM4_CBC (GDAC_SM4|GDAC_MODE_CBC) +#define GDAC_AES128_CBC (GDAC_AES128|GDAC_MODE_CBC) +#define GDAC_AES256_CBC (GDAC_AES256|GDAC_MODE_CBC) + +/*摘要算法*/ +#define GDAC_SM3 0x00000001 +#define GDAC_SHA256 0x00000004 + +/*HMAC算法*/ +#define GDAC_HMAC_SM3 0x00100001 +#define GDAC_HMAC_SHA256 0x00100004 + +/*三未信安相关宏定义*/ + +/*对称算法及模式*/ +#define SWXA_SMS4_CBC 0x00000402 +#define SWXA_SMS4_CTR 0x00000420 + +#define SWXA_AES_CBC 0x00002002 +#define SWXA_AES_CTR 0x00002020 + +/*摘要算法,三未的hmac指定算法和摘要算法相同即可*/ +#define SWXA_SM3 0x00000001 +#define SWXA_SHA256 0x00000004 + +/*江南天安相关*/ + +typedef enum { + TA_HMAC_SHA256 = 6, + TA_HMAC_SM3 = 20, +}TA_HMAC_ALG; + +typedef enum { + TA_AES128 = 3, + TA_AES256 = 5, + TA_SM4 = 7, +}TA_SYMM_ALG; + +typedef enum { + TA_CBC = 1, +}TA_SYMM_MODE; + +typedef enum { + TA_SM3 = 1, + TA_SHA256 = 4, +}TA_HASH_ALG; + + +#define TA_SM4_CBC 0X00000402 +#define TA_AES_CBC 0x80000202 + +/*内部通用宏定义*/ +#define INTERNAL_DO_ENC 1 +#define INTERNAL_DO_DEC 0 + +#define INTERNAL_MAX_KEY_LEN 32 + +#define INTERNAL_BLOCK_LEN 16 + +#define INTERNAL_IV_LEN 16 + +#define INTERNAL_HMAC_LEN 32 + +#define INTERNAL_MSG_BLOCK_LEN 8192 + +#define INTERNAL_KEY_128_BITS 128 +#define INTERNAL_KEY_256_BITS 256 + + +typedef struct { + void *session; /*和硬件建立的会话*/ + unsigned int algotype; /*算法类型*/ + unsigned int algomode; /*算法模式,江南天安使用;光电安辰和三未信安algotype已包含模式*/ + int enc; /*0表示解密,1表示加密*/ + unsigned int keysize; /*key的长度*/ + unsigned char key[INTERNAL_MAX_KEY_LEN]; /*存储密钥id/密钥密文值/密钥明文值,AES256密钥长度32*/ +}InternalKeyCtx; + + +extern int get_supported_feature(ModuleType type, SupportedFeature *supported_feature); +extern int get_real_symm_algo_type(ModuleType moduletype, ModuleSymmKeyAlgo symmalgotype, unsigned int* realtype, unsigned int* realmode); +extern int get_real_digest_algo_type(ModuleType 
moduletype, ModuleDigestAlgo type, unsigned int* realtype); +extern int get_key_len_by_algo_type(ModuleSymmKeyAlgo type); +extern void transform_jnta_algo_type(unsigned int type, unsigned int mode, unsigned int *standardtype); + +#ifdef __cplusplus +} +#endif + +#endif /* COMMON_ALGO_H */ diff --git a/contrib/common_cipher/common_cipher.cpp b/contrib/common_cipher/common_cipher.cpp new file mode 100755 index 0000000000..a020fd63b5 --- /dev/null +++ b/contrib/common_cipher/common_cipher.cpp @@ -0,0 +1,538 @@ +#include "common_cipher.h" +#include "common_err.h" +#include "common_utils.h" +#include "common_internal_interfaces.h" + +/** 初始化密码模块 + * + * @param[in] + * load_info 密码模块相关信息(硬件设备口令,硬件设备、硬件库路径等),通过kv方式传入 + * + * @param[out] + * supported_feature 返回当前密码模块支持的加密方式,参考上述结构体 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + * 示例: + * 用户设置GUC参数crypto_module_info = 'enable_crypto_module=on,module_third_msg=aaa' + * 传入接口时load_info = 'aaa' + */ +int crypto_module_init(char *load_info, SupportedFeature *supported_feature) +{ + int ret = CRYPT_MOD_OK; + + ModuleParams moduleparams; + + memset(&moduleparams, 0x0, sizeof(ModuleParams)); + + /* 1.解析load_info获取加载密码模块的参数*/ + ret = parse_module_params(load_info, &moduleparams); + if (ret != CRYPT_MOD_OK) { + goto err; + } + + /* 2.根据密码模块类型填充supported_feature*/ + ret = get_supported_feature(moduleparams.moduletype, supported_feature); + if (ret != CRYPT_MOD_OK) { + + goto err; + } + + /* 3.dlopen打开密码模块驱动库,并加载接口函数*/ + ret = load_module_driver(moduleparams); + if (ret != CRYPT_MOD_OK) { + goto err; + } + + /* 4.打开设备*/ + ret = internal_open_device(moduleparams.cfgfilepath); + if (ret != CRYPT_MOD_OK) { + goto err; + } + + return CRYPT_MOD_OK; +err: + /*可能已经打开/加载,尝试释放*/ + (void)internal_close_device(); + (void)unload_module_driver(); + set_thread_errno(ret); + return ret; + +} + +/** 会话中连接密码模块 + * + * @param[in] + * key_info 密码相关信息(用户口令等信息),通过kv方式传入 + * + * @param[out] + * sess 会话信息 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + * 示例: + * 
用户设置GUC参数tde_key_info = 'keyType=third_kms, keyThirdMsg =bbb' + * 传入接口时key_info = 'bbb' + */ +int crypto_module_sess_init(char *key_info, void **sess) +{ + int ret = CRYPT_MOD_OK; + + ret = internal_open_session(sess); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + } + + return ret; +} + +/** 会话中断开连接密码模块 + * + * @param[in] + * sess 会话信息 + * + * @param[out] + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +void crypto_module_sess_exit(void *sess) +{ + int ret = CRYPT_MOD_OK; + + ret = internal_close_session(sess); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + } + +} + +/** 创建密钥 + * + * @param[in] + * sess 会话信息 + * algo 密钥使用场景的算法 + * + * @param[out] + * key_id 返回生成密钥/密钥ID/密钥密文 + * key_id_size 返回生成内容长度 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_create_symm_key(void *sess, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t *key_id_size) +{ + int ret = CRYPT_MOD_OK; + + ret = internal_generate_symm_key(sess, algo, key_id, (unsigned int*)key_id_size); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + } + + return ret; +} + +/** 密钥上下文初始化,后续进行加解密可直接使用上下文 + * + * @param[in] + * sess 会话信息 + * algo 加密算法 + * enc 加密1、解密0 + * key_id 密码信息 + * key_id_size 密码信息长度 + * @param[out] + * ctx 返回使用密钥信息 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_ctx_init(void *sess, void **ctx, ModuleSymmKeyAlgo algo, int enc, unsigned char *key_id, size_t key_id_size) +{ + InternalKeyCtx *keyctx = NULL; + int ret = CRYPT_MOD_OK; + + if (sess == NULL) { + return CRYPTO_MOD_NOT_OPENSESSION_ERR; + } + + if (enc != INTERNAL_DO_DEC && enc != INTERNAL_DO_ENC) { + return CRYPTO_MOD_INVALID_CRYPTO_TYPE_ERR; + } + + if (key_id == NULL || key_id[0] == '\0' || key_id_size <= 0 || key_id_size > INTERNAL_MAX_KEY_LEN) { + return CRYPTO_MOD_INVALID_KEY_ERR; + } + + keyctx = (InternalKeyCtx*)malloc(sizeof(InternalKeyCtx)); + + keyctx->session = sess; + + ret = get_real_symm_algo_type(get_current_module_type(), algo, &(keyctx->algotype), &(keyctx->algomode)); + if 
(ret != CRYPT_MOD_OK) { + free(keyctx); + return ret; + } + + keyctx->enc = enc; + + memset(keyctx->key, 0x0, sizeof(keyctx->key)); + memcpy(keyctx->key, key_id, key_id_size); + keyctx->keysize = key_id_size; + + *ctx = keyctx; + + return CRYPT_MOD_OK; +} + +/** 获取数据加解密后的数据长度 + * + * @param[in] + * ctx 加解密上下文信息 + * enc 加密1、解密0 + * data_size 返回加解密结果长度 + * @return 返回数据长度 + * + */ +int crypto_result_size(void *ctx, int enc, size_t data_size) +{ + if (ctx == NULL) { + return CRYPTO_MOD_INVALID_KEY_CTX_ERR; + } + + if (enc == INTERNAL_DO_DEC) { + return data_size; + } else if (enc == INTERNAL_DO_ENC) { + return (data_size + INTERNAL_BLOCK_LEN); + } else + return CRYPTO_MOD_INVALID_CRYPTO_TYPE_ERR; +} + +/** 密钥上下文清理 + * + * @param[in] + * ctx 加解密上下文信息 + * @param[out] + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +void crypto_ctx_clean(void *ctx) +{ + if (ctx) { + free(ctx); + ctx = NULL; + } +} + +/** 执行加解密 + * + * @param[in] + * ctx 加解密上下文信息 + * enc 加密1、解密0 + * data 原数据信息 + * data_size 原数据长度 + * iv iv信息 + * iv_size iv信息长度 + * tag GCM模式的校验值 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_encrypt_decrypt(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag) +{ + int ret = CRYPT_MOD_OK; + + if ((enc == INTERNAL_DO_ENC && *result_size < (data_size + INTERNAL_BLOCK_LEN)) + || (enc == INTERNAL_DO_DEC && *result_size < data_size)) { + set_thread_errno(CRYPTO_MOD_NOT_ENOUGH_SPACE_ERR); + return CRYPTO_MOD_NOT_ENOUGH_SPACE_ERR; + } + + if (enc == INTERNAL_DO_ENC) { + ret = internal_symm_encrypt(ctx, data, data_size, iv, iv_size, result, (unsigned int*)result_size, tag); + } else if (enc == INTERNAL_DO_DEC) { + ret = internal_symm_decrypt(ctx, data, data_size, iv, iv_size, result, (unsigned int*)result_size, tag); + } + + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + } + + return ret; + +} + 
+/** 计算摘要 + * + * @param[in] + * sess 会话信息 + * algo 摘要算法 + * data 原数据信息 + * data_size 原数据长度 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_digest(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size) +{ + int ret = CRYPT_MOD_OK; + + ret = internal_digest(sess, algo, data, data_size, result, (unsigned int*)result_size); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + } + + return ret; +} + +/** hmac初始化 + * + * @param[in] + * sess 会话信息 + * algo 摘要算法 + * key_id 密码信息 + * key_id_size 密码信息长度 + * @param[out] + * ctx 返回密钥上下文 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_hmac_init(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size) +{ + InternalKeyCtx *keyctx = NULL; + int ret = CRYPT_MOD_OK; + + if (sess == NULL) { + return CRYPTO_MOD_NOT_OPENSESSION_ERR; + } + + if (key_id == NULL || key_id[0] == '\0' || key_id_size <= 0 || key_id_size > INTERNAL_MAX_KEY_LEN) { + return CRYPTO_MOD_INVALID_KEY_ERR; + } + + keyctx = (InternalKeyCtx*)malloc(sizeof(InternalKeyCtx)); + + + keyctx->session = sess; + + ret = get_real_symm_algo_type(get_current_module_type(), algo, &(keyctx->algotype), &(keyctx->algomode)); + if (ret != CRYPT_MOD_OK) { + free(keyctx); + return ret; + } + + memcpy(keyctx->key, key_id, key_id_size); + keyctx->keysize = key_id_size; + + *ctx = keyctx; + + return ret; +} + +/** hmac清理 + * + * @param[in] + * ctx 密钥上下文信息 + * + * @param[out] + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +void crypto_hmac_clean(void *ctx) +{ + if (ctx) { + free(ctx); + ctx = NULL; + } +} + +/** 执行hmac计算 + * + * @param[in] + * ctx 密钥上下文信息 + * data 原数据信息 + * data_size 原数据长度 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_hmac(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t 
*result_size) +{ + int ret = CRYPT_MOD_OK; + + ret = internal_hmac(ctx, data, data_size, result, result_size); + if (ret != CRYPT_MOD_OK) + set_thread_errno(ret); + + return ret; +} + +/** 生成随机数 + * + * @param[in] + * sess 会话信息 + * size 申请的随机信息长度 + * + * @param[out] + * buffer 返回随机信息 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_gen_random(void *sess, char *buffer, size_t size) +{ + int ret = CRYPT_MOD_OK; + + ret = internal_generate_random(sess, buffer, size); + if (ret != CRYPT_MOD_OK) + set_thread_errno(ret); + + return ret; + +} + +/** 执行确定性加解密 + * + * @param[in] + * sess 会话信息 + * enc 加密1、解密0 + * data 原数据信息 + * data_size 原数据长度 + * key_id 密钥信息 + * key_id_size 密钥信息长度 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_deterministic_enc_dec(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size) +{ + int ret = CRYPT_MOD_OK; + void *tmpkeyctx = NULL; + unsigned char tmpiv[INTERNAL_IV_LEN + 1] = {0}; + + ret = crypto_ctx_init(sess, &tmpkeyctx, MODULE_SM4_CBC, enc, key_id, key_id_size); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + return ret; + } + + /*加密:对明文计算hmac作为iv,并将hmac放到密文串头; + *解密:从密文头获取iv和hmac,并对解密出的明文计算hmac进行校验*/ + + if (enc == INTERNAL_DO_ENC) { + void *tmphmacctx = NULL; + unsigned char tmphmac[INTERNAL_HMAC_LEN] = {0}; + long unsigned int hmaclen = 0; + + /*计算iv */ + ret = crypto_hmac_init(sess, &tmphmacctx, MODULE_HMAC_SM3, key_id, key_id_size); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + crypto_ctx_clean(tmpkeyctx); + return ret; + } + + ret = crypto_hmac(tmphmacctx, data, data_size, tmphmac, &hmaclen); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + crypto_ctx_clean(tmphmacctx); + crypto_ctx_clean(tmpkeyctx); + return ret; + } + + memcpy(tmpiv, tmphmac, INTERNAL_IV_LEN); + + /*把hmac放到密文头 */ + memcpy(result, tmphmac, INTERNAL_HMAC_LEN); + + 
crypto_hmac_clean(tmphmacctx); + + + ret = internal_symm_encrypt(tmpkeyctx, data, data_size, tmpiv, INTERNAL_IV_LEN, result + INTERNAL_HMAC_LEN, (unsigned int*)result_size, NULL); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + } + + crypto_ctx_clean(tmpkeyctx); + + *result_size += INTERNAL_HMAC_LEN; + + return ret; + + } else if (enc == INTERNAL_DO_DEC){ + void *tmphmacctx = NULL; + unsigned char tmphmac[INTERNAL_HMAC_LEN] = {0}; + long unsigned int hmaclen = 0; + unsigned char verifyhmac[INTERNAL_HMAC_LEN] = {0}; + /*获取iv */ + memcpy(tmpiv, data, INTERNAL_IV_LEN); + + /*获取hmac*/ + memcpy(verifyhmac, data, INTERNAL_HMAC_LEN); + + ret = internal_symm_decrypt(tmpkeyctx, data + INTERNAL_HMAC_LEN, data_size - INTERNAL_HMAC_LEN, tmpiv, INTERNAL_IV_LEN, result, (unsigned int*)result_size, NULL); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + crypto_ctx_clean(tmpkeyctx); + return ret; + } + + crypto_ctx_clean(tmpkeyctx); + + /*计算明文hmac */ + ret = crypto_hmac_init(sess, &tmphmacctx, MODULE_HMAC_SM3, key_id, key_id_size); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + return ret; + } + + ret = crypto_hmac(tmphmacctx, result, *result_size, tmphmac, &hmaclen); + if (ret != CRYPT_MOD_OK) { + set_thread_errno(ret); + crypto_ctx_clean(tmphmacctx); + return ret; + } + + crypto_hmac_clean(tmphmacctx); + + /*校验明文hmac值 */ + if (strncmp((char*)tmphmac, (char*)verifyhmac, INTERNAL_HMAC_LEN)) { + set_thread_errno(CRYPTO_MOD_DETERMINISTIC_DEC_VERIFY_ERR); + return CRYPTO_MOD_DETERMINISTIC_DEC_VERIFY_ERR; + } + + return ret; + } + + return ret; + +} + +/** 获取报错信息 + * + * @param[in] + * sess 会话信息 + * @param[out] + * errmsg 返回结果信息,最长256字节 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_get_errmsg(void *sess, char *errmsg) +{ + strcpy(errmsg, common_get_errmsg()); + + return CRYPT_MOD_OK; +} + + diff --git a/contrib/common_cipher/common_cipher.h b/contrib/common_cipher/common_cipher.h new file mode 100755 index 0000000000..7b0a5738ed --- /dev/null 
+++ b/contrib/common_cipher/common_cipher.h @@ -0,0 +1,275 @@ +#ifndef COMMON_CIPHER_H +#define COMMON_CIPHER_H +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_PROVIDER_NAME_LEN 128 +#define MAX_ERRMSG_LEN 256 + +typedef enum { + MODULE_AES_128_CBC = 0, + MODULE_AES_128_CTR, + MODULE_AES_128_GCM, + MODULE_AES_256_CBC, + MODULE_AES_256_CTR, + MODULE_AES_256_GCM, + MODULE_SM4_CBC, + MODULE_SM4_CTR, + MODULE_HMAC_SHA256, + MODULE_HMAC_SM3, + MODULE_DETERMINISTIC_KEY, + MODULE_ALGO_MAX = 1024 +} ModuleSymmKeyAlgo; + + +typedef enum { + MODULE_SHA256 = 0, + MODULE_SM3, + MODULE_DIGEST_MAX = 1024 +} ModuleDigestAlgo; + + +typedef enum { + KEY_TYPE_INVALID, + KEY_TYPE_PLAINTEXT, + KEY_TYPE_CIPHERTEXT, + KEY_TYPE_NAMEORIDX, + KEY_TYPE_MAX +} KeyType; + + +typedef struct { + char provider_name[MAX_PROVIDER_NAME_LEN]; + KeyType key_type; + int supported_symm[MODULE_ALGO_MAX]; // 不支持算法填入0或者支持算法填入1 + int supported_digest[MODULE_DIGEST_MAX]; // 不支持算法填入0或者支持算法填入1 +} SupportedFeature; + + +/** 初始化密码模块 + * + * @param[in] + * load_info 密码模块相关信息(硬件设备口令,硬件设备、硬件库路径等),通过kv方式传入 + * + * @param[out] + * supported_feature 返回当前密码模块支持的加密方式,参考上述结构体 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + * 示例: + * 用户设置GUC参数crypto_module_info = 'enable_crypto_module=on,module_third_msg=aaa' + * 传入接口时load_info = 'aaa' + */ +int crypto_module_init(char *load_info, SupportedFeature *supported_feature); + +/** 会话中连接密码模块 + * + * @param[in] + * key_info 密码相关信息(用户口令等信息),通过kv方式传入 + * + * @param[out] + * sess 会话信息 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + * 示例: + * 用户设置GUC参数tde_key_info = 'keyType=third_kms, keyThirdMsg =bbb' + * 传入接口时key_info = 'bbb' + */ +int crypto_module_sess_init(char *key_info, void **sess); + +/** 会话中断开连接密码模块 + * + * @param[in] + * sess 会话信息 + * + * @param[out] + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +void crypto_module_sess_exit(void *sess); + +/** 创建密钥 + * + * @param[in] + * sess 会话信息 + * algo 密钥使用场景的算法 + * + 
* @param[out] + * key_id 返回生成密钥/密钥ID/密钥密文 + * key_id_size 返回生成内容长度 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_create_symm_key(void *sess, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t *key_id_size); + +/** 密钥上下文初始化,后续进行加解密可直接使用上下文 + * + * @param[in] + * sess 会话信息 + * algo 加密算法 + * enc 加密1、解密0 + * key_id 密码信息 + * key_id_size 密码信息长度 + * @param[out] + * ctx 返回使用密钥信息 + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_ctx_init(void *sess, void **ctx, ModuleSymmKeyAlgo algo, int enc, unsigned char *key_id, size_t key_id_size); + +/** 获取数据加解密后的数据长度 + * + * @param[in] + * ctx 加解密上下文信息 + * enc 加密1、解密0 + * data_size 返回加解密结果长度 + * @return 返回数据长度 + * + */ +int crypto_result_size(void *ctx, int enc, size_t data_size); + +/** 密钥上下文清理 + * + * @param[in] + * ctx 加解密上下文信息 + * @param[out] + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +void crypto_ctx_clean(void *ctx); + +/** 执行加解密 + * + * @param[in] + * ctx 加解密上下文信息 + * enc 加密1、解密0 + * data 原数据信息 + * data_size 原数据长度 + * iv iv信息 + * iv_size iv信息长度 + * tag GCM模式的校验值 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_encrypt_decrypt(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); + +/** 计算摘要 + * + * @param[in] + * sess 会话信息 + * algo 摘要算法 + * data 原数据信息 + * data_size 原数据长度 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_digest(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); + +/** hmac初始化 + * + * @param[in] + * sess 会话信息 + * algo 摘要算法 + * key_id 密码信息 + * key_id_size 密码信息长度 + * @param[out] + * ctx 返回密钥上下文 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_hmac_init(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); + +/** hmac清理 + * + 
* @param[in] + * ctx 密钥上下文信息 + * + * @param[out] + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +void crypto_hmac_clean(void *ctx); + +/** 执行hmac计算 + * + * @param[in] + * ctx 密钥上下文信息 + * data 原数据信息 + * data_size 原数据长度 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_hmac(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); + +/** 生成随机数 + * + * @param[in] + * sess 会话信息 + * size 申请的随机信息长度 + * + * @param[out] + * buffer 返回随机信息 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_gen_random(void *sess, char *buffer, size_t size); + +/** 执行确定性加解密 + * + * @param[in] + * sess 会话信息 + * enc 加密1、解密0 + * data 原数据信息 + * data_size 原数据长度 + * key_id 密钥信息 + * key_id_size 密钥信息长度 + * @param[out] + * result 返回结果信息 + * result_size 返回结果信息长度 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_deterministic_enc_dec(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); + +/** 获取报错信息 + * + * @param[in] + * sess 会话信息 + * @param[out] + * errmsg 返回结果信息,最长256字节 + * + * @return 成功返回CRYPT_MOD_OK,失败返回错误码 + * + */ +int crypto_get_errmsg(void *sess, char *errmsg); + +#ifdef __cplusplus +} +#endif + +#endif /*COMMON_CIPHER_H*/ \ No newline at end of file diff --git a/contrib/common_cipher/common_err.cpp b/contrib/common_cipher/common_err.cpp new file mode 100755 index 0000000000..748018bf42 --- /dev/null +++ b/contrib/common_cipher/common_err.cpp @@ -0,0 +1,126 @@ +#include "common_err.h" + +#define MAX_ERRMSG_LEN 256 + +#define lengthof(array) (sizeof(array) / sizeof(array[0])) + +static thread_local int internalthreaderrno = 0; + +static void reset_thread_errno(); + +typedef struct { + int errno; + char errmsg[MAX_ERRMSG_LEN]; +}CommonErrInfo; + +/*HSM:hardware security module,硬件安全模块。*/ +static const CommonErrInfo internal_err_info[] = { + {INTERNAL_UNKNOWNERR, "HSM 
unknown error"}, + {INTERNAL_NOTSUPPORT, "HSM not support"}, + {INTERNAL_COMMFAIL, "HSM communication error"}, + {INTERNAL_HARDFAIL, "HSM hard error"}, + {INTERNAL_OPENDEVICE, "HSM open device error"}, + {INTERNAL_OPENSESSION, "HSM open session error"}, + {INTERNAL_PARDENY, "HSM permission error"}, + {INTERNAL_KEYNOTEXIST, "HSM key not exist"}, + {INTERNAL_ALGNOTSUPPORT, "HSM not support algorithm"}, + {INTERNAL_ALGMODNOTSUPPORT, "HSM not support algorithm mode"}, + {INTERNAL_PKOPERR, "HSM public key operation error"}, + {INTERNAL_SKOPERR, "HSM private key operation error"}, + {INTERNAL_SIGNERR, "HSM sign error"}, + {INTERNAL_VERIFYERR, "HSM verify error"}, + {INTERNAL_SYMOPERR, "HSM symmetry operation error"}, + {INTERNAL_STEPERR, "HSM step error"}, + {INTERNAL_FILESIZEERR, "HSM file size or data len error"}, + {INTERNAL_FILENOEXIST, "HSM file not exist"}, + {INTERNAL_FILEOFSERR, "HSM file offset operation error"}, + {INTERNAL_KEYTYPEERR, "HSM key type error"}, + {INTERNAL_KEYERR, "HSM key error"}, + {INTERNAL_ENCDATAERR, "HSM encrypt data error"}, + {INTERNAL_RANDERR, "HSM random error"}, + {INTERNAL_PRKRERR, "HSM private access right error"}, + {INTERNAL_MACERR, "HSM MAC error"}, + {INTERNAL_FILEEXISTS, "HSM file exists"}, + {INTERNAL_FILEWERR, "HSM write file error"}, + {INTERNAL_NOBUFFER, "HSM not enough storage"}, + {INTERNAL_INARGERR, "HSM input param error"}, + {INTERNAL_OUTARGERR, "HSM output param error"}, + {INTERNAL_UKEYERR, "HSM ukey error"}, + {INTERNAL_GENKEYERR, "HSM generate key error"}, + {INTERNAL_STATEERR, "HSM status error"}, + {INTERNAL_RETRYERR, "HSM retry exceeded"}, + {INTERNAL_DEVICE_BUSY, "HSM is busy"} +}; + +static int internal_err_number = lengthof(internal_err_info); + +static const CommonErrInfo common_err_info[] = { + {CRYPTO_MOD_TYPE_REPEATED_ERR, "module type set repeated"}, + {CRYPTO_MOD_TYPE_INVALID_ERR, "invalid module type"}, + {CRYPTO_MOD_LIBPATH_REPEATED_ERR, "module lib path set repeated"}, + 
{CRYPTO_MOD_LIBPATH_INVALID_ERR, "invalid module lib path"}, + {CRYPTO_MOD_CFG_PATH_REPEATED_ERR, "module config file set repeated"}, + {CRYPTO_MOD_CFG_PATH_INVALID_ERR, "invalid module config file"}, + {CRYPTO_MOD_PARAM_TOO_MANY_ERR, "param is too many"}, + {CRYPTO_MOD_PARAM_INVALID_ERR, "invalid param"}, + {CRYPTO_MOD_UNSUPPORTED_SYMM_TYPE_ERR, "unsupported symm algo type"}, + {CRYPTO_MOD_UNSUPPORTED_DIGEST_TYPE_ERR, "unsupported digest algo type"}, + {CRYPTO_MOD_DLOPEN_ERR, "dlopen error"}, + {CRYPTO_MOD_DLSYM_ERR, "dlsym error"}, + {CRYPTO_MOD_UNLOAD_ERR, "unload error"}, + {CRYPTO_MOD_NOT_LOADED_ERR, "module not loaded"}, + {CRYPTO_MOD_NOT_OPENDEVICE_ERR, "device not opened"}, + {CRYPTO_MOD_NOT_OPENSESSION_ERR, "session not opened"}, + {CRYPTO_MOD_INVALID_KEY_ERR, "invalid key"}, + {CRYPTO_MOD_INVALID_CRYPTO_TYPE_ERR, "invalid crypto type"}, + {CRYPTO_MOD_INVALID_KEY_CTX_ERR, "invalid key ctx"}, + {CRYPTO_MOD_UNPADDING_ERR, "unpadding err"}, + {CRYPTO_MOD_NOT_ENOUGH_SPACE_ERR, "not enough space"}, + {CRYPTO_MOD_DETERMINISTIC_DEC_VERIFY_ERR, "deterministic dec verify error"}, + {CRYPTO_MOD_UNKNOWN_PARAM_ERR, "unknown module param"} +}; + +static int common_err_number = lengthof(common_err_info); + +static const char* unknown_err = "unknown err"; + +const char* common_get_errmsg() +{ + int i = 0; + + if (internalthreaderrno & INTERNAL_BASE_ERR) { + for (i = 0; i < internal_err_number; i++) { + if (internalthreaderrno == internal_err_info[i].errno) { + reset_thread_errno(); + return internal_err_info[i].errmsg; + } + } + } else if (internalthreaderrno & CRYPTO_MOD_BASE_ERR) { + for (i = 0; i < common_err_number; i++) { + if (internalthreaderrno == common_err_info[i].errno) { + reset_thread_errno(); + return common_err_info[i].errmsg; + } + } + } else { + reset_thread_errno(); + return unknown_err; + } + + if (i != internal_err_number && i != common_err_number) { + reset_thread_errno(); + return unknown_err; + } + + return unknown_err; +} + +void 
set_thread_errno(int errno) +{ + internalthreaderrno = errno; +} + +static void reset_thread_errno() +{ + internalthreaderrno = 0; +} diff --git a/contrib/common_cipher/common_err.h b/contrib/common_cipher/common_err.h new file mode 100755 index 0000000000..8e8f816e2e --- /dev/null +++ b/contrib/common_cipher/common_err.h @@ -0,0 +1,89 @@ +#ifndef COMMON_ERR_H +#define COMMON_ERR_H + + +#ifdef __cplusplus +extern "C" { +#endif + +/*硬件内部错误码*/ +#define INTERNAL_OK 0 + +#define INTERNAL_BASE_ERR 0x01000000 + +#define INTERNAL_UNKNOWNERR (INTERNAL_BASE_ERR + 0x00000001) /* 未知错误 */ +#define INTERNAL_NOTSUPPORT (INTERNAL_BASE_ERR + 0x00000002) /* 不支持 */ +#define INTERNAL_COMMFAIL (INTERNAL_BASE_ERR + 0x00000003) /* 通信错误 */ +#define INTERNAL_HARDFAIL (INTERNAL_BASE_ERR + 0x00000004) /* 硬件错误 */ +#define INTERNAL_OPENDEVICE (INTERNAL_BASE_ERR + 0x00000005) /* 打开设备错误 */ +#define INTERNAL_OPENSESSION (INTERNAL_BASE_ERR + 0x00000006) /* 打开会话句柄错误 */ +#define INTERNAL_PARDENY (INTERNAL_BASE_ERR + 0x00000007) /* 权限不满足 */ +#define INTERNAL_KEYNOTEXIST (INTERNAL_BASE_ERR + 0x00000008) /* 密钥不存在 */ +#define INTERNAL_ALGNOTSUPPORT (INTERNAL_BASE_ERR + 0x00000009) /* 不支持的算法 */ +#define INTERNAL_ALGMODNOTSUPPORT (INTERNAL_BASE_ERR + 0x0000000A) /* 不支持的算法模式 */ +#define INTERNAL_PKOPERR (INTERNAL_BASE_ERR + 0x0000000B) /* 公钥运算错误 */ +#define INTERNAL_SKOPERR (INTERNAL_BASE_ERR + 0x0000000C) /* 私钥运算错误 */ +#define INTERNAL_SIGNERR (INTERNAL_BASE_ERR + 0x0000000D) /* 签名错误 */ +#define INTERNAL_VERIFYERR (INTERNAL_BASE_ERR + 0x0000000E) /* 验证错误 */ +#define INTERNAL_SYMOPERR (INTERNAL_BASE_ERR + 0x0000000F) /* 对称运算错误 */ +#define INTERNAL_STEPERR (INTERNAL_BASE_ERR + 0x00000010) /* 步骤错误 */ +#define INTERNAL_FILESIZEERR (INTERNAL_BASE_ERR + 0x00000011) /* 文件大小错误或输入数据长度非法 */ +#define INTERNAL_FILENOEXIST (INTERNAL_BASE_ERR + 0x00000012) /* 文件不存在 */ +#define INTERNAL_FILEOFSERR (INTERNAL_BASE_ERR + 0x00000013) /* 文件操作偏移量错误 */ +#define INTERNAL_KEYTYPEERR (INTERNAL_BASE_ERR + 0x00000014) /* 密钥类型错误 */ 
+#define INTERNAL_KEYERR (INTERNAL_BASE_ERR + 0x00000015) /* 密钥错误 */ +#define INTERNAL_ENCDATAERR (INTERNAL_BASE_ERR + 0x00000016) /* 加密数据错误 */ +#define INTERNAL_RANDERR (INTERNAL_BASE_ERR + 0x00000017) /* 随机数产生失败 */ +#define INTERNAL_PRKRERR (INTERNAL_BASE_ERR + 0x00000018) /* 私钥使用权限获取失败 */ +#define INTERNAL_MACERR (INTERNAL_BASE_ERR + 0x00000019) /* MAC 运算失败 */ +#define INTERNAL_FILEEXISTS (INTERNAL_BASE_ERR + 0x0000001A) /* 指定文件已存在 */ +#define INTERNAL_FILEWERR (INTERNAL_BASE_ERR + 0x0000001B) /* 文件写入失败 */ +#define INTERNAL_NOBUFFER (INTERNAL_BASE_ERR + 0x0000001C) /* 存储空间不足 */ +#define INTERNAL_INARGERR (INTERNAL_BASE_ERR + 0x0000001D) /* 输入参数错误 */ +#define INTERNAL_OUTARGERR (INTERNAL_BASE_ERR + 0x0000001E) /* 输出参数错误 */ +#define INTERNAL_UKEYERR (INTERNAL_BASE_ERR + 0x0000001F) /* Ukey错误 */ +#define INTERNAL_GENKEYERR (INTERNAL_BASE_ERR + 0x00000020) /* 密钥生成错误 */ +#define INTERNAL_STATEERR (INTERNAL_BASE_ERR + 0x00000021) /* 状态错误 */ +#define INTERNAL_RETRYERR (INTERNAL_BASE_ERR + 0x00000022) /* 重试超过次数 */ +#define INTERNAL_DEVICE_BUSY (INTERNAL_BASE_ERR + 0x00000023) /* 设备忙 */ + + +/*库中自定义错误码*/ +/*特别注意,硬件密码模块的返回值是0表示成功,非0表示失败(错误码),和库对外返回的不一样*/ +#define CRYPT_MOD_OK 1 +#define CRYPT_MOD_ERR 0 + +#define CRYPTO_MOD_BASE_ERR 0x01000 + +#define CRYPTO_MOD_TYPE_REPEATED_ERR (CRYPTO_MOD_BASE_ERR + 0x00001)/*密码模块类型重复设置*/ +#define CRYPTO_MOD_TYPE_INVALID_ERR (CRYPTO_MOD_BASE_ERR + 0x00002)/*无效的密码模块类型*/ +#define CRYPTO_MOD_LIBPATH_REPEATED_ERR (CRYPTO_MOD_BASE_ERR + 0x00003)/*密码模块库路径重复设置*/ +#define CRYPTO_MOD_LIBPATH_INVALID_ERR (CRYPTO_MOD_BASE_ERR + 0x00004)/*无效的密码模块库路径*/ +#define CRYPTO_MOD_CFG_PATH_REPEATED_ERR (CRYPTO_MOD_BASE_ERR + 0x00005)/*密码模块配置文件重复设置*/ +#define CRYPTO_MOD_CFG_PATH_INVALID_ERR (CRYPTO_MOD_BASE_ERR + 0x00006)/*无效的密码模块配置文件*/ +#define CRYPTO_MOD_PARAM_TOO_MANY_ERR (CRYPTO_MOD_BASE_ERR + 0x00007)/*密码卡参数配置过多*/ +#define CRYPTO_MOD_PARAM_INVALID_ERR (CRYPTO_MOD_BASE_ERR + 0x00008)/*无效的参数*/ +#define CRYPTO_MOD_UNSUPPORTED_SYMM_TYPE_ERR 
(CRYPTO_MOD_BASE_ERR + 0x00009)/*不支持的对称算法类型*/ +#define CRYPTO_MOD_UNSUPPORTED_DIGEST_TYPE_ERR (CRYPTO_MOD_BASE_ERR + 0x0000A)/*不支持的摘要算法类型*/ +#define CRYPTO_MOD_DLOPEN_ERR (CRYPTO_MOD_BASE_ERR + 0x0000B)/*dlopen失败*/ +#define CRYPTO_MOD_DLSYM_ERR (CRYPTO_MOD_BASE_ERR + 0x0000C)/*dlsym失败*/ +#define CRYPTO_MOD_UNLOAD_ERR (CRYPTO_MOD_BASE_ERR + 0x0000D)/*dlclose失败*/ +#define CRYPTO_MOD_NOT_LOADED_ERR (CRYPTO_MOD_BASE_ERR + 0x0000E)/*还未加载驱动库*/ +#define CRYPTO_MOD_NOT_OPENDEVICE_ERR (CRYPTO_MOD_BASE_ERR + 0x0000F)/*还未打开设备*/ +#define CRYPTO_MOD_NOT_OPENSESSION_ERR (CRYPTO_MOD_BASE_ERR + 0x00010)/*还未建立会话*/ +#define CRYPTO_MOD_INVALID_KEY_ERR (CRYPTO_MOD_BASE_ERR + 0x00011)/*无效的密钥*/ +#define CRYPTO_MOD_INVALID_CRYPTO_TYPE_ERR (CRYPTO_MOD_BASE_ERR + 0x00012)/*无效的加解密类型*/ +#define CRYPTO_MOD_INVALID_KEY_CTX_ERR (CRYPTO_MOD_BASE_ERR + 0x00013)/*无效密钥上下文*/ +#define CRYPTO_MOD_UNPADDING_ERR (CRYPTO_MOD_BASE_ERR + 0x00014)/*去pad失败*/ +#define CRYPTO_MOD_NOT_ENOUGH_SPACE_ERR (CRYPTO_MOD_BASE_ERR + 0x00015)/*分配的空间不足*/ +#define CRYPTO_MOD_DETERMINISTIC_DEC_VERIFY_ERR (CRYPTO_MOD_BASE_ERR + 0x00016)/*确定性解密校验失败*/ +#define CRYPTO_MOD_UNKNOWN_PARAM_ERR (CRYPTO_MOD_BASE_ERR + 0xFFFFF)/*未知的参数*/ + +extern void set_thread_errno(int errno); +extern const char* common_get_errmsg(); + +#ifdef __cplusplus +} +#endif + +#endif /* COMMON_ERR_H */ diff --git a/contrib/common_cipher/common_internal_interfaces.cpp b/contrib/common_cipher/common_internal_interfaces.cpp new file mode 100755 index 0000000000..f76e297f23 --- /dev/null +++ b/contrib/common_cipher/common_internal_interfaces.cpp @@ -0,0 +1,796 @@ +#include +#include +#include +#include +#include + +#include "common_internal_interfaces.h" +#include "common_err.h" +#include "common_utils.h" + +#define INTERNAL_SHARED_LOCK 0 +#define INTERNAL_EXCLUSIVE_LOCK 1 + +#define SET_JNTA_CFG_PATH(path) (setenv("TASS_GHVSM_CFG_PATH", path, 1)) + +#define INTERNAL_RETURN(ret) \ +{ \ + if (ret == INTERNAL_OK) \ + return CRYPT_MOD_OK; \ + else \ + return ret; 
\ +}while(0) + +/*dlopen句柄*/ +static void *driverhandle = NULL; + +/*密码模块接口*/ +static ModuleInterfaces *moduleinterfaces = NULL; + +/*定义一个锁,对driverhandle和moduleinterfaces做读写保护*/ +static pthread_rwlock_t drivermutex; + +/*设备句柄*/ +static void *devicehandle = NULL; + +/*定义一个锁,对devicehandle做读写保护*/ +static pthread_rwlock_t devicehandlemutex; + +/*记录当前空闲的最小索引号,在第一次生成密钥时获取并设置。这个需要保证密钥都是从该接口库生成*/ +static int currnetindex = 0; + +/*定义一个锁,保证同一时刻只有一个线程在生成密钥*/ +static pthread_mutex_t genkeymutex; + + +static int internal_padding(char *indata, int inlen, char *outdata, int *outlen); +static int internal_unpadding(char *indata, int *inlen); + +static void internal_lock(int loketype, pthread_rwlock_t *lock) +{ + if (loketype == INTERNAL_SHARED_LOCK) { + pthread_rwlock_rdlock(lock); + } else if (loketype == INTERNAL_EXCLUSIVE_LOCK){ + pthread_rwlock_wrlock(lock); + } else { + /*do nothing*/ + } +} + +static void internal_unlock(pthread_rwlock_t *lock) +{ + pthread_rwlock_unlock(lock); +} + +ModuleType get_current_module_type() +{ + internal_lock(INTERNAL_SHARED_LOCK, &drivermutex); + + if (moduleinterfaces) { + internal_unlock(&drivermutex); + return moduleinterfaces->type; + } + + internal_unlock(&drivermutex); + + return MODULE_INVALID_TYPE; +} + +static int load_standard_interfaces(ModuleStandardInterfaces *standardinterfaces) +{ + standardinterfaces->InternalOpenDevice = (StandardOpenDevice_type)dlsym(driverhandle, "SDF_OpenDevice"); + standardinterfaces->InternalCloseDevice = (StandardCloseDevice_type)dlsym(driverhandle, "SDF_CloseDevice"); + standardinterfaces->InternalOpenSession = (StandardOpenSession_type)dlsym(driverhandle, "SDF_OpenSession"); + standardinterfaces->InternalCloseSession = (StandardCloseSession_type)dlsym(driverhandle, "SDF_CloseSession"); + standardinterfaces->InternalGenerateRandom = (StandardGenerateRandom_type)dlsym(driverhandle, "SDF_GenerateRandom"); + standardinterfaces->InternalEncrypt = (StandardEncrypt_type)dlsym(driverhandle, "SDF_Encrypt"); + 
standardinterfaces->InternalDecrypt = (StandardDecrypt_type)dlsym(driverhandle, "SDF_Decrypt"); + standardinterfaces->InternalDestroyKey = (StandardDestroyKey_type)dlsym(driverhandle, "SDF_DestroyKey"); + standardinterfaces->InternalHashInit = (StandardHashInit_type)dlsym(driverhandle, "SDF_HashInit"); + standardinterfaces->InternalHashUpdate = (StandardHashUpdate_type)dlsym(driverhandle, "SDF_HashUpdate"); + standardinterfaces->InternalHashFinal = (StandardHashFinal_type)dlsym(driverhandle, "SDF_HashFinal"); + + if (standardinterfaces->InternalOpenDevice == NULL + || standardinterfaces->InternalCloseDevice == NULL + || standardinterfaces->InternalOpenSession == NULL + || standardinterfaces->InternalCloseSession == NULL + || standardinterfaces->InternalGenerateRandom == NULL + || standardinterfaces->InternalEncrypt == NULL + || standardinterfaces->InternalDecrypt == NULL + || standardinterfaces->InternalDestroyKey == NULL + || standardinterfaces->InternalHashInit == NULL + || standardinterfaces->InternalHashUpdate == NULL + || standardinterfaces->InternalHashFinal == NULL) { + + return CRYPTO_MOD_DLSYM_ERR; + } + + return CRYPT_MOD_OK; +} + +static int load_gdac_interfaces(ModuleGdacInterfaces* gdacinterfaces) +{ + gdacinterfaces->GDACGenerateKEK = (GDACGenerateKEK_type)dlsym(driverhandle, "SDFE_GenerateKEK"); + gdacinterfaces->GDACHMAC = (GDACHMAC_type)dlsym(driverhandle, "SDFE_Hmac"); + gdacinterfaces->GDACEncryptWithIndex = (GDACEncryptWithIndex_type)dlsym(driverhandle, "SDFE_Encrypt"); + gdacinterfaces->GDACDecryptWithIndex = (GDACDecryptWithIndex_type)dlsym(driverhandle, "SDFE_Decrypt"); + gdacinterfaces->GDACExportKEK = (GDACExportKEK_type)dlsym(driverhandle, "SDFE_ExportKEK"); + gdacinterfaces->GDACGetkeyState = (GDACGetkeyState_type)dlsym(driverhandle, "SDFE_GetkeyState"); + + if (gdacinterfaces->GDACGenerateKEK == NULL + || gdacinterfaces->GDACHMAC == NULL + || gdacinterfaces->GDACEncryptWithIndex == NULL + || gdacinterfaces->GDACDecryptWithIndex == NULL + 
|| gdacinterfaces->GDACExportKEK == NULL + || gdacinterfaces->GDACGetkeyState == NULL) { + + return CRYPTO_MOD_DLSYM_ERR; + } + + return CRYPT_MOD_OK; +} + +static int load_jnta_interfaces(ModuleJntaInterfaces* jntainterfaces) +{ + jntainterfaces->JNTACalculateHmac = (JNTACalculateHmac_type)dlsym(driverhandle, "Tass_CalculateHmac"); + jntainterfaces->JNTAGenerateSymmKeyWithLMK = (JNTAGenerateSymmKeyWithLMK_type)dlsym(driverhandle, "Tass_GenerateSymmKeyWithLMK"); + jntainterfaces->JNTAImportKeyCipherByLMK = (JNTAImportKeyCipherByLMK_type)dlsym(driverhandle, "Tass_ImportKeyCipherByLMK"); + + if (jntainterfaces->JNTACalculateHmac == NULL + || jntainterfaces->JNTAGenerateSymmKeyWithLMK == NULL + || jntainterfaces->JNTAImportKeyCipherByLMK == NULL) { + + return CRYPTO_MOD_DLSYM_ERR; + } + + return CRYPT_MOD_OK; +} + +static int load_swxa_interfaces(ModuleSwxaInterfaces* swxainterfaces) +{ + swxainterfaces->SWXAGenerateKeyCipher = (SWXAGenerateKeyCipher_type)dlsym(driverhandle, "SDF_GenerateKey_Cipher"); + swxainterfaces->SWXAEncKeyEncrypt = (SWXAEncKeyEncrypt_type)dlsym(driverhandle, "SDF_EncKeyEncrypt"); + swxainterfaces->SWXAEncKeyDecrypt = (SWXAEncKeyDecrypt_type)dlsym(driverhandle, "SDF_EncKeyDecrypt"); + swxainterfaces->SWXAHMAC = (SWXAHMAC_type)dlsym(driverhandle, "SDF_EncKeyHMAC"); + swxainterfaces->SWXAOpenDeviceWithPathAndName = (SWXAOpenDeviceWithPathAndName_type)dlsym(driverhandle, "SDF_OpenDeviceWithPathAndName"); + + if (swxainterfaces->SWXAGenerateKeyCipher == NULL + || swxainterfaces->SWXAEncKeyEncrypt == NULL + || swxainterfaces->SWXAEncKeyDecrypt == NULL + || swxainterfaces->SWXAHMAC == NULL + || swxainterfaces->SWXAOpenDeviceWithPathAndName == NULL) { + + return CRYPTO_MOD_DLSYM_ERR; + } + + return CRYPT_MOD_OK; +} + +/*加载驱动库*/ +int load_module_driver(ModuleParams moduleparams) +{ + + int ret = CRYPT_MOD_OK; + + /*加写锁*/ + internal_lock(INTERNAL_EXCLUSIVE_LOCK, &drivermutex); + + /*检查是否已经加载,如果已经加载过,则直接返回*/ + if (driverhandle && moduleinterfaces) { + 
internal_unlock(&drivermutex);
+        return CRYPT_MOD_OK;
+    }
+
+    /*dlopen动态库*/
+    driverhandle = dlopen(moduleparams.libpath, RTLD_LAZY);
+    if (driverhandle == NULL) {
+        internal_unlock(&drivermutex);
+        return CRYPTO_MOD_DLOPEN_ERR;
+    }
+
+    /*为接口分配空间*/
+    moduleinterfaces = (ModuleInterfaces*)malloc(sizeof(ModuleInterfaces));
+
+    moduleinterfaces->type = moduleparams.moduletype;
+
+    moduleinterfaces->standardInterfaces = (ModuleStandardInterfaces*)malloc(sizeof(ModuleStandardInterfaces));
+
+    /*加载标准接口*/
+    ret = load_standard_interfaces(moduleinterfaces->standardInterfaces);
+    if (ret != CRYPT_MOD_OK) {
+        free(moduleinterfaces->standardInterfaces);
+        free(moduleinterfaces); moduleinterfaces = NULL; /* 置空并关闭句柄,防止悬空指针/句柄泄漏 */
+        internal_unlock(&drivermutex);
+        return ret;
+    }
+
+    /*加载扩展接口*/
+    switch (moduleparams.moduletype)
+    {
+        case MODULE_GDAC_CARD_TYPE:
+            moduleinterfaces->extendInterfaces.gdacInterfaces = (ModuleGdacInterfaces*)malloc(sizeof(ModuleGdacInterfaces));
+            ret = load_gdac_interfaces(moduleinterfaces->extendInterfaces.gdacInterfaces);
+            break;
+        case MODULE_JNTA_KMS_TYPE:
+            moduleinterfaces->extendInterfaces.jntaInterfaces = (ModuleJntaInterfaces*)malloc(sizeof(ModuleJntaInterfaces));
+            ret = load_jnta_interfaces(moduleinterfaces->extendInterfaces.jntaInterfaces);
+            break;
+        case MODULE_SWXA_KMS_TYPE:
+            moduleinterfaces->extendInterfaces.swxaInterfaces = (ModuleSwxaInterfaces*)malloc(sizeof(ModuleSwxaInterfaces));
+            ret = load_swxa_interfaces(moduleinterfaces->extendInterfaces.swxaInterfaces);
+            break;
+        default:
+            free(moduleinterfaces->standardInterfaces);
+            free(moduleinterfaces); moduleinterfaces = NULL; /* 置空,否则全局指针悬空 */
+            internal_unlock(&drivermutex);
+            return CRYPTO_MOD_TYPE_INVALID_ERR;
+    }
+
+    if (ret != CRYPT_MOD_OK) {
+        /* extendInterfaces是union:gdacInterfaces/jntaInterfaces/swxaInterfaces */
+        /* 三个成员共用同一块内存,逐成员free会造成double-free,这里只释放一次 */
+        if (moduleinterfaces->extendInterfaces.gdacInterfaces) {
+            free(moduleinterfaces->extendInterfaces.gdacInterfaces);
+            moduleinterfaces->extendInterfaces.gdacInterfaces = NULL;
+        }
+        free(moduleinterfaces->standardInterfaces);
+        free(moduleinterfaces); moduleinterfaces = NULL;
+        dlclose(driverhandle); driverhandle = NULL; /* 失败时关闭句柄,避免泄漏 */
+        internal_unlock(&drivermutex);
+        return ret;
+    }
+
+    internal_unlock(&drivermutex);
+    return CRYPT_MOD_OK;
+
+}
+
+int unload_module_driver()
+{
+    /*加写锁*/
+    internal_lock(INTERNAL_EXCLUSIVE_LOCK, &drivermutex);
+    if (driverhandle == NULL || moduleinterfaces == NULL) {
+
+        internal_unlock(&drivermutex);
+        return CRYPTO_MOD_UNLOAD_ERR;
+    }
+
+    /*释放接口空间*/
+
+    if (moduleinterfaces->extendInterfaces.gdacInterfaces) {
+        free(moduleinterfaces->extendInterfaces.gdacInterfaces);
+        moduleinterfaces->extendInterfaces.gdacInterfaces = NULL;
+    }
+
+    if (moduleinterfaces->extendInterfaces.jntaInterfaces) {
+        free(moduleinterfaces->extendInterfaces.jntaInterfaces);
+        moduleinterfaces->extendInterfaces.jntaInterfaces = NULL;
+    }
+
+    if (moduleinterfaces->extendInterfaces.swxaInterfaces) {
+        free(moduleinterfaces->extendInterfaces.swxaInterfaces);
+        moduleinterfaces->extendInterfaces.swxaInterfaces = NULL;
+    }
+
+    free(moduleinterfaces->standardInterfaces);
+    moduleinterfaces->standardInterfaces = NULL;
+
+    free(moduleinterfaces);
+    moduleinterfaces = NULL;
+
+    dlclose(driverhandle);
+    driverhandle = NULL;
+
+    internal_unlock(&drivermutex);
+
+    return CRYPT_MOD_OK;
+
+}
+
+int internal_open_device(char* cfg_path)
+{
+    /*这里调用密码硬件的接口,需要注意硬件接口返回0表示成功。*/
+    int ret = INTERNAL_OK;
+
+    /*判断是否已加载驱动并加载驱动接口*/
+    internal_lock(INTERNAL_SHARED_LOCK, &drivermutex);
+
+    if (driverhandle == NULL || moduleinterfaces == NULL) {
+        internal_unlock(&drivermutex);
+        return CRYPTO_MOD_NOT_LOADED_ERR;
+    }
+
+    /*大胆一些,在这里释放掉锁*/
+    internal_unlock(&drivermutex);
+
+    /*对设备句柄加写锁*/
+    internal_lock(INTERNAL_EXCLUSIVE_LOCK, &devicehandlemutex);
+
+    /*已经打开过,直接返回*/
+    if (devicehandle != NULL) {
+        internal_unlock(&devicehandlemutex);
+        return CRYPT_MOD_OK;
+    }
+
+    switch (moduleinterfaces->type)
+    {
+        case MODULE_GDAC_CARD_TYPE:
+            ret =
moduleinterfaces->standardInterfaces->InternalOpenDevice(&devicehandle); + break; + case MODULE_SWXA_KMS_TYPE: + ret = moduleinterfaces->extendInterfaces.swxaInterfaces->SWXAOpenDeviceWithPathAndName((unsigned char*)cfg_path, &devicehandle); + break; + case MODULE_JNTA_KMS_TYPE: + /*江南天安从环境变量获取配置文件路径,这里设置一下*/ + SET_JNTA_CFG_PATH(cfg_path); + ret = moduleinterfaces->standardInterfaces->InternalOpenDevice(&devicehandle); + break; + default: + internal_unlock(&devicehandlemutex); + return CRYPTO_MOD_TYPE_INVALID_ERR; + } + + internal_unlock(&devicehandlemutex); + + INTERNAL_RETURN(ret); + +} + +int internal_close_device() +{ + int ret = INTERNAL_OK; + + internal_lock(INTERNAL_EXCLUSIVE_LOCK, &devicehandlemutex); + + if (devicehandle != NULL) { + ret = moduleinterfaces->standardInterfaces->InternalCloseDevice(devicehandle); + devicehandle = NULL; + } + + internal_unlock(&devicehandlemutex); + + INTERNAL_RETURN(ret); +} + +int internal_open_session(void **sess) +{ + int ret = INTERNAL_OK; + + internal_lock(INTERNAL_SHARED_LOCK, &devicehandlemutex); + + if (devicehandle == NULL) { + internal_unlock(&devicehandlemutex); + return CRYPTO_MOD_NOT_OPENDEVICE_ERR; + } + + ret = moduleinterfaces->standardInterfaces->InternalOpenSession(devicehandle, sess); + + internal_unlock(&devicehandlemutex); + + INTERNAL_RETURN(ret); +} + +int internal_close_session(void *sess) +{ + int ret = INTERNAL_OK; + + if (sess == NULL) { + return CRYPTO_MOD_NOT_OPENSESSION_ERR; + } + + internal_lock(INTERNAL_SHARED_LOCK, &devicehandlemutex); + + if (devicehandle == NULL) { + internal_unlock(&devicehandlemutex); + return CRYPTO_MOD_NOT_OPENDEVICE_ERR; + } + + ret = moduleinterfaces->standardInterfaces->InternalCloseSession(sess); + + internal_unlock(&devicehandlemutex); + + INTERNAL_RETURN(ret); +} + +static bool use_key_index() +{ + switch (moduleinterfaces->type) { + case MODULE_GDAC_CARD_TYPE: + case MODULE_JNTA_KMS_TYPE: + return true; + case MODULE_SWXA_KMS_TYPE: + return false; + default: + 
return false; + } +} + +static int init_gdac_key_index(void* sess) +{ + int keystatus = 0; + int i =0; + int ret = INTERNAL_OK; + + /*需要获取当前存在密钥的最大索引值*/ + for (i = INTERNAL_MIN_INDEX_NUM; i <= INTERNAL_MAX_INDEX_NUM; i++) { + ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACGetkeyState(sess, GDAC_KEK_TYPE, (unsigned int)i, (unsigned int*)&keystatus); + if (ret != INTERNAL_OK) { + return ret; + } + if (keystatus == GDAC_KEY_NOT_EXIST) { + currnetindex = i; + break; + } + } + + if (i == (INTERNAL_MAX_INDEX_NUM + 1)) { + return INTERNAL_NOBUFFER; + } + + return INTERNAL_OK; +} + +static int init_jnta_key_index(void* sess) +{ + int ret = INTERNAL_OK; + int i = 0; + InternalKeyCtx *keyctx = NULL; + + unsigned char tmpdata[] = {"12345678"}; + int tmplen = 8; + + unsigned char tmpiv[] = {"1234567812345678"}; + int tmpivlen = 16; + + unsigned char tmpout[32] = {0}; + unsigned int tmpoutlen = 0; + keyctx = (InternalKeyCtx*)malloc(sizeof(InternalKeyCtx)); + + keyctx->session = sess; + + keyctx->enc = INTERNAL_DO_ENC; + + keyctx->algotype = TA_SM4; + keyctx->algomode = TA_CBC; + + /*需要获取当前存在密钥的最大索引值*/ + for (i = INTERNAL_MIN_INDEX_NUM; i <= INTERNAL_MAX_INDEX_NUM; i++) { + sprintf((char *)(keyctx->key), "%d",i); + keyctx->keysize = strlen((char*)(keyctx->key)); + /*没有获取密钥状态接口,通过做加密判断错误发为密钥不存在*/ + ret = internal_symm_encrypt(keyctx, tmpdata, tmplen, tmpiv, tmpivlen, tmpout, &tmpoutlen, NULL); + if (ret == INTERNAL_KEYNOTEXIST) { + currnetindex = i; + break; + } + memset(keyctx->key, 0x0, INTERNAL_MAX_KEY_LEN); + } + + free(keyctx); + + if (i == (INTERNAL_MAX_INDEX_NUM + 1)) { + return INTERNAL_NOBUFFER; + } + + return INTERNAL_OK; + +} +static int init_current_key_index(void* sess) +{ + int ret = INTERNAL_OK; + + switch (moduleinterfaces->type) { + case MODULE_GDAC_CARD_TYPE: + ret = init_gdac_key_index(sess); + break; + case MODULE_JNTA_KMS_TYPE: + ret = init_jnta_key_index(sess); + break; + case MODULE_SWXA_KMS_TYPE: + default: + break; + } + + return ret; + +} + 
+ +int internal_generate_symm_key(void* sess, ModuleSymmKeyAlgo algo, unsigned char* key, unsigned int* keylen) +{ + int ret = INTERNAL_OK; + bool useindex = use_key_index(); + + if (sess == NULL) { + return CRYPTO_MOD_NOT_OPENSESSION_ERR; + } + + if (useindex) { + pthread_mutex_lock(&genkeymutex); + if (currnetindex == 0) { + ret = init_current_key_index(sess); + if (ret != INTERNAL_OK) { + pthread_mutex_unlock(&genkeymutex); + return ret; + } + } else if (currnetindex == (INTERNAL_MAX_INDEX_NUM + 1)) { + pthread_mutex_unlock(&genkeymutex); + return INTERNAL_NOBUFFER; + } + } + + switch(moduleinterfaces->type) { + case MODULE_GDAC_CARD_TYPE: + ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACGenerateKEK(sess, get_key_len_by_algo_type(algo), currnetindex); + break; + case MODULE_JNTA_KMS_TYPE: { + unsigned int realalgo; + unsigned int realmode; + unsigned char tmpcipher[INTERNAL_MAX_KEY_LEN + 1] = {0}; + unsigned int tmpcipherlen = 0; + unsigned char kcv[8] = {0}; + unsigned int kcvlen = 0; + + ret = get_real_symm_algo_type(moduleinterfaces->type, algo, &realalgo, &realmode); + if (ret != CRYPT_MOD_OK) + break; + ret = moduleinterfaces->extendInterfaces.jntaInterfaces->JNTAGenerateSymmKeyWithLMK(sess, (TA_SYMM_ALG)realalgo, tmpcipher, &tmpcipherlen, kcv, &kcvlen); + if (ret != INTERNAL_OK) + break; + ret = moduleinterfaces->extendInterfaces.jntaInterfaces->JNTAImportKeyCipherByLMK(sess, currnetindex, 0, 0, (TA_SYMM_ALG)realalgo, 0, NULL, 0, NULL, 0, tmpcipher, tmpcipherlen, kcv, 0); + if (ret != INTERNAL_OK) + break; + break; + } + case MODULE_SWXA_KMS_TYPE: + ret = moduleinterfaces->extendInterfaces.swxaInterfaces->SWXAGenerateKeyCipher(sess, get_key_len_by_algo_type(algo), key, keylen); + break; + default: + ret = CRYPTO_MOD_TYPE_INVALID_ERR; + break; + } + + if (useindex) { + if (ret != CRYPT_MOD_OK && ret != INTERNAL_OK) { + pthread_mutex_unlock(&genkeymutex); + return ret; + } else { + sprintf((char*)key, "%d",currnetindex); + *keylen = 
strlen((char*)key);
+            currnetindex++;
+            pthread_mutex_unlock(&genkeymutex);
+            return CRYPT_MOD_OK;
+        }
+    }
+
+    if (ret != CRYPT_MOD_OK && ret != INTERNAL_OK) {
+        return ret;
+    } else {
+        return CRYPT_MOD_OK;
+    }
+
+}
+
+int internal_symm_encrypt(void *keyctx, unsigned char *indata, unsigned int inlen, unsigned char *iv, unsigned int ivlen, unsigned char *outdata, unsigned int *outlen, unsigned char *tag)
+{
+    int ret = INTERNAL_OK;
+
+    InternalKeyCtx *tmpctx = (InternalKeyCtx *)keyctx;
+    unsigned char *paddingdata = NULL;
+    int paddinglen = 0;
+    unsigned char tmpiv[INTERNAL_IV_LEN + 1] = {0};
+
+    memcpy(tmpiv, iv, ivlen);
+    paddinglen = inlen + INTERNAL_BLOCK_LEN + 1;
+    paddingdata = (unsigned char *)malloc(paddinglen);
+
+    memset(paddingdata, 0x0, paddinglen);
+    internal_padding((char*)indata, inlen, (char*)paddingdata, &paddinglen);
+
+    switch (moduleinterfaces->type) {
+        case MODULE_GDAC_CARD_TYPE: {
+            unsigned char tmpkey[INTERNAL_MAX_KEY_LEN] = {0};
+            unsigned int keylen = 0;
+            ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACExportKEK(tmpctx->session, atoi((char*)tmpctx->key), tmpkey, &keylen);
+            if (ret != INTERNAL_OK) {
+                free(paddingdata); return ret; } /* 失败时先释放padding缓冲区,避免内存泄漏 */
+            ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACEncryptWithIndex(tmpctx->session, tmpkey, keylen, tmpctx->algotype, tmpiv, ivlen, paddingdata, paddinglen, outdata, outlen, NULL, 0, NULL);
+            break;
+        }
+        case MODULE_JNTA_KMS_TYPE: {
+            int tmpindex = atoi((char*)(tmpctx->key));
+            unsigned int standardtype = 0;
+            transform_jnta_algo_type(tmpctx->algotype, tmpctx->algomode, &standardtype);
+            ret = moduleinterfaces->standardInterfaces->InternalEncrypt(tmpctx->session, (void*)&tmpindex, standardtype, tmpiv, paddingdata, paddinglen, outdata, outlen);
+            break;
+        }
+        case MODULE_SWXA_KMS_TYPE:
+            ret = moduleinterfaces->extendInterfaces.swxaInterfaces->SWXAEncKeyEncrypt(tmpctx->session, tmpctx->key, tmpctx->keysize, tmpctx->algotype, tmpiv, paddingdata, paddinglen, outdata, outlen);
+            break;
+        default:
+            ret =
CRYPTO_MOD_TYPE_INVALID_ERR; + break;; + } + + free(paddingdata); + + INTERNAL_RETURN(ret); + +} + +int internal_symm_decrypt(void *keyctx, unsigned char *indata, unsigned int inlen, unsigned char *iv, unsigned int ivlen, unsigned char *outdata, unsigned int *outlen, unsigned char *tag) +{ + int ret = INTERNAL_OK; + + InternalKeyCtx *tmpctx = (InternalKeyCtx *)keyctx; + unsigned char tmpiv[INTERNAL_IV_LEN + 1] = {0}; + + memcpy(tmpiv, iv, ivlen); + + switch (moduleinterfaces->type) { + case MODULE_GDAC_CARD_TYPE: { + unsigned char tmpkey[INTERNAL_MAX_KEY_LEN] = {0}; + unsigned int keylen = 0; + ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACExportKEK(tmpctx->session, atoi((char*)tmpctx->key), tmpkey, &keylen); + if (ret != INTERNAL_OK) + return ret; + ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACDecryptWithIndex(tmpctx->session, tmpkey, keylen, tmpctx->algotype, tmpiv, ivlen, indata, inlen, outdata, outlen, NULL, 0, NULL); + break; + } + case MODULE_JNTA_KMS_TYPE: { + int index = atoi((char *)(tmpctx->key)); + unsigned int standardtype = 0; + transform_jnta_algo_type(tmpctx->algotype, tmpctx->algomode, &standardtype); + ret = moduleinterfaces->standardInterfaces->InternalDecrypt(tmpctx->session, (void*)&index, standardtype, tmpiv, indata, inlen, outdata, outlen); + break; + } + case MODULE_SWXA_KMS_TYPE: + ret = moduleinterfaces->extendInterfaces.swxaInterfaces->SWXAEncKeyDecrypt(tmpctx->session, tmpctx->key, tmpctx->keysize, tmpctx->algotype, tmpiv, indata, inlen, outdata, outlen); + break; + default: + ret = CRYPTO_MOD_TYPE_INVALID_ERR; + break; + } + + if (ret == INTERNAL_OK) { + return internal_unpadding((char*)outdata, (int*)outlen); + } + + INTERNAL_RETURN(ret); + +} + +int internal_digest(void *sess, ModuleDigestAlgo algo, unsigned char * indata, unsigned int inlen, unsigned char *outdata, unsigned int *outlen) +{ + int ret = CRYPT_MOD_OK; + unsigned int realtype = 0; + int position = 0; + int updatelen = 0; + + ret = 
get_real_digest_algo_type(moduleinterfaces->type, algo, &realtype); + if (ret != CRYPT_MOD_OK) { + return ret; + } + + ret = moduleinterfaces->standardInterfaces->InternalHashInit(sess, realtype, NULL, NULL, 0); + if (ret != INTERNAL_OK) + return ret; + + while (inlen) { + if (inlen >= INTERNAL_MSG_BLOCK_LEN) { + updatelen = INTERNAL_MSG_BLOCK_LEN; + inlen -= INTERNAL_MSG_BLOCK_LEN; + } else { + updatelen = inlen; + inlen = 0; + } + + ret = moduleinterfaces->standardInterfaces->InternalHashUpdate(sess, indata + position, updatelen); + if (ret != INTERNAL_OK) + return ret; + position += updatelen; + } + + ret = moduleinterfaces->standardInterfaces->InternalHashFinal(sess, outdata, outlen); + + INTERNAL_RETURN(ret); +} + +/*如果传入的data为NULL,并且data_size为0,则对key自身做hmac*/ +int internal_hmac(void *ctx, unsigned char * data, unsigned int data_size, unsigned char *result, size_t *result_size) +{ + int ret = INTERNAL_OK; + + InternalKeyCtx *tmpctx = (InternalKeyCtx *)ctx; + + switch (moduleinterfaces->type) { + case MODULE_GDAC_CARD_TYPE:{ + unsigned char tmpkey[INTERNAL_MAX_KEY_LEN] = {0}; + unsigned int keylen = 0; + ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACExportKEK(tmpctx->session, atoi((char*)tmpctx->key), tmpkey, &keylen); + if (ret != INTERNAL_OK) + return ret; + ret = moduleinterfaces->extendInterfaces.gdacInterfaces->GDACHMAC(tmpctx->session, tmpkey, keylen, tmpctx->algotype, + (data != NULL) ? data : tmpkey, (data_size != 0) ? data_size : keylen, result, (unsigned int*)result_size); + memset(tmpkey,0x0,INTERNAL_MAX_KEY_LEN); + break; + } + case MODULE_JNTA_KMS_TYPE:{ + ret = moduleinterfaces->extendInterfaces.jntaInterfaces->JNTACalculateHmac(tmpctx->session, (TA_HMAC_ALG)tmpctx->algotype, atoi((char*)tmpctx->key), + NULL, 0, (data != NULL) ? data : tmpctx->key, (data_size != 0) ? 
data_size : tmpctx->keysize, result, (unsigned int*)result_size); + break; + } + case MODULE_SWXA_KMS_TYPE:{ + ret = moduleinterfaces->extendInterfaces.swxaInterfaces->SWXAHMAC(tmpctx->session, tmpctx->key, tmpctx->keysize, tmpctx->algotype, + (data != NULL) ? data : tmpctx->key, (data_size != 0) ? data_size : tmpctx->keysize, result, (unsigned int*)result_size); + break; + } + default: + ret = CRYPTO_MOD_TYPE_INVALID_ERR; + break; + } + + INTERNAL_RETURN(ret); + +} + +int internal_generate_random(void *sess, char *buffer, size_t size) +{ + int ret = INTERNAL_OK; + + ret = moduleinterfaces->standardInterfaces->InternalGenerateRandom(sess, size, (unsigned char*)buffer); + + INTERNAL_RETURN(ret); +} + + +/*第一位补0x80,后面补0x00,强补,即如果inlen正好是16的整数倍,则补一个0x80和15个0x00,如果只缺一,则只补一个0x80*/ +static int internal_padding(char *indata, int inlen, char *outdata, int *outlen) +{ + int i = 0; + int firstpad = 0x80; + int secondpad = 0x00; + + int paddlen = 0; + + paddlen = INTERNAL_BLOCK_LEN - inlen%INTERNAL_BLOCK_LEN; + + memcpy(outdata, indata, inlen); + + + for (i = 0; i < paddlen; i++) { + if (i == 0) + outdata[inlen + i] = firstpad; + else + outdata[inlen + i] = secondpad; + } + + *outlen = inlen + paddlen; + + return CRYPT_MOD_OK; +} + +static int internal_unpadding(char *indata, int *inlen) +{ + int firstpad = 0x80; + int secondpad = 0x00; + int tmplen = 0; + + tmplen = *inlen - 1; + + while (*(indata + tmplen) == secondpad) { + tmplen--; + } + + if (tmplen >= ((*inlen) - INTERNAL_BLOCK_LEN) && *(unsigned char*)(indata + tmplen) == firstpad) { + *inlen = tmplen; + } else { + return CRYPTO_MOD_UNPADDING_ERR; + } + + indata[tmplen] = '\0'; + + return CRYPT_MOD_OK; +} + + diff --git a/contrib/common_cipher/common_internal_interfaces.h b/contrib/common_cipher/common_internal_interfaces.h new file mode 100755 index 0000000000..b7aa6f1bdc --- /dev/null +++ b/contrib/common_cipher/common_internal_interfaces.h @@ -0,0 +1,127 @@ +#ifndef COMMON_INTERNAL_INTERFACES_H +#define 
COMMON_INTERNAL_INTERFACES_H + +#include "common_algo.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ECCref_MAX_BITS 512 +#define ECCref_MAX_LEN ((ECCref_MAX_BITS+7) / 8) + +/* ECC key data public key structure definition */ +typedef struct ECCrefPublicKey_st +{ + unsigned int bits; + unsigned char x[ECCref_MAX_LEN]; + unsigned char y[ECCref_MAX_LEN]; +} ECCrefPublicKey; + +typedef int (*StandardOpenDevice_type)(void**); +typedef int (*StandardCloseDevice_type)(void*); +typedef int (*StandardOpenSession_type)(void*, void**); +typedef int (*StandardCloseSession_type)(void*); +typedef int (*StandardGenerateRandom_type)(void*, unsigned int, unsigned char*); +typedef int (*StandardHashInit_type)(void*, unsigned int, ECCrefPublicKey*, unsigned char*, unsigned int); +typedef int (*StandardHashUpdate_type)(void*, unsigned char*, unsigned int); +typedef int (*StandardHashFinal_type)(void*, unsigned char*, unsigned int*); +typedef int (*StandardEncrypt_type)(void*, void*, unsigned int, unsigned char*, unsigned char*, unsigned int, unsigned char*, unsigned int*); +typedef int (*StandardDecrypt_type)(void*, void*, unsigned int, unsigned char*, unsigned char*, unsigned int, unsigned char*, unsigned int*); +typedef int (*StandardDestroyKey_type)(void*, void*); + + +/*SDF标准接口*/ +typedef struct { + StandardOpenDevice_type InternalOpenDevice; /*打开设备*/ + StandardCloseDevice_type InternalCloseDevice; /*关闭设备*/ + StandardOpenSession_type InternalOpenSession; /*打开会话*/ + StandardCloseSession_type InternalCloseSession; /*关闭会话*/ + StandardGenerateRandom_type InternalGenerateRandom; /*生成随机数*/ + StandardHashInit_type InternalHashInit; /*哈希初始化*/ + StandardHashUpdate_type InternalHashUpdate; /*哈希update*/ + StandardHashFinal_type InternalHashFinal; /*哈希结束*/ + StandardEncrypt_type InternalEncrypt; /*使用密钥句柄加密*/ + StandardDecrypt_type InternalDecrypt; /*使用密钥句柄解密*/ + StandardDestroyKey_type InternalDestroyKey; /*销毁密钥句柄*/ +}ModuleStandardInterfaces; + + +typedef int 
(*GDACHMAC_type)(void*, unsigned char*, unsigned int, unsigned int, unsigned char*, unsigned int, unsigned char*, unsigned int*); +typedef int (*GDACGenerateKEK_type)(void*, unsigned int, unsigned int); +typedef int (*GDACEncryptWithIndex_type)(void*, unsigned char*, unsigned int, unsigned int, unsigned char*, unsigned int, unsigned char*, unsigned int, unsigned char*, unsigned int*, unsigned char*, unsigned int, unsigned char*); +typedef int (*GDACDecryptWithIndex_type)(void*, unsigned char*, unsigned int, unsigned int, unsigned char*, unsigned int, unsigned char*, unsigned int, unsigned char*, unsigned int*, unsigned char*, unsigned int, unsigned char*); +typedef int (*GDACExportKEK_type)(void*, unsigned int, void*, unsigned int*); +typedef int (*GDACGetkeyState_type)(void*, unsigned int, unsigned int, unsigned int*); + +/*光电安辰扩展接口*/ +typedef struct { + GDACHMAC_type GDACHMAC; /*hmac*/ + GDACGenerateKEK_type GDACGenerateKEK; /*生成指定索引密钥*/ + GDACEncryptWithIndex_type GDACEncryptWithIndex; /*使用索引密钥加密*/ + GDACDecryptWithIndex_type GDACDecryptWithIndex; /*使用索引密钥解密*/ + GDACExportKEK_type GDACExportKEK; /*导出KEK,不对外提供,内做索引转密钥使用*/ + GDACGetkeyState_type GDACGetkeyState; /*获取密钥状态*/ +}ModuleGdacInterfaces; + + +typedef int (*SWXAOpenDeviceWithPathAndName_type)(unsigned char*, void **); +typedef int (*SWXAHMAC_type)(void *, unsigned char *, unsigned int, unsigned int, unsigned char *, unsigned int, unsigned char *,unsigned int*); +typedef int (*SWXAGenerateKeyCipher_type)(void*, unsigned int, unsigned char*, unsigned int*); +typedef int (*SWXAEncKeyEncrypt_type)(void*, unsigned char*, unsigned int, unsigned int, unsigned char*, unsigned char*, unsigned int, unsigned char*, unsigned int*); +typedef int (*SWXAEncKeyDecrypt_type)(void*, unsigned char*, unsigned int, unsigned int, unsigned char*, unsigned char*, unsigned int, unsigned char*, unsigned int*); + +/*三未信安扩展接口*/ +typedef struct { + SWXAOpenDeviceWithPathAndName_type SWXAOpenDeviceWithPathAndName; /*指定配置文件路径和名称打开设备*/ + 
SWXAHMAC_type SWXAHMAC; /*hmac*/ + SWXAGenerateKeyCipher_type SWXAGenerateKeyCipher; /*生成密钥密文*/ + SWXAEncKeyEncrypt_type SWXAEncKeyEncrypt; /*使用密钥密文加密*/ + SWXAEncKeyDecrypt_type SWXAEncKeyDecrypt; /*使用密钥密文解密*/ +}ModuleSwxaInterfaces; + + +typedef int (*JNTACalculateHmac_type)(void *, TA_HMAC_ALG, unsigned int, const unsigned char*, unsigned int, unsigned char*, unsigned int, unsigned char*, unsigned int*); +typedef int (*JNTAGenerateSymmKeyWithLMK_type)(void *, TA_SYMM_ALG, unsigned char*, unsigned int*, unsigned char*,unsigned int*); +typedef int (*JNTAImportKeyCipherByLMK_type)(void*, unsigned int, int, int, TA_SYMM_ALG, int, const unsigned char*, unsigned int, const unsigned char*, unsigned int, + const unsigned char*, unsigned int, const unsigned char symmKcv[8], unsigned int); + + +/*江南天安扩展接口*/ +typedef struct { + JNTACalculateHmac_type JNTACalculateHmac; /*hmac*/ + JNTAGenerateSymmKeyWithLMK_type JNTAGenerateSymmKeyWithLMK; /*生成LMK加密的密钥密文*/ + JNTAImportKeyCipherByLMK_type JNTAImportKeyCipherByLMK; /*导入LMK加密的密钥密文到指定的索引位置*/ +}ModuleJntaInterfaces; + + +/*硬件提供的所有接口*/ +typedef struct { + ModuleType type; + ModuleStandardInterfaces *standardInterfaces; + union { + ModuleGdacInterfaces *gdacInterfaces; + ModuleSwxaInterfaces *swxaInterfaces; + ModuleJntaInterfaces *jntaInterfaces; + }extendInterfaces; +}ModuleInterfaces; + +extern int load_module_driver(ModuleParams moduleparams); +extern int unload_module_driver(); +extern int internal_open_device(char* cfg_path); +extern int internal_close_device(); +extern int internal_open_session(void **sess); +extern int internal_close_session(void *sess); +extern ModuleType get_current_module_type(); +extern int internal_generate_symm_key(void* sess, ModuleSymmKeyAlgo algo, unsigned char* key, unsigned int* keylen); +extern int internal_symm_encrypt(void *keyctx, unsigned char *indata, unsigned int inlen, unsigned char *iv, unsigned int ivlen, unsigned char *outdata, unsigned int *outlen, unsigned char *tag); +extern int 
internal_symm_decrypt(void *keyctx, unsigned char *indata, unsigned int inlen, unsigned char *iv, unsigned int ivlen, unsigned char *outdata, unsigned int *outlen, unsigned char *tag); +extern int internal_digest(void *sess, ModuleDigestAlgo algo, unsigned char * indata, unsigned int inlen, unsigned char *outdata, unsigned int *outlen); +extern int internal_hmac(void *ctx, unsigned char * data, unsigned int data_size, unsigned char *result, long unsigned int *result_size); +extern int internal_generate_random(void *sess, char *buffer, long unsigned int size); + +#ifdef __cplusplus +} +#endif + +#endif /* COMMON_INTERNAL_INTERFACES_H */ diff --git a/contrib/common_cipher/common_utils.cpp b/contrib/common_cipher/common_utils.cpp new file mode 100755 index 0000000000..5b06cf67d3 --- /dev/null +++ b/contrib/common_cipher/common_utils.cpp @@ -0,0 +1,155 @@ +#include +#include +#include +#include "common_utils.h" +#include "common_err.h" +#include "common_algo.h" + +/*去掉字符串头和尾的空格*/ +static char* remove_space(char *input) +{ + int header = 0; + int end = 0; + int inputlen = 0; + const char filter = ' '; + char *tmp = NULL; + + if (input == NULL || input[0] == '\0') { + return NULL; + } + + inputlen = strlen(input); + + /*去头部空格*/ + while(input[header++] == filter && header < inputlen) { + /*do nothing*/ + } + + end = inputlen - 1; + + /*去尾部空格*/ + while(input[end--] == filter && end >= (header - 1)) { + /*do nothing*/ + } + + tmp = (char*)malloc(inputlen + 1); + memset(tmp,0x0,inputlen + 1); + + /*初始位置为header - 1,长度为(end + 1)- (header -1) + 1*/ + memcpy(tmp, input + header - 1, end - header + 3); + + return tmp; +} + +static int set_module_params(char* k, char* v, ModuleParams *moduleparams) +{ + if (IS_MODULE_TYPE(k)) { + if (moduleparams->moduletype != MODULE_INVALID_TYPE) { + return CRYPTO_MOD_TYPE_REPEATED_ERR; + } + + moduleparams->moduletype = GET_MODULE_TYPE(v); + + if (moduleparams->moduletype == MODULE_INVALID_TYPE) { + return CRYPTO_MOD_TYPE_INVALID_ERR; + } + } 
else if (IS_MODULE_LIB_PATH(k)) { + + if (moduleparams->libpath[0] != '\0') { + return CRYPTO_MOD_LIBPATH_REPEATED_ERR; + } + + if (v[0] == '\0' || strlen(v) >= MODULE_MAX_PATH_LEN) { + return CRYPTO_MOD_LIBPATH_INVALID_ERR; + } else { + memcpy(moduleparams->libpath, v, strlen(v)); + } + } else if(IS_MODULE_CONFIG_FILE_PATH(k)) { + + if (moduleparams->cfgfilepath[0] != '\0') { + return CRYPTO_MOD_CFG_PATH_REPEATED_ERR; + } + + if (v[0] == '\0' || strlen(v) >= MODULE_MAX_PATH_LEN) { + return CRYPTO_MOD_CFG_PATH_INVALID_ERR; + } else { + memcpy(moduleparams->cfgfilepath, v, strlen(v)); + } + } else { + return CRYPTO_MOD_UNKNOWN_PARAM_ERR; + } + + return CRYPT_MOD_OK; + +} + +static int check_module_params(ModuleParams *moduleparams) +{ + if (moduleparams->libpath[0] == '\0') { + return CRYPTO_MOD_LIBPATH_INVALID_ERR; + } + + switch (moduleparams->moduletype) { + case MODULE_GDAC_CARD_TYPE: + /*光电安辰密码卡不需要配置文件*/ + if (moduleparams->cfgfilepath[0] != '\0') { + return CRYPTO_MOD_PARAM_TOO_MANY_ERR; + } + break; + case MODULE_JNTA_KMS_TYPE: + /*江南天安的配置文件,需要设置为环境变量使用"TASS_GHVSM_CFG_PATH",在后面加载时自己临时设置使用*/ + if (moduleparams->cfgfilepath[0] == '\0') { + return CRYPTO_MOD_CFG_PATH_INVALID_ERR; + } + break; + case MODULE_SWXA_KMS_TYPE: + /*三未信安kms使用带配置文件路径名称的接口*/ + if (moduleparams->cfgfilepath[0] == '\0') { + return CRYPTO_MOD_CFG_PATH_INVALID_ERR; + } + break; + default: + return CRYPTO_MOD_TYPE_INVALID_ERR; + } + + return CRYPT_MOD_OK; +} + +int parse_module_params(char *paramsstring, ModuleParams *moduleparams) +{ + char *p = NULL; + char *saveptr1 = NULL; + const char *split1 = ","; + const char *split2 = "="; + int ret = 0; + + p = strtok_r(paramsstring, split1, &saveptr1); + + while (p != NULL) { + char *q = NULL; + char *saveptr2 = NULL; + char *tmp_p = NULL; + char *tmp_ptr2 = NULL; + q = strtok_r(p, split2, &saveptr2); + + tmp_p = remove_space(p); + tmp_ptr2 = remove_space(saveptr2); + + ret = set_module_params(tmp_p, tmp_ptr2, moduleparams); + if (ret != 
CRYPT_MOD_OK) { + free(tmp_p); + free(tmp_ptr2); + return ret; + } + + free(tmp_p); + free(tmp_ptr2); + + q = NULL; + p = strtok_r(NULL, split1, &saveptr1); + } + + return check_module_params(moduleparams); + +} + diff --git a/contrib/common_cipher/common_utils.h b/contrib/common_cipher/common_utils.h new file mode 100755 index 0000000000..2f9d6684b9 --- /dev/null +++ b/contrib/common_cipher/common_utils.h @@ -0,0 +1,52 @@ +#ifndef COMMON_UTILS_H +#define COMMON_UTILS_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + MODULE_INVALID_TYPE = 0, + MODULE_GDAC_CARD_TYPE, /*光电安辰密码卡*/ + MODULE_JNTA_KMS_TYPE, /*江南天安KMS*/ + MODULE_SWXA_KMS_TYPE /*三未信安KMS*/ +} ModuleType; + +/*定义以下字符串宏,用来对输入的kv结构字符串做解析,获取对应的value*/ +#define MODLUE_TYPE "MODLUE_TYPE" +#define MODLUE_LIB_PATH "MODLUE_LIB_PATH" +#define MODULE_CONFIG_FILE_PATH "MODULE_CONFIG_FILE_PATH" + +/*支持的硬件类型字符串*/ +#define MODULE_GDAC_CARD_STR "GDACCARD" +#define MODULE_JNTA_KMS_STR "JNTAKMS" +#define MODULE_SWXA_KMS_STR "SWXAKMS" + +#define IS_GDAC_CARD_TYPE(s) (strcmp(s, MODULE_GDAC_CARD_STR) == 0) +#define IS_JNTA_KMS_TYPE(s) (strcmp(s, MODULE_JNTA_KMS_STR) == 0) +#define IS_SWXA_KMS_TYPE(s) (strcmp(s, MODULE_SWXA_KMS_STR) == 0) + +/*字符串转为枚举类型*/ +#define GET_MODULE_TYPE(s) (IS_GDAC_CARD_TYPE(s) ? MODULE_GDAC_CARD_TYPE \ + : IS_JNTA_KMS_TYPE(s) ? MODULE_JNTA_KMS_TYPE \ + : IS_SWXA_KMS_TYPE(s) ? 
MODULE_SWXA_KMS_TYPE : MODULE_INVALID_TYPE) + +#define IS_MODULE_TYPE(s) (strcmp(s, MODLUE_TYPE) == 0) +#define IS_MODULE_LIB_PATH(s) (strcmp(s, MODLUE_LIB_PATH) == 0) +#define IS_MODULE_CONFIG_FILE_PATH(s) (strcmp(s, MODULE_CONFIG_FILE_PATH) == 0) + +#define MODULE_MAX_PATH_LEN 1024 + +typedef struct { + ModuleType moduletype; + char libpath[MODULE_MAX_PATH_LEN]; + char cfgfilepath[MODULE_MAX_PATH_LEN]; +}ModuleParams; + +extern int parse_module_params(char *paramsstring, ModuleParams *moduleparams); + +#ifdef __cplusplus +} +#endif + +#endif /* COMMON_UTILS_H */ diff --git a/contrib/common_cipher/test.cpp b/contrib/common_cipher/test.cpp new file mode 100755 index 0000000000..05e280e756 --- /dev/null +++ b/contrib/common_cipher/test.cpp @@ -0,0 +1,321 @@ +#include +#include +#include +#include +#include + +#define MAX_PROVIDER_NAME_LEN 128 +#define MAX_ERRMSG_LEN 256 + +static pthread_rwlock_t drivermutex; + +typedef enum { + MODULE_AES_128_CBC = 0, + MODULE_AES_128_CTR, + MODULE_AES_128_GCM, + MODULE_AES_256_CBC, + MODULE_AES_256_CTR, + MODULE_AES_256_GCM, + MODULE_SM4_CBC, + MODULE_SM4_CTR, + MODULE_HMAC_SHA256, + MODULE_HMAC_SM3, + MODULE_DETERMINISTIC_KEY, + MODULE_ALGO_MAX = 1024 +} ModuleSymmKeyAlgo; + + +typedef enum { + MODULE_SHA256 = 0, + MODULE_SM3, + MODULE_DIGEST_MAX = 1024 +} ModuleDigestAlgo; + + +typedef enum { + KEY_TYPE_INVALID, + KEY_TYPE_PLAINTEXT, + KEY_TYPE_CIPHERTEXT, + KEY_TYPE_NAMEORIDX, + KEY_TYPE_MAX +} KeyType; + + +typedef struct { + char provider_name[MAX_PROVIDER_NAME_LEN]; + KeyType key_type; + int supported_symm[MODULE_ALGO_MAX]; // 不支持算法填入0或者支持算法填入1 + int supported_digest[MODULE_DIGEST_MAX]; // 不支持算法填入0或者支持算法填入1 +} SupportedFeature; + + +typedef int (*crypto_module_init_type)(char *load_info, SupportedFeature *supported_feature); +typedef int (*crypto_module_sess_init_type)(char *key_info, void **sess); +typedef void (*crypto_module_sess_exit_type)(void *sess); +typedef int (*crypto_create_symm_key_type)(void *sess, 
ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t *key_id_size); +typedef int (*crypto_ctx_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, int enc, unsigned char *key_id, size_t key_id_size); +typedef int (*crypto_result_size_type)(void *ctx, int enc, size_t data_size); +typedef void (*crypto_ctx_clean_type)(void *ctx); +typedef int (*crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); +typedef int (*crypto_digest_type)(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); +typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); +typedef void (*crypto_hmac_clean_type)(void *ctx); +typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); +typedef int (*crypto_gen_random_type)(void *sess, char *buffer, size_t size); +typedef int (*crypto_deterministic_enc_dec_type)(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); +typedef int (*crypto_get_errmsg_type)(void *sess, char *errmsg); + + +void *libhandle = NULL; +crypto_module_init_type crypto_module_init_use = NULL; +crypto_module_sess_init_type crypto_module_sess_init_use = NULL; +crypto_module_sess_exit_type crypto_module_sess_exit_use = NULL; +crypto_create_symm_key_type crypto_create_symm_key_use = NULL; +crypto_ctx_init_type crypto_ctx_init_use = NULL; +crypto_result_size_type crypto_result_size_use = NULL; +crypto_ctx_clean_type crypto_ctx_clean_use = NULL; +crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use = NULL; +crypto_digest_type crypto_digest_use = NULL; +crypto_hmac_init_type crypto_hmac_init_use = NULL; +crypto_hmac_clean_type crypto_hmac_clean_use = 
NULL; +crypto_hmac_type crypto_hmac_use = NULL; +crypto_gen_random_type crypto_gen_random_use = NULL; +crypto_deterministic_enc_dec_type crypto_deterministic_enc_dec_use = NULL; +crypto_get_errmsg_type crypto_get_errmsg_use = NULL; + +static void load_lib() +{ + libhandle = dlopen("/home//vastbase/contrib/common_cipher/libcommoncipher.so", RTLD_LAZY); + crypto_module_init_use = (crypto_module_init_type)dlsym(libhandle, "crypto_module_init"); + crypto_module_sess_init_use = (crypto_module_sess_init_type)dlsym(libhandle, "crypto_module_sess_init"); + crypto_module_sess_exit_use = (crypto_module_sess_exit_type)dlsym(libhandle, "crypto_module_sess_exit"); + crypto_create_symm_key_use = (crypto_create_symm_key_type)dlsym(libhandle, "crypto_create_symm_key"); + crypto_ctx_init_use = (crypto_ctx_init_type)dlsym(libhandle, "crypto_ctx_init"); + crypto_result_size_use = (crypto_result_size_type)dlsym(libhandle, "crypto_result_size"); + crypto_ctx_clean_use = (crypto_ctx_clean_type)dlsym(libhandle, "crypto_ctx_clean"); + crypto_encrypt_decrypt_use = (crypto_encrypt_decrypt_type)dlsym(libhandle, "crypto_encrypt_decrypt"); + crypto_digest_use = (crypto_digest_type)dlsym(libhandle, "crypto_digest"); + crypto_hmac_init_use = (crypto_hmac_init_type)dlsym(libhandle, "crypto_hmac_init"); + crypto_hmac_clean_use = (crypto_hmac_clean_type)dlsym(libhandle, "crypto_hmac_clean"); + crypto_hmac_use = (crypto_hmac_type)dlsym(libhandle, "crypto_hmac"); + crypto_gen_random_use = (crypto_gen_random_type)dlsym(libhandle, "crypto_gen_random"); + crypto_deterministic_enc_dec_use = (crypto_deterministic_enc_dec_type)dlsym(libhandle, "crypto_deterministic_enc_dec"); + crypto_get_errmsg_use = (crypto_get_errmsg_type)dlsym(libhandle, "crypto_get_errmsg"); + +} + +static void* one_thread_func(void *data) +{ + int ret = 1; + int i = 0; + char options[] = 
{"MODLUE_TYPE=JNTAKMS,MODLUE_LIB_PATH=/home//vastbase/contrib/common_cipher/libTassSDF4GHVSM.so,MODULE_CONFIG_FILE_PATH=/home//vastbase/contrib/common_cipher/"}; + SupportedFeature supportedfeature; + char errmsg[MAX_ERRMSG_LEN] = {0}; + void *session = NULL; + unsigned char key[32] = {0}; + long unsigned int keylen = 0; + void *keyctx = NULL; + unsigned char srcdata[] = {"12345678"}; + unsigned char plaint[32] = {0}; + long unsigned int plaintlen = 32; + unsigned char encdata[32] = {0}; + long unsigned int enclen = 32; + unsigned char encdata2[32] = {0}; + long unsigned int enclen2 = 32; + unsigned char hashdata[32] = {0}; + long unsigned int hashlen = 0; + unsigned char hmacdata[32] = {0}; + long unsigned int hmaclen = 0; + long unsigned int needlen = 16; + char random[32] = {0}; + unsigned char iv[] = {"1234567812345678"}; + + ret = crypto_module_init_use(options, &supportedfeature); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_module_init error,errmsg:%s\n", errmsg); + return NULL; + } else { + printf("crypto_module_init success\n"); + printf("provider_name = %s,key_type = %d\n",supportedfeature.provider_name,supportedfeature.key_type); + for (i = 0; i < MODULE_ALGO_MAX;i++) { + if (supportedfeature.supported_symm[i] == 1) + printf("supported_symm[%d]\n",i); + } + for (i = 0; i < MODULE_DIGEST_MAX;i++) { + if (supportedfeature.supported_digest[i] == 1) + printf("supported_digest[%d]\n",i); + } + } + + ret = crypto_module_sess_init_use(NULL, &session); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_module_sess_init error,errmsg:%s\n", errmsg); + return NULL; + } else { + printf("crypto_module_sess_init success\n"); + } + + ret = crypto_create_symm_key_use(session, MODULE_SM4_CBC, key, &keylen); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_create_symm_key_use error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else { + 
printf("crypto_create_symm_key_use success\n"); + printf("key = %s\n",key); + } + + ret = crypto_ctx_init_use(session, &keyctx, MODULE_SM4_CBC, 1, key, keylen); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_ctx_init_use error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_ctx_init_use success\n"); + } + + ret = crypto_encrypt_decrypt_use(keyctx, 1, srcdata, 8, iv, 16, encdata, &enclen, NULL); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_encrypt_decrypt_use enc error,errmsg:%s\n", errmsg); + crypto_ctx_clean_use(keyctx); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_encrypt_decrypt_use enc success\n"); + } + + ret = crypto_encrypt_decrypt_use(keyctx, 0, encdata, enclen, iv, 16, plaint, &plaintlen, NULL); + if (ret != 1 || strcmp((char*)plaint, (char*)srcdata)) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_encrypt_decrypt_use dec error,errmsg:%s\n", errmsg); + crypto_ctx_clean_use(keyctx); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_encrypt_decrypt_use dec success\n"); + } + + printf("enc nedd len:%d\n",crypto_result_size_use(keyctx, 1, needlen)); + + printf("dec nedd len:%d\n",crypto_result_size_use(keyctx, 0, needlen)); + + crypto_ctx_clean_use(keyctx); + + ret = crypto_digest_use(session, MODULE_SM3, plaint, plaintlen, hashdata, &hashlen); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_digest_use error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_digest_use success\n"); + printf("hashlen = %ld\n", hashlen); + } + + ret = crypto_hmac_init_use(session, &keyctx, MODULE_HMAC_SM3, key, keylen); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_hmac_init_use error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else 
{ + printf("crypto_hmac_init_use success\n"); + } + + ret = crypto_hmac_use(keyctx, plaint, plaintlen, hmacdata, &hmaclen); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_hmac_use error,errmsg:%s\n", errmsg); + crypto_hmac_clean_use(keyctx); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_hmac_use success\n"); + printf("hmaclen = %ld\n", hmaclen); + } + + crypto_hmac_clean_use(keyctx); + + ret = crypto_gen_random_use(session, random, 31); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_gen_random_use error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_gen_random_use success\n"); + } + + ret = crypto_deterministic_enc_dec_use(session, 1, plaint, key, keylen, plaintlen, encdata, &enclen); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_deterministic_enc_dec_use1 enc error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_deterministic_enc_dec_use1 enc success\n"); + } + + ret = crypto_deterministic_enc_dec_use(session, 1, plaint, key, keylen, plaintlen, encdata2, &enclen2); + if (ret != 1 || strcmp((char*)encdata, (char*)encdata2)) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_deterministic_enc_dec_use2 enc error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_deterministic_enc_dec_use2 enc success\n"); + } + + ret = crypto_deterministic_enc_dec_use(session, 0, encdata2, key, keylen, enclen2, plaint, &plaintlen); + if (ret != 1 || strcmp((char*)plaint, (char*)srcdata)) + { + crypto_get_errmsg_use(NULL, errmsg); + printf("crypto_deterministic_enc_dec_use2 dec error,errmsg:%s\n", errmsg); + crypto_module_sess_exit_use(session); + return NULL; + } else { + printf("crypto_deterministic_enc_dec_use2 dec success\n"); + } + + crypto_get_errmsg_use(NULL, errmsg); + 
printf("crypto_get_errmsg_use errmsg:%s\n", errmsg); + + crypto_module_sess_exit_use(session); + + return NULL; +} + +int main() +{ + pthread_t t1, t2, t3, t4; + + load_lib(); + + pthread_create(&t1,0,one_thread_func,NULL); + pthread_create(&t2,0,one_thread_func,NULL); + pthread_create(&t3,0,one_thread_func,NULL); + pthread_create(&t4,0,one_thread_func,NULL); + pthread_join(t1,NULL); + pthread_join(t2,NULL); + pthread_join(t3,NULL); + pthread_join(t4,NULL); + + return 0; +} -- Gitee From abff4b07eac3afc691d477d0244dff51fa598bd1 Mon Sep 17 00:00:00 2001 From: lyoursly Date: Tue, 27 Aug 2024 17:28:33 +0800 Subject: [PATCH 250/347] =?UTF-8?q?=E6=94=AF=E6=8C=81=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E5=A4=87=E4=BB=BDp=E5=92=8Cd=E6=A0=BC=E5=BC=8F=E7=9A=84?= =?UTF-8?q?=E7=A1=AC=E4=BB=B6=E5=8A=A0=E5=AF=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/CMakeLists.txt | 2 + src/bin/pg_dump/Makefile | 2 +- src/bin/pg_dump/common_cipher.cpp | 313 ++++++++++++++++++++++++ src/bin/pg_dump/pg_backup.h | 23 +- src/bin/pg_dump/pg_backup_archiver.cpp | 28 ++- src/bin/pg_dump/pg_backup_cipher.h | 37 +++ src/bin/pg_dump/pg_backup_directory.cpp | 267 +++++++++++++++++++- src/bin/pg_dump/pg_dump.cpp | 86 ++++--- src/bin/pg_dump/pg_restore.cpp | 50 +++- src/bin/psql/CMakeLists.txt | 1 + src/bin/psql/Makefile | 2 +- src/bin/psql/common_cipher.cpp | 306 +++++++++++++++++++++++ src/bin/psql/common_cipher.h | 37 +++ src/bin/psql/startup.cpp | 31 ++- src/gausskernel/cbb/utils/aes/aes.cpp | 79 ++++-- src/include/utils/aes.h | 13 +- 16 files changed, 1192 insertions(+), 85 deletions(-) create mode 100644 src/bin/pg_dump/common_cipher.cpp create mode 100644 src/bin/pg_dump/pg_backup_cipher.h create mode 100644 src/bin/psql/common_cipher.cpp create mode 100644 src/bin/psql/common_cipher.h diff --git a/src/bin/pg_dump/CMakeLists.txt b/src/bin/pg_dump/CMakeLists.txt index 4d8872cf03..b65f7f69d8 100755 --- a/src/bin/pg_dump/CMakeLists.txt +++ 
b/src/bin/pg_dump/CMakeLists.txt @@ -21,6 +21,7 @@ set(TGT_dump_SRC ${CMAKE_CURRENT_SOURCE_DIR}/pg_backup_tar.cpp ${CMAKE_CURRENT_SOURCE_DIR}/pg_backup_directory.cpp ${CMAKE_CURRENT_SOURCE_DIR}/compress_io.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/common_cipher.cpp ) set(TGT_dumpall_SRC @@ -47,6 +48,7 @@ set(TGT_restore_SRC ${CMAKE_CURRENT_SOURCE_DIR}/pg_backup_tar.cpp ${CMAKE_CURRENT_SOURCE_DIR}/pg_backup_directory.cpp ${CMAKE_CURRENT_SOURCE_DIR}/compress_io.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/common_cipher.cpp ) set(TGT_dump_INC ${PROJECT_SRC_DIR}/common/port diff --git a/src/bin/pg_dump/Makefile b/src/bin/pg_dump/Makefile index 8a4731236d..4a47e19425 100644 --- a/src/bin/pg_dump/Makefile +++ b/src/bin/pg_dump/Makefile @@ -56,7 +56,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif OBJS= pg_backup_archiver.o pg_backup_db.o pg_backup_custom.o \ pg_backup_null.o pg_backup_tar.o \ - pg_backup_directory.o dumpmem.o dumputils.o compress_io.o $(WIN32RES) + pg_backup_directory.o dumpmem.o dumputils.o compress_io.o common_cipher.o $(WIN32RES) LIBS += -lcjson -lcurl -lz ifeq ($(enable_lite_mode), no) diff --git a/src/bin/pg_dump/common_cipher.cpp b/src/bin/pg_dump/common_cipher.cpp new file mode 100644 index 0000000000..3b1a06333f --- /dev/null +++ b/src/bin/pg_dump/common_cipher.cpp @@ -0,0 +1,313 @@ +#include "pg_backup_cipher.h" +#include "port.h" + +#define MAX_PROVIDER_NAME_LEN 128 +#define MAX_ERRMSG_LEN 256 + +typedef enum { + MODULE_AES_128_CBC = 0, + MODULE_AES_128_CTR, + MODULE_AES_128_GCM, + MODULE_AES_256_CBC, + MODULE_AES_256_CTR, + MODULE_AES_256_GCM, + MODULE_SM4_CBC, + MODULE_SM4_CTR, + MODULE_HMAC_SHA256, + MODULE_HMAC_SM3, + MODULE_DETERMINISTIC_KEY, + MODULE_ALGO_MAX = 1024 +} ModuleSymmKeyAlgo; + +typedef enum { + MODULE_SHA256 = 0, + MODULE_SM3, + MODULE_DIGEST_MAX = 1024 +} ModuleDigestAlgo; + +typedef enum { + KEY_TYPE_INVALID, + KEY_TYPE_PLAINTEXT, + KEY_TYPE_CIPHERTEXT, + KEY_TYPE_NAMEORIDX, + KEY_TYPE_MAX +} KeyType; + +typedef struct { + char 
provider_name[MAX_PROVIDER_NAME_LEN]; + KeyType key_type; + int supported_symm[MODULE_ALGO_MAX]; // 不支持算法填入0或者支持算法填入1 + int supported_digest[MODULE_DIGEST_MAX]; // 不支持算法填入0或者支持算法填入1 +} SupportedFeature; + +typedef int (*crypto_module_init_type)(char *load_info, SupportedFeature *supported_feature); +typedef int (*crypto_module_sess_init_type)(char *key_info, void **sess); +typedef void (*crypto_module_sess_exit_type)(void *sess); +typedef int (*crypto_create_symm_key_type)(void *sess, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t *key_id_size); +typedef int (*crypto_ctx_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, int enc, unsigned char *key_id, size_t key_id_size); +typedef int (*crypto_result_size_type)(void *ctx, int enc, size_t data_size); +typedef void (*crypto_ctx_clean_type)(void *ctx); +typedef int (*crypto_digest_type)(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); +typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); +typedef void (*crypto_hmac_clean_type)(void *ctx); +typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); +typedef int (*crypto_gen_random_type)(void *sess, char *buffer, size_t size); +typedef int (*crypto_deterministic_enc_dec_type)(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); +typedef int (*crypto_get_errmsg_type)(void *sess, char *errmsg); + +static void *libhandle = NULL; + +static crypto_module_init_type crypto_module_init_use = NULL; +static crypto_module_sess_init_type crypto_module_sess_init_use = NULL; +static crypto_module_sess_exit_type crypto_module_sess_exit_use = NULL; +static crypto_create_symm_key_type crypto_create_symm_key_use = NULL; +static crypto_ctx_init_type crypto_ctx_init_use 
= NULL; +static crypto_result_size_type crypto_result_size_use = NULL; +static crypto_ctx_clean_type crypto_ctx_clean_use = NULL; +crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use = NULL; +static crypto_digest_type crypto_digest_use = NULL; +static crypto_hmac_init_type crypto_hmac_init_use = NULL; +static crypto_hmac_clean_type crypto_hmac_clean_use = NULL; +static crypto_hmac_type crypto_hmac_use = NULL; +static crypto_gen_random_type crypto_gen_random_use = NULL; +static crypto_deterministic_enc_dec_type crypto_deterministic_enc_dec_use = NULL; +static crypto_get_errmsg_type crypto_get_errmsg_use = NULL; + +bool load_crypto_module_lib() +{ + errno_t rc = 0; + char libpath[1024] = {0}; + char* gaussHome = gs_getenv_r("GAUSSHOME"); + + rc = snprintf_s(libpath, sizeof(libpath), sizeof(libpath) - 1, "%s/lib/postgresql/common_cipher.so",gaussHome); + securec_check_ss_c(rc, "", ""); + + libhandle = dlopen(libpath, RTLD_LAZY); + if (libhandle == NULL) { + return false; + } + crypto_module_init_use = (crypto_module_init_type)dlsym(libhandle, "crypto_module_init"); + crypto_module_sess_init_use = (crypto_module_sess_init_type)dlsym(libhandle, "crypto_module_sess_init"); + crypto_module_sess_exit_use = (crypto_module_sess_exit_type)dlsym(libhandle, "crypto_module_sess_exit"); + crypto_create_symm_key_use = (crypto_create_symm_key_type)dlsym(libhandle, "crypto_create_symm_key"); + crypto_ctx_init_use = (crypto_ctx_init_type)dlsym(libhandle, "crypto_ctx_init"); + crypto_result_size_use = (crypto_result_size_type)dlsym(libhandle, "crypto_result_size"); + crypto_ctx_clean_use = (crypto_ctx_clean_type)dlsym(libhandle, "crypto_ctx_clean"); + crypto_encrypt_decrypt_use = (crypto_encrypt_decrypt_type)dlsym(libhandle, "crypto_encrypt_decrypt"); + crypto_digest_use = (crypto_digest_type)dlsym(libhandle, "crypto_digest"); + crypto_hmac_init_use = (crypto_hmac_init_type)dlsym(libhandle, "crypto_hmac_init"); + crypto_hmac_clean_use = (crypto_hmac_clean_type)dlsym(libhandle, 
"crypto_hmac_clean"); + crypto_hmac_use = (crypto_hmac_type)dlsym(libhandle, "crypto_hmac"); + crypto_gen_random_use = (crypto_gen_random_type)dlsym(libhandle, "crypto_gen_random"); + crypto_deterministic_enc_dec_use = (crypto_deterministic_enc_dec_type)dlsym(libhandle, "crypto_deterministic_enc_dec"); + crypto_get_errmsg_use = (crypto_get_errmsg_type)dlsym(libhandle, "crypto_get_errmsg"); + + if (crypto_module_init_use == NULL + || crypto_module_sess_init_use == NULL + || crypto_module_sess_exit_use == NULL + || crypto_create_symm_key_use == NULL + || crypto_ctx_init_use == NULL + || crypto_result_size_use == NULL + || crypto_ctx_clean_use == NULL + || crypto_encrypt_decrypt_use == NULL + || crypto_digest_use == NULL + || crypto_hmac_init_use == NULL + || crypto_hmac_clean_use == NULL + || crypto_hmac_use == NULL + || crypto_gen_random_use == NULL + || crypto_deterministic_enc_dec_use == NULL + || crypto_get_errmsg_use == NULL) { + dlclose(libhandle); + return false; + } + + return true; +} + +void unload_crypto_module(int code, void* args) +{ + if (libhandle) { + dlclose(libhandle); + libhandle = NULL; + } + +} + +static int transform_type(char* type) +{ + if (strcmp(type, "AES128_CBC") == 0) { + return MODULE_AES_128_CBC; + } else if (strcmp(type, "AES128_CTR") == 0) { + return MODULE_AES_128_CTR; + } else if (strcmp(type, "AES128_GCM") == 0) { + return MODULE_AES_128_GCM; + } else if (strcmp(type, "AES256_CBC") == 0) { + return MODULE_AES_256_CBC; + } else if (strcmp(type, "AES256_CTR") == 0) { + return MODULE_AES_256_CTR; + } else if (strcmp(type, "AES256_GCM") == 0) { + return MODULE_AES_256_GCM; + } else if (strcmp(type, "SM4_CBC") == 0) { + return MODULE_SM4_CBC; + } else if (strcmp(type, "SM4_CTR") == 0) { + return MODULE_SM4_CTR; + } + + return -1; + +} + +void initCryptoModule(ArchiveHandle* AH) +{ + int ret = 1; + SupportedFeature supportedfeature; + int modulType = 0; + Archive* fort = (Archive*)AH; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = 
crypto_module_init_use(fort->crypto_modlue_params, &supportedfeature); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + exit_horribly(NULL, "%s\n", errmsg); + } + + modulType = transform_type(fort->crypto_type); + if (modulType < 0 || supportedfeature.supported_symm[modulType] == 0) { + exit_horribly(NULL, "unsupported this mode:%s\n", fort->crypto_type); + } + +} + +void initCryptoSession(ArchiveHandle* AH) +{ + int ret = 1; + Archive* fort = (Archive*)AH; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_module_sess_init_use(NULL, &(fort->cryptoModlueCtx.moduleSession)); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + exit_horribly(NULL, "%s\n", errmsg); + } + +} + +void releaseCryptoSession(int code, void* args) +{ + if (libhandle && ((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.moduleSession) { + crypto_module_sess_exit_use(((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.moduleSession); + ((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.moduleSession = NULL; + } +} + +void initCryptoKeyCtx(ArchiveHandle* AH) +{ + int ret = 1; + int enc = (AH->mode == archModeWrite) ? 
1 : 0; + Archive* fort = (Archive*)AH; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_ctx_init_use(fort->cryptoModlueCtx.moduleSession, &(fort->cryptoModlueCtx.key_ctx), (ModuleSymmKeyAlgo)transform_type(fort->crypto_type), enc, fort->Key, fort->keylen); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + crypto_module_sess_exit_use(fort->cryptoModlueCtx.moduleSession); + exit_horribly(NULL, "%s\n", errmsg); + } +} + +void releaseCryptoCtx(int code, void* args) +{ + if (libhandle && ((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.key_ctx) { + crypto_ctx_clean_use(((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.key_ctx); + ((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.key_ctx = NULL; + } +} + +void symmGenerateKey(ArchiveHandle* AH) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_create_symm_key_use(AH->publicArc.cryptoModlueCtx.moduleSession, (ModuleSymmKeyAlgo)transform_type(AH->publicArc.crypto_type), AH->publicArc.Key, (size_t*)&(AH->publicArc.keylen)); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + releaseCryptoSession(0, AH); + unload_crypto_module(0, NULL); + exit_horribly(NULL, "%s\n", errmsg); + } +} + +void symmEncDec(ArchiveHandle* AH, bool isEnc, char* indata, int inlen, char* outdata, int* outlen) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_encrypt_decrypt_use(AH->publicArc.cryptoModlueCtx.key_ctx, isEnc, (unsigned char*)indata, inlen, AH->publicArc.rand, 16, (unsigned char*)outdata, (size_t*)outlen, NULL); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + releaseCryptoCtx(0, AH); + releaseCryptoSession(0, AH); + unload_crypto_module(0, NULL); + exit_horribly(NULL, "%s\n", errmsg); + } +} + +void CryptoModuleParamsCheck(ArchiveHandle* AH, const char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt, bool is_gen_key) +{ + errno_t rc = 0; + Archive *fout = (Archive*)AH; + + if (!load_crypto_module_lib()) { 
+ exit_horribly(NULL, "load crypto module lib failed\n"); + } + + rc = memcpy_s((GS_UCHAR*)fout->crypto_modlue_params, CRYPTO_MODULE_PARAMS_MAX_LEN, params, strlen(params)); + securec_check_c(rc, "\0", "\0"); + + if (module_encrypt_mode == NULL) { + exit_horribly(NULL, "encrypt_mode cannot be NULL\n"); + } else { + rc = memcpy_s((GS_UCHAR*)fout->crypto_type, CRYPTO_MODULE_ENC_TYPE_MAX_LEN, module_encrypt_mode, strlen(module_encrypt_mode)); + securec_check_c(rc, "\0", "\0"); + } + + if (module_encrypt_salt == NULL || strlen(module_encrypt_salt) != 16) { + exit_horribly(NULL, "salt is needed and must be 16 bytes\n"); + } else { + rc = memcpy_s((GS_UCHAR*)fout->rand, RANDOM_LEN + 1, module_encrypt_salt, strlen(module_encrypt_salt)); + securec_check_c(rc, "\0", "\0"); + } + + initCryptoModule(AH); + initCryptoSession(AH); + + if (module_encrypt_key && is_gen_key) { + exit_horribly(NULL, "can not use with-key and gen-key together\n"); + } else if (module_encrypt_key) { + char *tmpkey = NULL; + unsigned int tmpkeylen = 0; + + tmpkey = SEC_decodeBase64(module_encrypt_key, &tmpkeylen); + if (tmpkey == NULL || tmpkeylen > KEY_MAX_LEN) { + if (tmpkey) { + OPENSSL_free(tmpkey); + } + exit_horribly(NULL, "invalid key\n"); + } else { + rc = memcpy_s((GS_UCHAR*)fout->Key, KEY_MAX_LEN, tmpkey, tmpkeylen); + securec_check_c(rc, "\0", "\0"); + fout->keylen = tmpkeylen; + } + } else if (is_gen_key){ + char *encodedley = NULL; + symmGenerateKey((ArchiveHandle*)fout); + encodedley = SEC_encodeBase64((char*)fout->Key, fout->keylen); + write_msg(NULL, "generate key success:%s\n", encodedley); + + } + + initCryptoKeyCtx((ArchiveHandle*)fout); + + fout->encryptfile = true; + +} diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h index 417b8936d2..94956c384a 100644 --- a/src/bin/pg_dump/pg_backup.h +++ b/src/bin/pg_dump/pg_backup.h @@ -40,6 +40,9 @@ #define oidge(x, y) ((x) >= (y)) #define oidzero(x) ((x) == 0) +#define CRYPTO_MODULE_PARAMS_MAX_LEN 1024 +#define 
CRYPTO_MODULE_ENC_TYPE_MAX_LEN 16 + enum trivalue { TRI_DEFAULT, TRI_NO, TRI_YES }; typedef enum _archiveFormat { @@ -59,6 +62,11 @@ typedef enum _teSection { SECTION_POST_DATA /* stuff to be processed after data */ } teSection; +typedef struct { + void *moduleSession; + void *key_ctx; +}CryptoModuleCtx; + /* * We may want to have some more user-readable data, but in the mean * time this gives us some abstraction and type checking. @@ -83,10 +91,15 @@ struct Archive { /* Database Security: Data importing/dumping support AES128. */ bool encryptfile; unsigned char Key[KEY_MAX_LEN]; + int keylen; /* Data importing/dumping support AES128 through OPENSSL */ unsigned char rand[RANDOM_LEN + 1]; + char crypto_type[CRYPTO_MODULE_ENC_TYPE_MAX_LEN]; + char crypto_modlue_params[CRYPTO_MODULE_PARAMS_MAX_LEN]; + CryptoModuleCtx cryptoModlueCtx; + /* get hash bucket info. */ bool getHashbucketInfo; /* The rest is private */ @@ -153,6 +166,14 @@ typedef struct _restoreOptions { bool* idWanted; /* array showing which dump IDs to emit */ } RestoreOptions; +typedef struct { + char* module_params; + char* mode; + char* key; + char* salt; + bool genkey; +}CryptoModuleCheckParam; + /* * Main archiver interface. 
*/ @@ -186,7 +207,7 @@ extern void SetArchiveRestoreOptions(Archive* AH, RestoreOptions* ropt); extern void RestoreArchive(Archive* AH); /* Open an existing archive */ -extern Archive* OpenArchive(const char* FileSpec, const ArchiveFormat fmt); +extern Archive* OpenArchive(const char* FileSpec, const ArchiveFormat fmt, CryptoModuleCheckParam* cryptoModuleCheckParam = NULL); /* Create a new archive */ extern Archive* CreateArchive(const char* FileSpec, const ArchiveFormat fmt, const int compression, ArchiveMode mode); diff --git a/src/bin/pg_dump/pg_backup_archiver.cpp b/src/bin/pg_dump/pg_backup_archiver.cpp index dfd4491c94..f762fbf3b6 100644 --- a/src/bin/pg_dump/pg_backup_archiver.cpp +++ b/src/bin/pg_dump/pg_backup_archiver.cpp @@ -25,6 +25,7 @@ #include "dumputils.h" #include "catalog/pg_database.h" #include "catalog/pg_extension.h" +#include "pg_backup_cipher.h" /* Database Security: Data importing/dumping support AES128. */ #include "compress_io.h" @@ -143,7 +144,7 @@ static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER; static bool disable_progress; #endif -static ArchiveHandle* _allocAH(const char* FileSpec, const ArchiveFormat fmt, const int compression, ArchiveMode mode); +static ArchiveHandle* _allocAH(const char* FileSpec, const ArchiveFormat fmt, const int compression, ArchiveMode mode, CryptoModuleCheckParam* cryptoModluleCheckParam = NULL); static void _getObjectDescription(PQExpBuffer buf, TocEntry* te, ArchiveHandle* AH); static void _printTocEntry(ArchiveHandle* AH, TocEntry* te, RestoreOptions* ropt, bool isData, bool acl_pass); static char* replace_line_endings(const char* str); @@ -223,9 +224,9 @@ Archive* CreateArchive(const char* FileSpec, const ArchiveFormat fmt, const int /* Open an existing archive */ /* Public */ -Archive* OpenArchive(const char* FileSpec, const ArchiveFormat fmt) +Archive* OpenArchive(const char* FileSpec, const ArchiveFormat fmt, CryptoModuleCheckParam* cryptoModluleCheckParam) { - ArchiveHandle* AH = 
_allocAH(FileSpec, fmt, 0, archModeRead); + ArchiveHandle* AH = _allocAH(FileSpec, fmt, 0, archModeRead, cryptoModluleCheckParam); return (Archive*)AH; } @@ -574,9 +575,9 @@ void RestoreArchive(Archive* AHX) SetOutput(AH, ropt->filename, ropt->compression); /* - * Put the rand value to encrypt file for decrypt. + * Put the rand value to encrypt file for decrypt if use soft crypto. */ - if ((true == AHX->encryptfile) && (NULL == encrypt_salt)) { + if ((true == AHX->encryptfile) && (NULL == encrypt_salt) && AHX->crypto_modlue_params[0] == '\0') { p = (char*)pg_malloc(RANDOM_LEN + 1); rc = memset_s(p, RANDOM_LEN + 1, 0, RANDOM_LEN + 1); securec_check_c(rc, "\0", "\0"); @@ -1730,7 +1731,9 @@ int ahwrite(const void* ptr, size_t size, size_t nmemb, ArchiveHandle* AH) (size * nmemb), MAX_DECRYPT_BUFF_LEN, AH->publicArc.Key, - AH->publicArc.rand); + AH->publicArc.rand, + AH->publicArc.cryptoModlueCtx.key_ctx, + crypto_encrypt_decrypt_use); if (!encrypt_result) exit_horribly(modulename, "Encryption failed: %s\n", strerror(errno)); } else { @@ -2231,7 +2234,7 @@ static int _discoverArchiveFormat(ArchiveHandle* AH) /* * Allocate an archive handle */ -static ArchiveHandle* _allocAH(const char* FileSpec, const ArchiveFormat fmt, const int compression, ArchiveMode mode) +static ArchiveHandle* _allocAH(const char* FileSpec, const ArchiveFormat fmt, const int compression, ArchiveMode mode, CryptoModuleCheckParam* cryptoModuleCheckParam) { ArchiveHandle* AH = NULL; @@ -2316,6 +2319,10 @@ static ArchiveHandle* _allocAH(const char* FileSpec, const ArchiveFormat fmt, co AH->promptPassword = TRI_DEFAULT; + if (cryptoModuleCheckParam && cryptoModuleCheckParam->module_params) { + CryptoModuleParamsCheck(AH, cryptoModuleCheckParam->module_params, cryptoModuleCheckParam->mode, cryptoModuleCheckParam->key, cryptoModuleCheckParam->salt, cryptoModuleCheckParam->genkey); + } + switch (AH->format) { case archCustom: InitArchiveFmt_Custom(AH); @@ -3720,6 +3727,9 @@ void 
on_exit_close_archive(Archive* AHX) { shutdown_info.AHX = AHX; on_exit_nicely(archive_close_connection, &shutdown_info); + on_exit_nicely(releaseCryptoCtx, AHX); + on_exit_nicely(releaseCryptoSession, AHX); + on_exit_nicely(unload_crypto_module, NULL); } /* @@ -4729,8 +4739,8 @@ void encryptArchive(Archive* fout, const ArchiveFormat fmt) if (!fout->encryptfile) return; - /* for plain format, encrypted in previous process. */ - if (fmt != archDirectory) + /* for plain format, encrypted in previous process. use crypto module encrypted in previous process. */ + if (fmt != archDirectory || fout->crypto_modlue_params) return; fileSpec = gs_strdup(AH->fSpec); diff --git a/src/bin/pg_dump/pg_backup_cipher.h b/src/bin/pg_dump/pg_backup_cipher.h new file mode 100644 index 0000000000..089cdaa2da --- /dev/null +++ b/src/bin/pg_dump/pg_backup_cipher.h @@ -0,0 +1,37 @@ +#ifndef COMMON_CIPHER_H +#define COMMON_CIPHER_H +#include +#include +#include +#include +#include +#include +#include "pg_backup_archiver.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_CRYPTO_CACHE_LEN 8192 +#define CRYPTO_BLOCK_SIZE 16 +#define MAX_WRITE_CACHE_LEN (MAX_CRYPTO_CACHE_LEN - CRYPTO_BLOCK_SIZE) /*加密算法补pad模式为强补,最多可以补16字节,所以写缓存少16字节,则密文最长8192、保证读取时可以整块密文读入*/ + +typedef int (*crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); +extern crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use; + +extern bool load_crypto_module_lib(); +extern void unload_crypto_module(int code, void* args); +extern void initCryptoModule(ArchiveHandle* AH); +extern void initCryptoSession(ArchiveHandle* AH); +extern void releaseCryptoSession(int code, void* args); +extern void initCryptoKeyCtx(ArchiveHandle* AH); +extern void releaseCryptoCtx(int code, void* args); +extern void symmEncDec(ArchiveHandle* AH, bool isEnc, char* indata, int inlen, char* outdata, int* outlen); 
+extern void symmGenerateKey(ArchiveHandle* AH); +extern void CryptoModuleParamsCheck(ArchiveHandle* AH, const char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt, bool is_gen_key); + +#ifdef __cplusplus +} +#endif + +#endif /*COMMON_CIPHER_H*/ diff --git a/src/bin/pg_dump/pg_backup_directory.cpp b/src/bin/pg_dump/pg_backup_directory.cpp index bbb6c23b94..b71ac8169f 100644 --- a/src/bin/pg_dump/pg_backup_directory.cpp +++ b/src/bin/pg_dump/pg_backup_directory.cpp @@ -35,11 +35,35 @@ #include "compress_io.h" #include "dumpmem.h" +#include "pg_backup_cipher.h" #ifdef GAUSS_SFT_TEST #include "gauss_sft.h" #endif +typedef enum { + WRITE_CRYPTO_CACHE, + READ_CRYPTO_CACHE +}cryptoCacheType; +typedef struct { + int writeCacheLen; + char writeCache[MAX_WRITE_CACHE_LEN]; +}writeCryptoCache; + +typedef struct { + int readCacheLen; + int readPosition; + char readCache[MAX_CRYPTO_CACHE_LEN]; +}readCryptoCache; + +typedef struct { + cryptoCacheType cacheType; + union { + writeCryptoCache wrCryptoCache; + readCryptoCache rCryptoCache; + }cryptoCache; +}DFormatCryptoCache; + typedef struct { /* * Our archive location. 
This is basically what the user specified as his @@ -48,6 +72,7 @@ typedef struct { char* directory; cfp* dataFH; /* currently open data file */ + DFormatCryptoCache* dataCryptoCache; cfp* blobsTocFH; /* file handle for blobs.toc */ } lclContext; @@ -83,6 +108,14 @@ static void _LoadBlobs(ArchiveHandle* AH, RestoreOptions* ropt); static char* prependDirectory(ArchiveHandle* AH, const char* relativeFilename); +static void initCryptoCache(ArchiveMode archiveMode, DFormatCryptoCache** cryptoCache); +static void releaseCryptoCache(DFormatCryptoCache* cryptoCache); +static void resetCryptoCache(DFormatCryptoCache* cryptoCache); +static void encryptAndFlushCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH); +static void fillWriteCryptoCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH, const void* buf, size_t len); +static void fillReadCryptoCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH); +static int readFromCryptoCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH, void* buf, size_t len, bool *isempty); + /* * Init routine required by ALL formats. This is a global routine * and should be declared in pg_backup_archiver.h @@ -156,6 +189,10 @@ void InitArchiveFmt_Directory(ArchiveHandle* AH) ctx->dataFH = tocFH; + if (AH->publicArc.encryptfile) { + initCryptoCache(archModeRead, &(ctx->dataCryptoCache)); + } + /* * The TOC of a directory format dump shares the format code of the * tar format. @@ -165,6 +202,10 @@ void InitArchiveFmt_Directory(ArchiveHandle* AH) AH->format = archDirectory; ReadToc(AH); + if (AH->publicArc.encryptfile) { + releaseCryptoCache(ctx->dataCryptoCache); + } + /* Nothing else in the file, so close it again... 
*/ if (cfclose(tocFH) != 0) exit_horribly(modulename, "could not close TOC file: %s\n", strerror(errno)); @@ -299,6 +340,10 @@ static void _StartData(ArchiveHandle* AH, TocEntry* te) ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression); if (ctx->dataFH == NULL) exit_horribly(modulename, "could not open output file \"%s\": %s\n", fname, strerror(errno)); + + if (AH->publicArc.encryptfile) { + initCryptoCache(AH->mode, &(ctx->dataCryptoCache)); + } } /* @@ -317,7 +362,13 @@ static size_t _WriteData(ArchiveHandle* AH, const void* data, size_t dLen) if (dLen == 0) return 0; - return (size_t)cfwrite(data, (int)dLen, ctx->dataFH); + if (ctx->dataCryptoCache) { + fillWriteCryptoCache(AH, ctx->dataCryptoCache, ctx->dataFH, data, dLen); + } else { + return (size_t)cfwrite(data, (int)dLen, ctx->dataFH); + } + + return dLen; } /* @@ -330,6 +381,11 @@ static void _EndData(ArchiveHandle* AH, TocEntry* te) { lclContext* ctx = (lclContext*)AH->formatData; + if (ctx->dataCryptoCache) { + encryptAndFlushCache(AH, ctx->dataCryptoCache, ctx->dataFH); + releaseCryptoCache(ctx->dataCryptoCache); + } + /* Close the file */ (void)cfclose(ctx->dataFH); @@ -356,11 +412,24 @@ static void _PrintFileData(ArchiveHandle* AH, const char* filename, RestoreOptio buf = (char*)pg_malloc(ZLIB_OUT_SIZE); buflen = ZLIB_OUT_SIZE; - while ((cnt = cfread(buf, buflen, pstCfp))) - (void)ahwrite(buf, 1, cnt, AH); + if (AH->publicArc.encryptfile) { + DFormatCryptoCache* readCache = NULL; + bool isEmpty =false; + initCryptoCache(archModeRead, &readCache); + while ((cnt = readFromCryptoCache(AH, readCache, pstCfp, buf, buflen, &isEmpty))) { + (void)ahwrite(buf, 1, cnt, AH); + } + + releaseCryptoCache(readCache); + } else { + while ((cnt = cfread(buf, buflen, pstCfp))) { + (void)ahwrite(buf, 1, cnt, AH); + } + } free(buf); buf = NULL; + if (cfclose(pstCfp) != 0) exit_horribly(modulename, "could not close data file: %s\n", strerror(errno)); } @@ -436,8 +505,13 @@ static int _WriteByte(ArchiveHandle* 
AH, const int i) unsigned char c = (unsigned char)i; lclContext* ctx = (lclContext*)AH->formatData; - if (cfwrite(&c, 1, ctx->dataFH) != 1) - exit_horribly(modulename, "could not write byte\n"); + if (ctx->dataCryptoCache) { + fillWriteCryptoCache(AH, ctx->dataCryptoCache, ctx->dataFH, &c, 1); + } else { + if (cfwrite(&c, 1, ctx->dataFH) != 1) { + exit_horribly(modulename, "could not write byte\n"); + } + } return 1; } @@ -453,7 +527,15 @@ static int _ReadByte(ArchiveHandle* AH) lclContext* ctx = (lclContext*)AH->formatData; int res; - res = cfgetc(ctx->dataFH); + if (ctx->dataCryptoCache) { + bool isEmpty = false; + unsigned char c; + readFromCryptoCache(AH, ctx->dataCryptoCache, ctx->dataFH, &c, 1, &isEmpty); + res = c; + } else { + res = cfgetc(ctx->dataFH); + } + if (res == EOF) exit_horribly(modulename, "unexpected end of file\n"); @@ -467,13 +549,22 @@ static int _ReadByte(ArchiveHandle* AH) static size_t _WriteBuf(ArchiveHandle* AH, const void* buf, size_t len) { lclContext* ctx = (lclContext*)AH->formatData; - size_t res; - res = cfwrite(buf, len, ctx->dataFH); - if (res != len) - exit_horribly(modulename, "could not write to output file: %s\n", strerror(errno)); + if (ctx->dataCryptoCache) { + fillWriteCryptoCache(AH, ctx->dataCryptoCache, ctx->dataFH, buf, len); + return len; + } else { + size_t res; - return res; + res = cfwrite(buf, len, ctx->dataFH); + if (res != len) { + exit_horribly(modulename, "could not write to output file:%s\n", strerror(errno)); + } + + return res; + } + + return 0; } /* @@ -486,7 +577,12 @@ static size_t _ReadBuf(ArchiveHandle* AH, void* buf, size_t len) lclContext* ctx = (lclContext*)AH->formatData; size_t res; - res = cfread(buf, len, ctx->dataFH); + if (ctx->dataCryptoCache) { + bool isEmpty = false; + res = readFromCryptoCache(AH, ctx->dataCryptoCache, ctx->dataFH, buf, len, &isEmpty); + } else { + res = cfread(buf, len, ctx->dataFH); + } return res; } @@ -517,6 +613,10 @@ static void _CloseArchive(ArchiveHandle* AH) 
exit_horribly(modulename, "could not open output file \"%s\": %s\n", fname, strerror(errno)); ctx->dataFH = tocFH; + if (AH->publicArc.encryptfile) { + initCryptoCache(archModeWrite, &(ctx->dataCryptoCache)); + } + /* * Write 'tar' in the format field of the toc.dat file. The directory * is compatible with 'tar', so there's no point having a different @@ -526,6 +626,12 @@ static void _CloseArchive(ArchiveHandle* AH) WriteHead(AH); AH->format = archDirectory; WriteToc(AH); + + if (AH->publicArc.encryptfile) { + encryptAndFlushCache(AH, ctx->dataCryptoCache, tocFH); + releaseCryptoCache(ctx->dataCryptoCache); + } + if (cfclose(tocFH) != 0) exit_horribly(modulename, "could not close TOC file: %s\n", strerror(errno)); WriteDataChunks(AH); @@ -575,6 +681,10 @@ static void _StartBlob(ArchiveHandle* AH, TocEntry* te, Oid oid) if (ctx->dataFH == NULL) exit_horribly(modulename, "could not open output file \"%s\": %s\n", fname, strerror(errno)); + + if (AH->publicArc.encryptfile) { + initCryptoCache(AH->mode, &(ctx->dataCryptoCache)); + } } /* @@ -588,6 +698,11 @@ static void _EndBlob(ArchiveHandle* AH, TocEntry* te, Oid oid) char buf[50] = {0}; int len; + if (ctx->dataCryptoCache) { + encryptAndFlushCache(AH, ctx->dataCryptoCache, ctx->dataFH); + releaseCryptoCache(ctx->dataCryptoCache); + } + /* Close the BLOB data file itself */ (void)cfclose(ctx->dataFH); ctx->dataFH = NULL; @@ -640,3 +755,131 @@ static char* prependDirectory(ArchiveHandle* AH, const char* relativeFilename) return buf; } + +static void initCryptoCache(ArchiveMode archiveMode, DFormatCryptoCache** cryptoCache) +{ + + errno_t rc = 0; + *cryptoCache = (DFormatCryptoCache*)pg_malloc(sizeof(DFormatCryptoCache)); + + if (archiveMode == archModeWrite) { + (*cryptoCache)->cacheType = WRITE_CRYPTO_CACHE; + (*cryptoCache)->cryptoCache.wrCryptoCache.writeCacheLen = 0; + rc = memset_s((*cryptoCache)->cryptoCache.wrCryptoCache.writeCache, MAX_WRITE_CACHE_LEN, 0, MAX_WRITE_CACHE_LEN); + securec_check_c(rc, "\0", 
"\0"); + } else { + (*cryptoCache)->cacheType = READ_CRYPTO_CACHE; + (*cryptoCache)->cryptoCache.rCryptoCache.readCacheLen = 0; + (*cryptoCache)->cryptoCache.rCryptoCache.readPosition = 0; + rc = memset_s((*cryptoCache)->cryptoCache.rCryptoCache.readCache, MAX_CRYPTO_CACHE_LEN, 0, MAX_CRYPTO_CACHE_LEN); + securec_check_c(rc, "\0", "\0"); + } +} + +static void releaseCryptoCache(DFormatCryptoCache* cryptoCache) +{ + resetCryptoCache(cryptoCache); + GS_FREE(cryptoCache); +} + +static void resetCryptoCache(DFormatCryptoCache* cryptoCache) +{ + errno_t rc = 0; + + if (cryptoCache->cacheType == WRITE_CRYPTO_CACHE) { + cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen = 0; + rc = memset_s(cryptoCache->cryptoCache.wrCryptoCache.writeCache, MAX_WRITE_CACHE_LEN, 0, MAX_WRITE_CACHE_LEN); + securec_check_c(rc, "\0", "\0"); + } else { + cryptoCache->cacheType = READ_CRYPTO_CACHE; + cryptoCache->cryptoCache.rCryptoCache.readCacheLen = 0; + cryptoCache->cryptoCache.rCryptoCache.readPosition = 0; + rc = memset_s(cryptoCache->cryptoCache.rCryptoCache.readCache, MAX_CRYPTO_CACHE_LEN, 0, MAX_CRYPTO_CACHE_LEN); + securec_check_c(rc, "\0", "\0"); + } +} + +static void encryptAndFlushCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH) +{ + char flushData[MAX_CRYPTO_CACHE_LEN] = {0}; + int flushLen = MAX_CRYPTO_CACHE_LEN; + + symmEncDec(AH, true, cryptoCache->cryptoCache.wrCryptoCache.writeCache, cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen, flushData, &flushLen); + + cfwrite(flushData, flushLen, FH); +} + +static void fillWriteCryptoCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH, const void* buf, size_t len) +{ + errno_t rc = 0; + /*缓存空间不足,则加密刷盘,清空缓存*/ + if (cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen + len > MAX_WRITE_CACHE_LEN) { + encryptAndFlushCache(AH, cryptoCache, FH); + resetCryptoCache(cryptoCache); + } + + rc = memcpy_s(cryptoCache->cryptoCache.wrCryptoCache.writeCache + 
cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen, MAX_WRITE_CACHE_LEN - cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen, (char*)buf, len); + securec_check_c(rc, "\0", "\0"); + cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen += len; + +} + +static void fillReadCryptoCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH) +{ + char encData[MAX_CRYPTO_CACHE_LEN] = {0}; + int encLen = 0; + + /*先读取文件密文,然后解密写入缓存,这里先直接放缓存*/ + encLen = cfread(encData, MAX_CRYPTO_CACHE_LEN, FH); + + if (encLen > 0) { + cryptoCache->cryptoCache.rCryptoCache.readCacheLen = encLen; + symmEncDec(AH, false, encData, encLen, cryptoCache->cryptoCache.rCryptoCache.readCache, &(cryptoCache->cryptoCache.rCryptoCache.readCacheLen)); + } + +} + +static int readFromCryptoCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCache, cfp* FH, void* buf, size_t len, bool *isempty) +{ + errno_t rc = 0; + + if (len == 0) { + return 0; + } + + /*如果缓存数据足够,则直接copy给返回*/ + if (cryptoCache->cryptoCache.rCryptoCache.readCacheLen >= (int)len) { + rc = memcpy_s((unsigned char*)buf, len, cryptoCache->cryptoCache.rCryptoCache.readCache + cryptoCache->cryptoCache.rCryptoCache.readPosition, len); + securec_check_c(rc, "\0", "\0"); + cryptoCache->cryptoCache.rCryptoCache.readPosition += len; + cryptoCache->cryptoCache.rCryptoCache.readCacheLen -= len; + + return len; + } else { + /*如果缓存数据不够,则先将当前缓存的数据copy,清空缓存,再去读文件解密写缓存,再从缓存copy剩下需要的数据长度*/ + int realLen = 0; + int needLen = len; + int nextGetLen = 0; + bool tmpEmpty = false; + + rc = memcpy_s((char*)buf, len, cryptoCache->cryptoCache.rCryptoCache.readCache + cryptoCache->cryptoCache.rCryptoCache.readPosition, cryptoCache->cryptoCache.rCryptoCache.readCacheLen); + securec_check_c(rc, "\0", "\0"); + realLen += cryptoCache->cryptoCache.rCryptoCache.readCacheLen; + needLen -= cryptoCache->cryptoCache.rCryptoCache.readCacheLen; + + resetCryptoCache(cryptoCache); + fillReadCryptoCache(AH, cryptoCache, FH); + + /*文件已读完*/ + if 
(cryptoCache->cryptoCache.rCryptoCache.readCacheLen == 0) { + *isempty = true; + return realLen; + } + + nextGetLen = readFromCryptoCache(AH, cryptoCache, FH, ((char*)buf + realLen), (size_t)needLen, &tmpEmpty); + if (nextGetLen == needLen || tmpEmpty == true) + return nextGetLen + realLen; + } + + return 0; +} \ No newline at end of file diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 85c6a50699..6ce030f0a4 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -80,6 +80,7 @@ #include "openssl/rand.h" #include "miscadmin.h" #include "bin/elog.h" +#include "pg_backup_cipher.h" #ifdef HAVE_CE #include "client_logic_cache/types_to_oid.h" #include "client_logic_processor/values_processor.h" @@ -93,7 +94,6 @@ #include "utils/rel.h" #include "utils/rel_gs.h" #include "catalog/pg_constraint.h" - #else typedef enum { @@ -313,6 +313,9 @@ const char* encrypt_mode = NULL; const char* encrypt_key = NULL; extern const char* encrypt_salt; +static const char* module_params = NULL; +static bool gen_key = false; + /* subquery used to convert user ID (eg, datdba) to user name */ static const char* username_subquery; @@ -671,7 +674,8 @@ int main(int argc, char** argv) #ifdef DUMPSYSLOG {"syslog", no_argument, &dump_syslog, 1}, #endif - /* Database Security: Data importing/dumping support AES128. */ + /* Database Security: enc mode , soft only AES128 is available, + * common cipher support AES128_CBC,AES128_CTR,AES128_GCM,AES256_CBC,AES256_CTR,AES256_GCM,SM4_CBC,SM4_CTR. 
*/ {"with-encryption", required_argument, NULL, 6}, {"with-key", required_argument, NULL, 7}, {"rolepassword", required_argument, NULL, 9}, @@ -691,6 +695,8 @@ int main(int argc, char** argv) #if defined(USE_ASSERT_CHECKING) || defined(FASTCHECK) {"disable-progress", no_argument, NULL, 18}, #endif + {"with-module-params", required_argument, NULL, 19}, + {"gen-key", no_argument, NULL, 20}, {NULL, 0, NULL, 0}}; set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("gs_dump")); @@ -759,8 +765,8 @@ int main(int argc, char** argv) } if (!could_encrypt && is_encrypt) { - exit_horribly(NULL, "Encrypt mode is only supported for plain text, " - "You should assign -F p or -F plain.\n"); + exit_horribly(NULL, "Encrypt mode is only supported for plain text and directory, " + "You should assign -F p/d or -F plain/directory.\n"); } /* Custom and directory formats are compressed by default, others not */ @@ -840,32 +846,37 @@ int main(int argc, char** argv) /* Let the archiver know how noisy to be */ fout->verbose = g_verbose; - if ((encrypt_mode != NULL) && (encrypt_key == NULL)) { - get_encrypt_key(); - } - /* Database Security: Data importing/dumping support AES128. */ - check_encrypt_parameters(fout, encrypt_mode, encrypt_key); + if (module_params != NULL) { + CryptoModuleParamsCheck((ArchiveHandle*)fout, module_params, encrypt_mode, encrypt_key, encrypt_salt, gen_key); + } else if (is_encrypt) { - if (true == fout->encryptfile) { - if (NULL == encrypt_salt) { - GS_UINT32 retval = 0; - GS_UCHAR init_rand[RANDOM_LEN + 1] = {0}; + if ((encrypt_mode != NULL) && (encrypt_key == NULL)) { + get_encrypt_key(); + } + /* Database Security: Data importing/dumping support AES128. 
*/ + check_encrypt_parameters(fout, encrypt_mode, encrypt_key); - /* get a random values as salt for encrypt */ - retval = RAND_priv_bytes(init_rand, RANDOM_LEN); - if (retval != 1) { - exit_horribly(NULL, "Generate random key failed\n"); - } + if (true == fout->encryptfile) { + if (NULL == encrypt_salt) { + GS_UINT32 retval = 0; + GS_UCHAR init_rand[RANDOM_LEN + 1] = {0}; - rc = memset_s(fout->rand, (RANDOM_LEN + 1), 0, RANDOM_LEN + 1); - securec_check_c(rc, "\0", "\0"); - rc = memcpy_s((GS_UCHAR*)fout->rand, RANDOM_LEN, init_rand, RANDOM_LEN); - securec_check_c(rc, "\0", "\0"); - } else { - rc = memset_s(fout->rand, (RANDOM_LEN + 1), 0, RANDOM_LEN + 1); - securec_check_c(rc, "\0", "\0"); - rc = memcpy_s((GS_UCHAR*)fout->rand, RANDOM_LEN, encrypt_salt, RANDOM_LEN); - securec_check_c(rc, "\0", "\0"); + /* get a random values as salt for encrypt */ + retval = RAND_priv_bytes(init_rand, RANDOM_LEN); + if (retval != 1) { + exit_horribly(NULL, "Generate random key failed\n"); + } + + rc = memset_s(fout->rand, (RANDOM_LEN + 1), 0, RANDOM_LEN + 1); + securec_check_c(rc, "\0", "\0"); + rc = memcpy_s((GS_UCHAR*)fout->rand, RANDOM_LEN, init_rand, RANDOM_LEN); + securec_check_c(rc, "\0", "\0"); + } else { + rc = memset_s(fout->rand, (RANDOM_LEN + 1), 0, RANDOM_LEN + 1); + securec_check_c(rc, "\0", "\0"); + rc = memcpy_s((GS_UCHAR*)fout->rand, RANDOM_LEN, encrypt_salt, RANDOM_LEN); + securec_check_c(rc, "\0", "\0"); + } } } @@ -1736,6 +1747,15 @@ void getopt_dump(int argc, char** argv, struct option options[], int* result) disable_progress = true; break; #endif + case 19: + GS_FREE(module_params); + module_params = gs_strdup(optarg); + is_encrypt = true; + break; + case 20: + gen_key = true; + is_encrypt = true; + break; default: write_stderr(_("Try \"%s --help\" for more information.\n"), progname); exit_nicely(1); @@ -1933,9 +1953,15 @@ void help(const char* pchProgname) " ALTER OWNER commands to set ownership\n")); printf(_(" --exclude-function do not dump function and 
procedure\n")); /* Database Security: Data importing/dumping support AES128. */ - printf(_(" --with-encryption=AES128 dump data is encrypted using AES128\n")); - printf(_(" --with-key=KEY AES128 encryption key, must be 16 bytes in length\n")); + printf(_(" --with-encryption=AES128 dump data is encrypted,soft only AES128 is available" + "common cipher support AES128_CBC,AES128_CTR,AES128_GCM,AES256_CBC,AES256_CTR,AES256_GCM,SM4_CBC,SM4_CTR\n")); + printf(_(" --with-key=KEY soft AES128 encryption key, must be 16 bytes in length,common cipher key is base64 encoded,max 44 bytes\n")); printf(_(" --with-salt=RANDVALUES used by gs_dumpall, pass rand value array\n")); + printf(_(" --with-module-params=MODLUE_TYPE=TYPE,MODULE_LIB_PATH=path,MODULE_CONFIG_FILE_PATH=path" + "type:GDACCARD,JNTAKMS,SWXAKMS;MODULE_LIB_PATH:need include lib file absolute path;" + "MODULE_CONFIG_FILE_PATH:GDACCARD need not,JNTAKMS exclude lib file name absolute path,SWXA need include lib file absolute path" + "used by gs_dump, load device\n")); + printf(_(" --gen-key if you have not key for using,you can set this option to generate key and encrypt dump data,store it to using again\n")); #ifdef ENABLE_MULTIPLE_NODES printf(_(" --include-nodes include TO NODE/GROUP clause in the dumped CREATE TABLE " "and CREATE FOREIGN TABLE commands.\n")); @@ -2121,8 +2147,10 @@ static ArchiveFormat parseArchiveFormat(ArchiveMode* mode) archiveFormat = archCustom; } else if (pg_strcasecmp(format, "d") == 0) { archiveFormat = archDirectory; + could_encrypt = true; } else if (pg_strcasecmp(format, "directory") == 0) { archiveFormat = archDirectory; + could_encrypt = true; } else if (pg_strcasecmp(format, "p") == 0) { archiveFormat = archNull; could_encrypt = true; diff --git a/src/bin/pg_dump/pg_restore.cpp b/src/bin/pg_dump/pg_restore.cpp index e306ca36a7..ebf910fddf 100644 --- a/src/bin/pg_dump/pg_restore.cpp +++ b/src/bin/pg_dump/pg_restore.cpp @@ -43,6 +43,7 @@ #include "dumpmem.h" #include "dumputils.h" 
+#include "pg_backup_cipher.h" #include #include @@ -92,6 +93,9 @@ static bool is_encrypt = false; static bool is_pipeline = false; static int no_subscriptions = 0; static int no_publications = 0; +static char* decrypt_mode = NULL; +static char* decrypt_salt = NULL; +static char* module_params = NULL; typedef struct option optType; #ifdef GSDUMP_LLT @@ -166,6 +170,9 @@ int main(int argc, char** argv) #if defined(USE_ASSERT_CHECKING) || defined(FASTCHECK) {"disable-progress", no_argument, NULL, 8}, #endif + {"with-salt", required_argument, NULL, 9}, + {"with-module-params", required_argument, NULL, 10}, + {"with-decryption", required_argument, NULL, 11}, {NULL, 0, NULL, 0}}; set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("gs_dump")); @@ -263,10 +270,10 @@ int main(int argc, char** argv) init_audit(PROG_NAME, argc, argv); /* validate the restore options before start the actual operation */ validate_restore_options(argv, opts); - if (is_encrypt) { + if (is_encrypt && module_params == NULL) { exit_horribly(NULL, "Encrypt mode is not supported yet.\n"); + decryptfile = checkDecryptArchive(&inputFileSpec, (ArchiveFormat)opts->format, decrypt_key); } - decryptfile = checkDecryptArchive(&inputFileSpec, (ArchiveFormat)opts->format, decrypt_key); /* Take lock on the file itself on non-directory format and create a * lock file on the directory and take lock on that file @@ -286,7 +293,22 @@ int main(int argc, char** argv) on_exit_nicely(catalog_unlock, NULL); } - AH = OpenArchive(inputFileSpec, (ArchiveFormat)opts->format); + if (module_params) { + CryptoModuleCheckParam cryptoModuleCheckParam; + + rc = memset_s(&cryptoModuleCheckParam, sizeof(CryptoModuleCheckParam), 0, sizeof(CryptoModuleCheckParam)); + securec_check_c(rc, "\0", "\0"); + + cryptoModuleCheckParam.module_params = module_params; + cryptoModuleCheckParam.mode = decrypt_mode; + cryptoModuleCheckParam.key = decrypt_key; + cryptoModuleCheckParam.salt = decrypt_salt; + cryptoModuleCheckParam.genkey = false; + + AH = 
OpenArchive(inputFileSpec, (ArchiveFormat)opts->format, &cryptoModuleCheckParam); + } else { + AH = OpenArchive(inputFileSpec, (ArchiveFormat)opts->format); + } /* * We don't have a connection yet but that doesn't matter. The connection @@ -730,6 +752,21 @@ static void restore_getopts(int argc, char** argv, struct option* options, Resto opts->disable_progress = true; break; #endif + case 9: + GS_FREE(decrypt_salt); + decrypt_salt = gs_strdup(optarg); + is_encrypt = true; + break; + case 10: + GS_FREE(module_params); + module_params = gs_strdup(optarg); + is_encrypt = true; + break; + case 11: + GS_FREE(decrypt_mode); + decrypt_mode = gs_strdup(optarg); + is_encrypt = true; + break; default: write_stderr(_("Try \"%s --help\" for more information.\n"), progname); exit_nicely(1); @@ -808,6 +845,13 @@ void usage(const char* pchProgname) printf(_(" -W, --password=PASSWORD the password of specified database user\n")); printf(_(" --role=ROLENAME do SET ROLE before restore\n")); printf(_(" --rolepassword=ROLEPASSWORD the password for role\n")); + printf(_(" --with-decryption= type common cipher support AES128_CBC,AES128_CTR,AES128_GCM,AES256_CBC,AES256_CTR,AES256_GCM,SM4_CBC,SM4_CTR\n")); + printf(_(" --with-key=KEY common cipher key is base64 encoded,max 44 bytes\n")); + printf(_(" --with-salt=RANDVALUES common cipher salt must be 16 bytes\n")); + printf(_(" --with-module-params=MODLUE_TYPE=TYPE,MODULE_LIB_PATH=path,MODULE_CONFIG_FILE_PATH=path" + "type:GDACCARD,JNTAKMS,SWXAKMS;MODULE_LIB_PATH:need include lib file absolute path;" + "MODULE_CONFIG_FILE_PATH:GDACCARD need not,JNTAKMS exclude lib file name absolute path,SWXA need include lib file absolute path" + "used by gs_dump, load device\n")); } /* diff --git a/src/bin/psql/CMakeLists.txt b/src/bin/psql/CMakeLists.txt index cf2895fca0..3b69e494c6 100755 --- a/src/bin/psql/CMakeLists.txt +++ b/src/bin/psql/CMakeLists.txt @@ -29,6 +29,7 @@ add_custom_command( ) set(TGT_gsql_SRC + 
${CMAKE_CURRENT_SOURCE_DIR}/common_cipher.cpp ${CMAKE_CURRENT_SOURCE_DIR}/command.cpp ${CMAKE_CURRENT_SOURCE_DIR}/common.cpp ${CMAKE_CURRENT_SOURCE_DIR}/help.cpp diff --git a/src/bin/psql/Makefile b/src/bin/psql/Makefile index 055e97bfc8..7b7c2633a5 100644 --- a/src/bin/psql/Makefile +++ b/src/bin/psql/Makefile @@ -73,7 +73,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif endif -OBJS= command.o common.o help.o input.o stringutils.o mainloop.o copy.o \ +OBJS= common_cipher.o command.o common.o help.o input.o stringutils.o mainloop.o copy.o \ startup.o prompt.o variables.o large_obj.o print.o describe.o \ mbprint.o dumputils.o keywords.o kwlookup.o tab-complete.o\ sql_help.o \ diff --git a/src/bin/psql/common_cipher.cpp b/src/bin/psql/common_cipher.cpp new file mode 100644 index 0000000000..17232488a0 --- /dev/null +++ b/src/bin/psql/common_cipher.cpp @@ -0,0 +1,306 @@ +#include "common_cipher.h" +#include "securec.h" +#include "securec_check.h" +#include "port.h" + +#define MAX_PROVIDER_NAME_LEN 128 +#define MAX_ERRMSG_LEN 256 + +typedef enum { + MODULE_AES_128_CBC = 0, + MODULE_AES_128_CTR, + MODULE_AES_128_GCM, + MODULE_AES_256_CBC, + MODULE_AES_256_CTR, + MODULE_AES_256_GCM, + MODULE_SM4_CBC, + MODULE_SM4_CTR, + MODULE_HMAC_SHA256, + MODULE_HMAC_SM3, + MODULE_DETERMINISTIC_KEY, + MODULE_ALGO_MAX = 1024 +} ModuleSymmKeyAlgo; + +typedef enum { + MODULE_SHA256 = 0, + MODULE_SM3, + MODULE_DIGEST_MAX = 1024 +} ModuleDigestAlgo; + +typedef enum { + KEY_TYPE_INVALID, + KEY_TYPE_PLAINTEXT, + KEY_TYPE_CIPHERTEXT, + KEY_TYPE_NAMEORIDX, + KEY_TYPE_MAX +} KeyType; + +typedef struct { + char provider_name[MAX_PROVIDER_NAME_LEN]; + KeyType key_type; + int supported_symm[MODULE_ALGO_MAX]; // 不支持算法填入0或者支持算法填入1 + int supported_digest[MODULE_DIGEST_MAX]; // 不支持算法填入0或者支持算法填入1 +} SupportedFeature; + + +typedef int (*crypto_module_init_type)(char *load_info, SupportedFeature *supported_feature); +typedef int (*crypto_module_sess_init_type)(char *key_info, void **sess); +typedef void 
(*crypto_module_sess_exit_type)(void *sess); +typedef int (*crypto_create_symm_key_type)(void *sess, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t *key_id_size); +typedef int (*crypto_ctx_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, int enc, unsigned char *key_id, size_t key_id_size); +typedef int (*crypto_result_size_type)(void *ctx, int enc, size_t data_size); +typedef void (*crypto_ctx_clean_type)(void *ctx); +typedef int (*crypto_digest_type)(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); +typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); +typedef void (*crypto_hmac_clean_type)(void *ctx); +typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); +typedef int (*crypto_gen_random_type)(void *sess, char *buffer, size_t size); +typedef int (*crypto_deterministic_enc_dec_type)(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); +typedef int (*crypto_get_errmsg_type)(void *sess, char *errmsg); + + +static void *libhandle = NULL; + +static crypto_module_init_type crypto_module_init_use = NULL; +static crypto_module_sess_init_type crypto_module_sess_init_use = NULL; +static crypto_module_sess_exit_type crypto_module_sess_exit_use = NULL; +static crypto_create_symm_key_type crypto_create_symm_key_use = NULL; +static crypto_ctx_init_type crypto_ctx_init_use = NULL; +static crypto_result_size_type crypto_result_size_use = NULL; +static crypto_ctx_clean_type crypto_ctx_clean_use = NULL; +crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use = NULL; +static crypto_digest_type crypto_digest_use = NULL; +static crypto_hmac_init_type crypto_hmac_init_use = NULL; +static crypto_hmac_clean_type crypto_hmac_clean_use = NULL; +static 
crypto_hmac_type crypto_hmac_use = NULL; +static crypto_gen_random_type crypto_gen_random_use = NULL; +static crypto_deterministic_enc_dec_type crypto_deterministic_enc_dec_use = NULL; +static crypto_get_errmsg_type crypto_get_errmsg_use = NULL; + +bool load_crypto_module_lib() +{ + errno_t rc = 0; + char libpath[1024] = {0}; + char* gaussHome = gs_getenv_r("GAUSSHOME"); + + rc = snprintf_s(libpath, sizeof(libpath), sizeof(libpath) - 1, "%s/lib/postgresql/common_cipher.so",gaussHome); + securec_check_ss_c(rc, "", ""); + + libhandle = dlopen(libpath, RTLD_LAZY); + if (libhandle == NULL) { + return false; + } + + crypto_module_init_use = (crypto_module_init_type)dlsym(libhandle, "crypto_module_init"); + crypto_module_sess_init_use = (crypto_module_sess_init_type)dlsym(libhandle, "crypto_module_sess_init"); + crypto_module_sess_exit_use = (crypto_module_sess_exit_type)dlsym(libhandle, "crypto_module_sess_exit"); + crypto_create_symm_key_use = (crypto_create_symm_key_type)dlsym(libhandle, "crypto_create_symm_key"); + crypto_ctx_init_use = (crypto_ctx_init_type)dlsym(libhandle, "crypto_ctx_init"); + crypto_result_size_use = (crypto_result_size_type)dlsym(libhandle, "crypto_result_size"); + crypto_ctx_clean_use = (crypto_ctx_clean_type)dlsym(libhandle, "crypto_ctx_clean"); + crypto_encrypt_decrypt_use = (crypto_encrypt_decrypt_type)dlsym(libhandle, "crypto_encrypt_decrypt"); + crypto_digest_use = (crypto_digest_type)dlsym(libhandle, "crypto_digest"); + crypto_hmac_init_use = (crypto_hmac_init_type)dlsym(libhandle, "crypto_hmac_init"); + crypto_hmac_clean_use = (crypto_hmac_clean_type)dlsym(libhandle, "crypto_hmac_clean"); + crypto_hmac_use = (crypto_hmac_type)dlsym(libhandle, "crypto_hmac"); + crypto_gen_random_use = (crypto_gen_random_type)dlsym(libhandle, "crypto_gen_random"); + crypto_deterministic_enc_dec_use = (crypto_deterministic_enc_dec_type)dlsym(libhandle, "crypto_deterministic_enc_dec"); + crypto_get_errmsg_use = (crypto_get_errmsg_type)dlsym(libhandle, 
"crypto_get_errmsg"); + + if (crypto_module_init_use == NULL + || crypto_module_sess_init_use == NULL + || crypto_module_sess_exit_use == NULL + || crypto_create_symm_key_use == NULL + || crypto_ctx_init_use == NULL + || crypto_result_size_use == NULL + || crypto_ctx_clean_use == NULL + || crypto_encrypt_decrypt_use == NULL + || crypto_digest_use == NULL + || crypto_hmac_init_use == NULL + || crypto_hmac_clean_use == NULL + || crypto_hmac_use == NULL + || crypto_gen_random_use == NULL + || crypto_deterministic_enc_dec_use == NULL + || crypto_get_errmsg_use == NULL) { + dlclose(libhandle); + return false; + } + + return true; +} + +void unload_crypto_module(int code, void* args) +{ + if (libhandle) { + dlclose(libhandle); + libhandle = NULL; + } +} + +static int transform_type(char* type) +{ + if (strcmp(type, "AES128_CBC") == 0) { + return MODULE_AES_128_CBC; + } else if (strcmp(type, "AES128_CTR") == 0) { + return MODULE_AES_128_CTR; + } else if (strcmp(type, "AES128_GCM") == 0) { + return MODULE_AES_128_GCM; + } else if (strcmp(type, "AES256_CBC") == 0) { + return MODULE_AES_256_CBC; + } else if (strcmp(type, "AES256_CTR") == 0) { + return MODULE_AES_256_CTR; + } else if (strcmp(type, "AES256_GCM") == 0) { + return MODULE_AES_256_GCM; + } else if (strcmp(type, "SM4_CBC") == 0) { + return MODULE_SM4_CBC; + } else if (strcmp(type, "SM4_CTR") == 0) { + return MODULE_SM4_CTR; + } + + return -1; + +} + +void initCryptoModule(DecryptInfo* pDecryptInfo) +{ + int ret = 1; + SupportedFeature supportedfeature; + int modulType = 0; + + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_module_init_use(pDecryptInfo->crypto_modlue_params, &supportedfeature); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } + + modulType = transform_type(pDecryptInfo->crypto_type); + if (modulType < 0 || supportedfeature.supported_symm[modulType] == 0) { + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } + +} + +void 
initCryptoSession(DecryptInfo* pDecryptInfo) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_module_sess_init_use(NULL, &(pDecryptInfo->moduleSessionCtx)); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } + +} + +void releaseCryptoSession(int code, void* args) +{ + if (libhandle && ((DecryptInfo*)args)->moduleSessionCtx) { + crypto_module_sess_exit_use(((DecryptInfo*)args)->moduleSessionCtx); + ((DecryptInfo*)args)->moduleSessionCtx = NULL; + } +} + +void initCryptoKeyCtx(DecryptInfo* pDecryptInfo) +{ + int ret = 1; + int enc = 0; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_ctx_init_use(pDecryptInfo->moduleSessionCtx, &(pDecryptInfo->moduleKeyCtx), (ModuleSymmKeyAlgo)transform_type(pDecryptInfo->crypto_type), enc, pDecryptInfo->Key, pDecryptInfo->keyLen); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + crypto_module_sess_exit_use(pDecryptInfo->moduleSessionCtx); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } +} + +void releaseCryptoCtx(int code, void* args) +{ + if (libhandle && ((DecryptInfo*)args)->moduleKeyCtx) { + crypto_ctx_clean_use(((DecryptInfo*)args)->moduleKeyCtx); + ((DecryptInfo*)args)->moduleKeyCtx = NULL; + } +} + +void symmEncDec(DecryptInfo* pDecryptInfo, bool isEnc, char* indata, int inlen, char* outdata, int* outlen) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_encrypt_decrypt_use(pDecryptInfo->moduleKeyCtx, isEnc, (unsigned char*)indata, inlen, pDecryptInfo->rand, 16, (unsigned char*)outdata, (size_t*)outlen, NULL); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + releaseCryptoCtx(0, pDecryptInfo); + releaseCryptoSession(0, pDecryptInfo); + unload_crypto_module(0, NULL); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } +} + +void CryptoModuleParamsCheck(DecryptInfo* pDecryptInfo, const char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt) +{ 
+ errno_t rc = 0; + + if (!load_crypto_module_lib()) { + fprintf(stderr, ("load crypto module lib failed\n")); + exit(1); + } + + rc = memcpy_s((GS_UCHAR*)pDecryptInfo->crypto_modlue_params, CRYPTO_MODULE_PARAMS_MAX_LEN, params, strlen(params)); + securec_check_c(rc, "\0", "\0"); + + if (module_encrypt_mode == NULL) { + fprintf(stderr, ("encrypt_mode cannot be NULL\n")); + exit(1); + } else { + rc = memcpy_s((GS_UCHAR*)pDecryptInfo->crypto_type, CRYPTO_MODULE_ENC_TYPE_MAX_LEN, module_encrypt_mode, strlen(module_encrypt_mode)); + securec_check_c(rc, "\0", "\0"); + } + + if (module_encrypt_salt == NULL || strlen(module_encrypt_salt) != 16) { + fprintf(stderr, ("salt is needed and must be 16 bytes\n")); + exit(1); + } else { + rc = memcpy_s((GS_UCHAR*)pDecryptInfo->rand, RANDOM_LEN + 1, module_encrypt_salt, strlen(module_encrypt_salt)); + securec_check_c(rc, "\0", "\0"); + + pDecryptInfo->randget = true; + } + + initCryptoModule(pDecryptInfo); + initCryptoSession(pDecryptInfo); + + if (module_encrypt_key) { + char *tmpkey = NULL; + unsigned int tmpkeylen = 0; + + tmpkey = SEC_decodeBase64(module_encrypt_key, &tmpkeylen); + if (tmpkey == NULL || tmpkeylen > KEY_MAX_LEN) { + if (tmpkey) { + OPENSSL_free(tmpkey); + } + fprintf(stderr, ("invalid key\n")); + exit(1); + } else { + rc = memcpy_s((GS_UCHAR*)pDecryptInfo->Key, KEY_MAX_LEN, tmpkey, tmpkeylen); + securec_check_c(rc, "\0", "\0"); + pDecryptInfo->keyLen = tmpkeylen; + } + } else { + fprintf(stderr, ("invalid key\n")); + exit(1); + } + + initCryptoKeyCtx(pDecryptInfo); + + pDecryptInfo->encryptInclude = true; + pDecryptInfo->clientSymmCryptoFunc = crypto_encrypt_decrypt_use; +} diff --git a/src/bin/psql/common_cipher.h b/src/bin/psql/common_cipher.h new file mode 100644 index 0000000000..0a5d0a56b6 --- /dev/null +++ b/src/bin/psql/common_cipher.h @@ -0,0 +1,37 @@ +#ifndef COMMON_CIPHER_H +#define COMMON_CIPHER_H +#include +#include +#include +#include +#include +#include +#include "utils/aes.h" + +#ifdef 
__cplusplus +extern "C" { +#endif + +#define MAX_CRYPTO_CACHE_LEN 8192 +#define CRYPTO_BLOCK_SIZE 16 +#define MAX_WRITE_CACHE_LEN (MAX_CRYPTO_CACHE_LEN - CRYPTO_BLOCK_SIZE) /*加密算法补pad模式为强补,最多可以补16字节,所以写缓存少16字节,则密文最长8192、保证读取时可以整块密文读入*/ + +typedef int (*crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); +extern crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use; + +extern bool load_crypto_module_lib(); +extern void unload_crypto_module(int code, void* args); +extern void initCryptoModule(DecryptInfo* pDecryptInfo); +extern void initCryptoSession(DecryptInfo* pDecryptInfo); +extern void releaseCryptoSession(int code, void* args); +extern void initCryptoKeyCtx(DecryptInfo* pDecryptInfo); +extern void releaseCryptoCtx(int code, void* args); +extern void symmEncDec(DecryptInfo* pDecryptInfo, bool isEnc, char* indata, int inlen, char* outdata, int* outlen); +extern void symmGenerateKey(DecryptInfo* pDecryptInfo); +extern void CryptoModuleParamsCheck(DecryptInfo* pDecryptInfo, const char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt); + +#ifdef __cplusplus +} +#endif + +#endif /*COMMON_CIPHER_H*/ \ No newline at end of file diff --git a/src/bin/psql/startup.cpp b/src/bin/psql/startup.cpp index 64fb84c58d..36a808334b 100644 --- a/src/bin/psql/startup.cpp +++ b/src/bin/psql/startup.cpp @@ -38,6 +38,7 @@ #include "libpq/libpq-int.h" #endif #include "nodes/pg_list.h" +#include "common_cipher.h" /* * Global psql options @@ -1048,6 +1049,9 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* {"help", no_argument, NULL, '?'}, /* Database Security: Data importing/dumping support AES128. 
*/ {"with-key", required_argument, NULL, 'k'}, + {"with-decryption", required_argument, NULL, 'D'}, + {"with-module-params", required_argument, NULL, 'u'}, + {"with-salt", required_argument, NULL, 1}, #if defined(USE_ASSERT_CHECKING) || defined(FASTCHECK) {"sql-parse", no_argument, NULL, 'g'}, #endif @@ -1061,6 +1065,9 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* bool action_string_need_free = false; /* Database Security: Data importing/dumping support AES128. */ char* dencrypt_key = NULL; + char* decrypt_salt = NULL; + char* module_params = NULL; + char* decrypt_type = NULL; char* dbname = NULL; errno_t rc = EOK; #ifdef USE_READLINE @@ -1073,7 +1080,7 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* check_short_optOfVoid("aAc:d:eEf:F:gh:Hlk:L:mno:p:P:qCR:rsStT:U:v:W:VxXz?012", argc, argv); while ((c = getopt_long( - argc, argv, "aAc:d:eEf:F:gh:Hlk:L:mno:p:P:qCR:rsStT:U:v:W:VxXz?012", long_options, &optindex)) != -1) { + argc, argv, "aAc:d:D:eEf:F:gh:Hlk:u:L:mno:p:P:qCR:rsStT:U:v:W:VxXz?012", long_options, &optindex)) != -1) { switch (c) { case 'a': if (!SetVariable(pset.vars, "ECHO", "all")) { @@ -1158,10 +1165,17 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* dencrypt_key = pg_strdup(optarg); rc = memset_s(optarg, strlen(optarg), 0, strlen(optarg)); check_memset_s(rc); - set_aes_key(dencrypt_key); - free(dencrypt_key); break; } + case 'D': + decrypt_type = pg_strdup(optarg); + break; + case 'u': + module_params = pg_strdup(optarg); + break; + case 1: + decrypt_salt = pg_strdup(optarg); + break; case 'L': options->logfilename = optarg; break; @@ -1321,6 +1335,17 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* } } + if (module_params) { + CryptoModuleParamsCheck(&(pset.decryptInfo), module_params, decrypt_type, dencrypt_key, decrypt_salt); + free(module_params); + free(decrypt_type); + free(dencrypt_key); + 
free(decrypt_salt); + } else if (dencrypt_key){ + set_aes_key(dencrypt_key); + free(dencrypt_key); + } + /* * if we still have arguments, use it as the database name and username */ diff --git a/src/gausskernel/cbb/utils/aes/aes.cpp b/src/gausskernel/cbb/utils/aes/aes.cpp index 10879fbfb4..e9c14cdd98 100644 --- a/src/gausskernel/cbb/utils/aes/aes.cpp +++ b/src/gausskernel/cbb/utils/aes/aes.cpp @@ -69,7 +69,7 @@ bool init_aes_vector_random(GS_UCHAR* aes_vector, size_t vector_len) /* inputstrlen must include the terminating '\0' character */ bool writeFileAfterEncryption( - FILE* pf, char* inputstr, int inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* randvalue) + FILE* pf, char* inputstr, int inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* randvalue, void* moduleKeyCtx, kernel_crypto_encrypt_decrypt_type encFunc) { void* writeBuff = NULL; int64 writeBuffLen; @@ -118,7 +118,7 @@ bool writeFileAfterEncryption( errorno = memset_s(outputstr, outputlen, '\0', outputlen); securec_check_c(errorno, "\0", "\0"); - /* put the rand in file first */ + /* put the rand in file first only for soft encryption*/ if (0 == strncmp((char*)randvalue, inputstr, RANDOM_LEN)) { errorno = memcpy_s((char*)writeBuff, writeBuffLen, inputstr, RANDOM_LEN); securec_check_c(errorno, "\0", "\0"); @@ -139,19 +139,33 @@ bool writeFileAfterEncryption( } /* the real encrypt operation */ - encryptstatus = aes128Encrypt((GS_UCHAR*)inputstr, - (GS_UINT32)inputstrlen, - Key, - (GS_UINT32)strlen((const char*)Key), - randvalue, - outputstr, - &cipherlen); - if (!encryptstatus) { - free(writeBuff); - writeBuff = NULL; - free(outputstr); - outputstr = NULL; - return false; + if (moduleKeyCtx && encFunc) { + int ret = 1; + cipherlen = outputlen; + + ret = encFunc(moduleKeyCtx, 1, (unsigned char*)inputstr, inputstrlen, randvalue, 16, (unsigned char*)outputstr, (size_t*)(&cipherlen), NULL); + if (ret != 1) { + free(writeBuff); + writeBuff = NULL; + free(outputstr); + outputstr 
= NULL; + return false; + } + } else { + encryptstatus = aes128Encrypt((GS_UCHAR*)inputstr, + (GS_UINT32)inputstrlen, + Key, + (GS_UINT32)strlen((const char*)Key), + randvalue, + outputstr, + &cipherlen); + if (!encryptstatus) { + free(writeBuff); + writeBuff = NULL; + free(outputstr); + outputstr = NULL; + return false; + } } errorno = sprintf_s(encryptleninfo, sizeof(encryptleninfo), "%u", cipherlen); @@ -197,6 +211,15 @@ void initDecryptInfo(DecryptInfo* pDecryptInfo) pDecryptInfo->randget = false; errorno = memset_s(pDecryptInfo->rand, RANDOM_LEN + 1, '\0', RANDOM_LEN + 1); securec_check_c(errorno, "\0", "\0"); + + errorno = memset_s(pDecryptInfo->crypto_modlue_params, CRYPTO_MODULE_PARAMS_MAX_LEN, '\0', CRYPTO_MODULE_PARAMS_MAX_LEN); + securec_check_c(errorno, "\0", "\0"); + + errorno = memset_s(pDecryptInfo->crypto_type, CRYPTO_MODULE_ENC_TYPE_MAX_LEN, '\0', CRYPTO_MODULE_ENC_TYPE_MAX_LEN); + securec_check_c(errorno, "\0", "\0"); + + pDecryptInfo->moduleKeyCtx = NULL; + pDecryptInfo->moduleSessionCtx = NULL; } static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) { @@ -208,6 +231,7 @@ static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) GS_UCHAR* outputstr = NULL; bool decryptstatus = false; errno_t errorno = EOK; + int moduleRet = 1; if (!feof(source) && (false == pDecryptInfo->isCurrLineProcess)) { nread = (int)fread((void*)cipherleninfo, 1, RANDOM_LEN, source); @@ -219,7 +243,7 @@ static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) return false; } - /* get the rand value from encryptfile first */ + /* get the rand value from encryptfile first only for soft encryption */ if (!pDecryptInfo->randget) { errorno = memcpy_s(pDecryptInfo->rand, RANDOM_LEN + 1, cipherleninfo, RANDOM_LEN); securec_check_c(errorno, "\0", "\0"); @@ -264,17 +288,22 @@ static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) /* read ciphertext from encrypt file. 
*/ nread = (int)fread((void*)ciphertext, 1, cipherlen, source); if (nread) { - /* the real decrypt operation */ - decryptstatus = aes128Decrypt(ciphertext, - (GS_UINT32)cipherlen, - (GS_UCHAR*)pDecryptInfo->Key, - (GS_UINT32)strlen((const char*)pDecryptInfo->Key), - pDecryptInfo->rand, - outputstr, - &plainlen); + if (pDecryptInfo->moduleKeyCtx && pDecryptInfo->clientSymmCryptoFunc) { + plainlen = cipherlen; + moduleRet = pDecryptInfo->clientSymmCryptoFunc(pDecryptInfo->moduleKeyCtx, 0, ciphertext, cipherlen, pDecryptInfo->rand, 16, outputstr,(size_t*)(&plainlen), NULL); + } else { + decryptstatus = aes128Decrypt(ciphertext, + (GS_UINT32)cipherlen, + (GS_UCHAR*)pDecryptInfo->Key, + (GS_UINT32)strlen((const char*)pDecryptInfo->Key), + pDecryptInfo->rand, + outputstr, + &plainlen); + } } - if (!nread || !decryptstatus) { + if (!nread || (!decryptstatus && (pDecryptInfo->moduleKeyCtx == NULL && pDecryptInfo->clientSymmCryptoFunc == NULL)) + || (moduleRet != 1 && pDecryptInfo->moduleKeyCtx && pDecryptInfo->clientSymmCryptoFunc)) { errorno = memset_s(ciphertext, cipherlen, '\0', cipherlen); securec_check_c(errorno, "", ""); free(ciphertext); diff --git a/src/include/utils/aes.h b/src/include/utils/aes.h index 7b5635b32a..5f0aa17407 100644 --- a/src/include/utils/aes.h +++ b/src/include/utils/aes.h @@ -42,23 +42,34 @@ #define AES_ENCRYPT_LEN(inputlen) \ ((inputlen % AES_GROUP_LEN) ? 
((inputlen / AES_GROUP_LEN) * AES_GROUP_LEN + AES_GROUP_LEN) : inputlen) +typedef int (*kernel_crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); + +#define CRYPTO_MODULE_PARAMS_MAX_LEN 1024 +#define CRYPTO_MODULE_ENC_TYPE_MAX_LEN 16 typedef struct decrypt_struct { unsigned char* decryptBuff; char currLine[MAX_DECRYPT_BUFF_LEN]; unsigned char Key[KEY_MAX_LEN]; + int keyLen; bool isCurrLineProcess; bool encryptInclude; + kernel_crypto_encrypt_decrypt_type clientSymmCryptoFunc; + /* Encrypt gs_dump file through OpenSSL function */ bool randget; unsigned char rand[RANDOM_LEN + 1]; + void* moduleSessionCtx; + void* moduleKeyCtx; + char crypto_modlue_params[CRYPTO_MODULE_PARAMS_MAX_LEN]; + char crypto_type[CRYPTO_MODULE_ENC_TYPE_MAX_LEN]; } DecryptInfo; extern void initDecryptInfo(DecryptInfo* pDecryptInfo); extern char* getLineFromAesEncryptFile(FILE* source, DecryptInfo* pDecryptInfo); extern bool writeFileAfterEncryption( - FILE* pf, char* inputstr, int inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* rand); + FILE* pf, char* inputstr, int inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* rand, void* moduleKeyCtx = NULL, kernel_crypto_encrypt_decrypt_type encFunc = NULL); extern bool check_key(const char* key, int NUM); extern void aesEncrypt(char* inputstr, unsigned long inputstrlen, char* outputstr, unsigned char Key[]); extern void aesDecrypt(char* inputstr, unsigned long inputstrlen, char* outputstr, unsigned char Key[], bool isBinary); -- Gitee From c3b3dd1ba78ade26a6d9e762e24e67db64f460db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=80=B6=E6=A2=A6=E5=8A=A0=E5=BE=97?= Date: Wed, 28 Aug 2024 15:15:21 +0800 Subject: [PATCH 251/347] =?UTF-8?q?=E3=80=90feature=E3=80=91pg=5Fprobackup?= =?UTF-8?q?=E9=80=82=E9=85=8D=E7=A1=AC=E4=BB=B6=E5=AF=86=E7=A0=81=E6=A8=A1?= =?UTF-8?q?=E5=9D=97?= MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_probackup/Makefile | 2 +- src/bin/pg_probackup/backup.cpp | 165 +++++++++++++++++ src/bin/pg_probackup/catalog.cpp | 238 +++++++++++++++++++++++++ src/bin/pg_probackup/common_cipher.cpp | 232 ++++++++++++++++++++++++ src/bin/pg_probackup/common_cipher.h | 83 +++++++++ src/bin/pg_probackup/pg_probackup.cpp | 25 ++- src/bin/pg_probackup/pg_probackupc.h | 15 ++ 7 files changed, 757 insertions(+), 3 deletions(-) create mode 100644 src/bin/pg_probackup/common_cipher.cpp create mode 100644 src/bin/pg_probackup/common_cipher.h diff --git a/src/bin/pg_probackup/Makefile b/src/bin/pg_probackup/Makefile index cfdc1b0283..9b28ea0efd 100644 --- a/src/bin/pg_probackup/Makefile +++ b/src/bin/pg_probackup/Makefile @@ -10,7 +10,7 @@ PROGRAM = gs_probackup # utils -OBJS = configuration.o json.o logger.o \ +OBJS = common_cipher.o configuration.o json.o logger.o \ parray.o pgut.o thread.o remote.o file.o file_gz.o psprintf.o \ atomics.o s_lock.o diff --git a/src/bin/pg_probackup/backup.cpp b/src/bin/pg_probackup/backup.cpp index 76833d3c0f..8c92425974 100644 --- a/src/bin/pg_probackup/backup.cpp +++ b/src/bin/pg_probackup/backup.cpp @@ -23,6 +23,7 @@ #include #include +#include "common_cipher.h" #include "tool_common.h" #include "thread.h" #include "file.h" @@ -143,6 +144,8 @@ static bool IsPrimary(PGconn* conn); static void *ProgressReportProbackup(void *arg); static void *ProgressReportSyncBackupFile(void *arg); +static void compress_encrypt_directory(); + static void backup_stopbackup_callback(bool fatal, void *userdata) { @@ -1125,6 +1128,8 @@ do_backup(time_t start_time, pgSetBackupParams *set_backup_params, if (!no_validate && current.media_type != MEDIA_TYPE_OSS) pgBackupValidate(¤t, NULL); + compress_encrypt_directory(); + /* do something after backup */ do_after_backup(); @@ -2903,6 +2908,166 @@ static bool PathContainPath(const char* path1, const char* path2) return false; } +/* 
compress and then encrypt the directory */ +static void compress_encrypt_directory() +{ + error_t rc; + int ret = 0; + bool res = false; + uint key_len = 0; + uint enc_buffer_len = 0; + uint out_buffer_len = 0; + long int backup_tar_pos = 0; + long int backup_tar_length = 0; + char* key = NULL; + char sys_cmd[MAXPGPATH] = {0}; + char tar_file[MAXPGPATH] = {0}; + char enc_file[MAXPGPATH] = {0}; + unsigned char enc_buffer[MAX_ENCRYPT_LEN + 1] = {0}; + unsigned char out_buffer[MAX_ENCRYPT_LEN + 1] = {0}; + char errmsg[MAX_ERRMSG_LEN] = {0}; + int algo; + + if (NULL == encrypt_dev_params) { + return; + } + + rc = sprintf_s(sys_cmd, MAXPGPATH, "tar -cPf %s.tar %s > /dev/null 2>&1", current.root_dir, current.root_dir); + securec_check_ss_c(rc, "\0", "\0"); + if (!is_valid_cmd(sys_cmd)) { + elog(ERROR, "cmd is rejected"); + return; + } + system(sys_cmd); + rc = memset_s(sys_cmd, MAXPGPATH,0, MAXPGPATH); + securec_check(rc, "\0", "\0"); + + rc = sprintf_s(tar_file, MAXPGPATH, "%s.tar", current.root_dir); + securec_check_ss_c(rc, "\0", "\0"); + + rc = sprintf_s(enc_file, MAXPGPATH, "%s_enc", current.root_dir); + securec_check_ss_c(rc, "\0", "\0"); + FILE* enc_backup_fd = fopen(enc_file, "wb"); + if(!enc_backup_fd) { + elog(ERROR, ("failed to create or open encrypt backup file.")); + return; + } + + FILE* backup_tar_fd = fopen(tar_file, "rb"); + if (!backup_tar_fd) { + elog(ERROR, ("failed to open compressed backup file")); + return; + } + + CryptoModuleParamsCheck(gen_key, encrypt_dev_params, encrypt_mode, encrypt_key, encrypt_salt); + + initCryptoSession(&crypto_module_session); + + algo = transform_type(encrypt_mode); + + if (gen_key) { + ret = crypto_create_symm_key_use(crypto_module_session, (ModuleSymmKeyAlgo)algo, (unsigned char*)key, (size_t*)&key_len); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, "crypto module gen key error, errmsg:%s\n", errmsg); + } + } else { + key = 
SEC_decodeBase64(encrypt_key, &key_len); + if (NULL == key) { + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, "crypto module decode key error, please check --with-key."); + } + } + + ret = crypto_ctx_init_use(crypto_module_session, &crypto_module_keyctx, (ModuleSymmKeyAlgo)algo, 1, (unsigned char*)key, key_len); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, "crypto keyctx init error, errmsg:%s\n", errmsg); + } + + fseek(backup_tar_fd,0,SEEK_END); + backup_tar_length = ftell(backup_tar_fd); + fseek(backup_tar_fd,0,SEEK_SET); + + while(backup_tar_pos < backup_tar_length) + { + if ((backup_tar_length - backup_tar_pos) > MAX_ENCRYPT_LEN) { + ret = memset_s(enc_buffer, MAX_ENCRYPT_LEN + 1, '\0', MAX_ENCRYPT_LEN + 1); + securec_check(ret, "\0", "\0"); + fread(enc_buffer,MAX_ENCRYPT_LEN,1,backup_tar_fd); + backup_tar_pos += MAX_ENCRYPT_LEN; + enc_buffer_len = MAX_ENCRYPT_LEN; + } else { + ret = memset_s(enc_buffer, MAX_ENCRYPT_LEN + 1, '\0', MAX_ENCRYPT_LEN + 1); + securec_check(ret, "\0", "\0"); + fread(enc_buffer,(backup_tar_length - backup_tar_pos),1,backup_tar_fd); + enc_buffer_len = backup_tar_length - backup_tar_pos; + backup_tar_pos = backup_tar_length; + } + + ret = memset_s(out_buffer, MAX_ENCRYPT_LEN + 1, '\0', MAX_ENCRYPT_LEN + 1); + securec_check(ret, "\0", "\0"); + + ret = crypto_encrypt_decrypt_use(crypto_module_keyctx, 1, (unsigned char*)enc_buffer, enc_buffer_len, + (unsigned char*)encrypt_salt, MAX_IV_LEN, out_buffer, (size_t*)&out_buffer_len, NULL); + if (ret != 1) { + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, ("failed to encrypt backup file")); + } + + fwrite(out_buffer, 1, out_buffer_len, enc_backup_fd); + } + + fclose(backup_tar_fd); + fclose(enc_backup_fd); + clearCrypto(crypto_module_session, crypto_module_keyctx); + + rc = sprintf_s(sys_cmd, MAXPGPATH, "rm %s %s.tar -rf", current.root_dir, current.root_dir); 
+ securec_check_ss_c(rc, "\0", "\0"); + + if (!is_valid_cmd(sys_cmd)) { + elog(ERROR, "cmd is rejected"); + } + system(sys_cmd); + enc_flag = true; +} + +/* + * Function: is_valid_cmd + * Description: check cmd + * + * Input: + * char * cmd exec + * Return: + * bool true valid + */ +bool is_valid_cmd(char * cmd) +{ + if (NULL == cmd) + { + elog(ERROR, "cmd is NULL"); + return false; + } + + if (strstr(cmd, "rm")) + { + return true; + } + else if (strstr(cmd, "tar")) + { + return true; + } + else + { + elog(ERROR, "the cmd line is rejected:%s.",cmd); + return false; + } + +} + static bool IsPrimary(PGconn* conn) { #define MAXRUNMODE 64 diff --git a/src/bin/pg_probackup/catalog.cpp b/src/bin/pg_probackup/catalog.cpp index f645710551..d79c7bc2b7 100644 --- a/src/bin/pg_probackup/catalog.cpp +++ b/src/bin/pg_probackup/catalog.cpp @@ -22,6 +22,7 @@ #include "common/fe_memutils.h" #include "oss/include/restore.h" #include "oss/include/oss_operator.h" +#include "common_cipher.h" static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); @@ -31,6 +32,8 @@ static pgBackup *readBackupControlFile(const char *path); static bool exit_hook_registered = false; static parray *lock_files = NULL; +static void uncompress_decrypt_directory(const char *instance_name_str); + static timelineInfo * timelineInfoNew(TimeLineID tli) { @@ -447,6 +450,8 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id) char backup_instance_path[MAXPGPATH]; int nRet = 0; + uncompress_decrypt_directory(instance_name); + nRet = snprintf_s(backup_instance_path,MAXPGPATH,MAXPGPATH - 1, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name); securec_check_ss_c(nRet, "\0", "\0"); @@ -2870,3 +2875,236 @@ char* relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum) return path; } +/* decrypt and then uncompress the directory */ +static void uncompress_decrypt_directory(const char *instance_name_str) +{ + errno_t 
rc; + + bool res = false; + DIR *data_dir = NULL; + struct dirent *data_ent = NULL; + uint key_len = 0; + uint key_idx_uint = 0; + uint dec_buffer_len = 0; + uint out_buffer_len = MAX_CRYPTO_MODULE_LEN; + long int enc_file_pos = 0; + long int enc_file_len = 0; + char sys_cmd[MAXPGPATH] = {0}; + char* key = NULL; + unsigned char dec_buffer[MAX_CRYPTO_MODULE_LEN + 1] = {0}; + unsigned char out_buffer[MAX_CRYPTO_MODULE_LEN + 1] = {0}; + char enc_backup_file[MAXPGPATH] = {0}; + char dec_backup_file[MAXPGPATH] = {0}; + char backup_instance_path[MAXPGPATH] = {0}; + int algo; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + if (NULL == encrypt_dev_params) { + return; + } + + CryptoModuleParamsCheck(gen_key, encrypt_dev_params, encrypt_mode, encrypt_key, encrypt_salt); + + initCryptoSession(&crypto_module_session); + + algo = transform_type(encrypt_mode); + if (gen_key) { + rc = crypto_create_symm_key_use(crypto_module_session, (ModuleSymmKeyAlgo)algo, (unsigned char*)key, (size_t*)&key_len); + if (rc != 1) { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, "crypto module gen key error, errmsg:%s\n", errmsg); + } + } else { + key = SEC_decodeBase64(encrypt_key, &key_len); + if (NULL == key) { + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, "crypto module decode key error, please check --with-key."); + } + } + + rc = crypto_ctx_init_use(crypto_module_session, &crypto_module_keyctx, (ModuleSymmKeyAlgo)algo, 0, (unsigned char*)key, key_len); + if (rc != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, "crypto keyctx init error, errmsg:%s\n", errmsg); + } + + rc = sprintf_s(backup_instance_path, MAXPGPATH, "%s/%s/%s", + backup_path, BACKUPS_DIR, instance_name_str); + securec_check_ss_c(rc, "\0", "\0"); + + data_dir = fio_opendir(backup_instance_path, FIO_BACKUP_HOST); + if (data_dir == NULL) { + elog(ERROR, "cannot open 
directory \"%s\": %s", backup_instance_path, strerror(errno)); + return; + } + + for(;(data_ent = fio_readdir(data_dir)) != NULL; errno = 0) + { + if (strstr(data_ent->d_name,"_enc")) { + rc = sprintf_s(enc_backup_file, MAXPGPATH, "%s/%s", backup_instance_path, data_ent->d_name); + securec_check_ss_c(rc, "\0", "\0"); + + FILE* enc_backup_fd = fopen(enc_backup_file,"rb"); + if(NULL == enc_backup_fd) { + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, ("failed to create or open encrypt backup file.")); + return; + } + + fseek(enc_backup_fd,0,SEEK_END); + enc_file_len = ftell(enc_backup_fd); + + fseek(enc_backup_fd, 0, SEEK_SET); + + enc_file_pos = ftell(enc_backup_fd); + + rc = sprintf_s(dec_backup_file, MAXPGPATH, "%s/%s.tar", backup_instance_path, data_ent->d_name); + securec_check_ss_c(rc, "\0", "\0"); + + FILE* dec_file_fd = fopen(dec_backup_file,"wb"); + + while(enc_file_pos < enc_file_len) + { + if(enc_file_pos + MAX_CRYPTO_MODULE_LEN < enc_file_len) { + memset_s(dec_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); + memset_s(out_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); + fread(dec_buffer, 1, MAX_CRYPTO_MODULE_LEN, enc_backup_fd); + dec_buffer_len = MAX_CRYPTO_MODULE_LEN; + enc_file_pos += MAX_CRYPTO_MODULE_LEN; + } else { + memset_s(dec_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); + memset_s(out_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); + fread(dec_buffer, 1, enc_file_len - enc_file_pos, enc_backup_fd); + dec_buffer_len = enc_file_len - enc_file_pos; + enc_file_pos = enc_file_len; + } + + rc = crypto_encrypt_decrypt_use(crypto_module_keyctx, 0, (unsigned char*)dec_buffer, dec_buffer_len, + (unsigned char*)encrypt_salt, MAX_IV_LEN, (unsigned char*)out_buffer, (size_t*)&out_buffer_len, NULL); + if(rc != 1) { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx); + elog(ERROR, ("failed to decrypt enc_backup_file, errmsg: %s"), errmsg); + } + 
+ fwrite(out_buffer, 1, out_buffer_len, dec_file_fd); + } + fclose(dec_file_fd); + fclose(enc_backup_fd); + + clearCrypto(crypto_module_session, crypto_module_keyctx); + + rc = sprintf_s(sys_cmd, MAXPGPATH, "tar -xPf %s/%s.tar", backup_instance_path, data_ent->d_name); + + if (!is_valid_cmd(sys_cmd)) { + elog(ERROR, "cmd is rejected"); + return; + } + system(sys_cmd); + rc = memset_s(sys_cmd, MAXPGPATH,0, MAXPGPATH); + securec_check(rc, "\0", "\0"); + + rc = sprintf_s(sys_cmd, MAXPGPATH, "rm -rf %s/%s.tar", backup_instance_path, data_ent->d_name); + + if (!is_valid_cmd(sys_cmd)) { + elog(ERROR, "cmd is rejected"); + return; + } + system(sys_cmd); + rc = memset_s(sys_cmd, MAXPGPATH,0, MAXPGPATH); + securec_check(rc, "\0", "\0"); + enc_flag = true; + } + } + + if (data_dir) { + fio_closedir(data_dir); + data_dir = NULL; + } +} + +/* + * Function: delete_one_instance_backup_directory + * + * Return: + * void + */ +static void delete_one_instance_backup_directory(char *instance_name_str) +{ + char backup_instance_path[MAXPGPATH]; + DIR *data_dir = NULL; + struct dirent *data_ent = NULL; + char sys_cmd[MAXPGPATH] = {0}; + + errno_t rc = sprintf_s(backup_instance_path, MAXPGPATH, "%s/%s/%s", + backup_path, BACKUPS_DIR, instance_name_str); + securec_check_ss_c(rc, "\0", "\0"); + + data_dir = fio_opendir(backup_instance_path, FIO_BACKUP_HOST); + if (data_dir == NULL) + { + elog(ERROR, "cannot open directory \"%s\": %s", backup_instance_path, + strerror(errno)); + return; + } + + for (;(data_ent = fio_readdir(data_dir)) != NULL; errno = 0) + { + if (IsDir(backup_instance_path, data_ent->d_name, FIO_BACKUP_HOST) && data_ent->d_name[0] != '.') { + error_t rc = sprintf_s(sys_cmd, MAXPGPATH, "rm %s/%s -rf", backup_instance_path,data_ent->d_name); + securec_check_ss_c(rc, "\0", "\0"); + if (!is_valid_cmd(sys_cmd)) { + elog(ERROR, "cmd is rejected"); + return; + } + + system(sys_cmd); + rc = memset_s(sys_cmd, MAXPGPATH,0, MAXPGPATH); + securec_check(rc, "\0", "\0"); + } + } + + 
if (data_dir) { + fio_closedir(data_dir); + data_dir = NULL; + } + +} + +/* + * Function: delete_backup_directory + * Description: + * + * Input: + * char *instance_name + * Return: + * void + */ +void delete_backup_directory(char *instance_name_str) +{ + int i = 0; + if(NULL == encrypt_dev_params || !enc_flag) { + return; + } + + if (instance_name_str == NULL) { + parray *instances = catalog_get_instance_list(); + + for (i = 0; i < parray_num(instances); i++) + { + InstanceConfig *instance = (InstanceConfig *)parray_get(instances, i); + delete_one_instance_backup_directory(instance->name); + } + parray_walk(instances, pfree); + parray_free(instances); + + return; + } + + delete_one_instance_backup_directory(instance_name_str); + +} + diff --git a/src/bin/pg_probackup/common_cipher.cpp b/src/bin/pg_probackup/common_cipher.cpp new file mode 100644 index 0000000000..29ca68922c --- /dev/null +++ b/src/bin/pg_probackup/common_cipher.cpp @@ -0,0 +1,232 @@ +#include "common_cipher.h" +#include "securec.h" +#include "securec_check.h" +#include "port.h" +#include "libpq/pqcomm.h" + +#define MAX_PROVIDER_NAME_LEN 128 +#define MAX_ERRMSG_LEN 256 + + +typedef int (*crypto_module_init_type)(char *load_info, SupportedFeature *supported_feature); +typedef int (*crypto_module_sess_init_type)(char *key_info, void **sess); +typedef void (*crypto_module_sess_exit_type)(void *sess); +typedef int (*crypto_result_size_type)(void *ctx, int enc, size_t data_size); +typedef void (*crypto_ctx_clean_type)(void *ctx); +typedef int (*crypto_digest_type)(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); +typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); +typedef void (*crypto_hmac_clean_type)(void *ctx); +typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); +typedef int 
(*crypto_gen_random_type)(void *sess, char *buffer, size_t size); +typedef int (*crypto_deterministic_enc_dec_type)(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); + + +static void *libhandle = NULL; + +static crypto_module_init_type crypto_module_init_use = NULL; +static crypto_module_sess_init_type crypto_module_sess_init_use = NULL; +static crypto_module_sess_exit_type crypto_module_sess_exit_use = NULL; +crypto_create_symm_key_type crypto_create_symm_key_use = NULL; +crypto_ctx_init_type crypto_ctx_init_use = NULL; +static crypto_result_size_type crypto_result_size_use = NULL; +static crypto_ctx_clean_type crypto_ctx_clean_use = NULL; +crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use = NULL; +static crypto_digest_type crypto_digest_use = NULL; +static crypto_hmac_init_type crypto_hmac_init_use = NULL; +static crypto_hmac_clean_type crypto_hmac_clean_use = NULL; +static crypto_hmac_type crypto_hmac_use = NULL; +static crypto_gen_random_type crypto_gen_random_use = NULL; +static crypto_deterministic_enc_dec_type crypto_deterministic_enc_dec_use = NULL; +crypto_get_errmsg_type crypto_get_errmsg_use = NULL; + +bool load_crypto_module_lib() +{ + errno_t rc = 0; + char libpath[1024] = {0}; + char* gaussHome = gs_getenv_r("GAUSSHOME"); + if(check_client_env(gaussHome) == NULL) { + fprintf(stderr, "crypto module get GAUSSHOME failed."); + exit(1); + } + + rc = snprintf_s(libpath, sizeof(libpath), sizeof(libpath) - 1, "%s/lib/postgresql/common_cipher.so",gaussHome); + securec_check_ss_c(rc, "", ""); + + libhandle = dlopen(libpath, RTLD_LAZY); + if (libhandle == NULL) { + return false; + } + + crypto_module_init_use = (crypto_module_init_type)dlsym(libhandle, "crypto_module_init"); + crypto_module_sess_init_use = (crypto_module_sess_init_type)dlsym(libhandle, "crypto_module_sess_init"); + crypto_module_sess_exit_use = (crypto_module_sess_exit_type)dlsym(libhandle, 
"crypto_module_sess_exit"); + crypto_create_symm_key_use = (crypto_create_symm_key_type)dlsym(libhandle, "crypto_create_symm_key"); + crypto_ctx_init_use = (crypto_ctx_init_type)dlsym(libhandle, "crypto_ctx_init"); + crypto_result_size_use = (crypto_result_size_type)dlsym(libhandle, "crypto_result_size"); + crypto_ctx_clean_use = (crypto_ctx_clean_type)dlsym(libhandle, "crypto_ctx_clean"); + crypto_encrypt_decrypt_use = (crypto_encrypt_decrypt_type)dlsym(libhandle, "crypto_encrypt_decrypt"); + crypto_digest_use = (crypto_digest_type)dlsym(libhandle, "crypto_digest"); + crypto_hmac_init_use = (crypto_hmac_init_type)dlsym(libhandle, "crypto_hmac_init"); + crypto_hmac_clean_use = (crypto_hmac_clean_type)dlsym(libhandle, "crypto_hmac_clean"); + crypto_hmac_use = (crypto_hmac_type)dlsym(libhandle, "crypto_hmac"); + crypto_gen_random_use = (crypto_gen_random_type)dlsym(libhandle, "crypto_gen_random"); + crypto_deterministic_enc_dec_use = (crypto_deterministic_enc_dec_type)dlsym(libhandle, "crypto_deterministic_enc_dec"); + crypto_get_errmsg_use = (crypto_get_errmsg_type)dlsym(libhandle, "crypto_get_errmsg"); + + if (crypto_module_init_use == NULL + || crypto_module_sess_init_use == NULL + || crypto_module_sess_exit_use == NULL + || crypto_create_symm_key_use == NULL + || crypto_ctx_init_use == NULL + || crypto_result_size_use == NULL + || crypto_ctx_clean_use == NULL + || crypto_encrypt_decrypt_use == NULL + || crypto_digest_use == NULL + || crypto_hmac_init_use == NULL + || crypto_hmac_clean_use == NULL + || crypto_hmac_use == NULL + || crypto_gen_random_use == NULL + || crypto_deterministic_enc_dec_use == NULL + || crypto_get_errmsg_use == NULL) { + dlclose(libhandle); + return false; + } + + return true; +} + +void unload_crypto_module() +{ + if (libhandle) { + dlclose(libhandle); + libhandle = NULL; + } +} + +int transform_type(const char* type) +{ + if (strcmp(type, "AES128_CBC") == 0) { + return MODULE_AES_128_CBC; + } else if (strcmp(type, "AES128_CTR") == 0) { + 
return MODULE_AES_128_CTR; + } else if (strcmp(type, "AES128_GCM") == 0) { + return MODULE_AES_128_GCM; + } else if (strcmp(type, "AES256_CBC") == 0) { + return MODULE_AES_256_CBC; + } else if (strcmp(type, "AES256_CTR") == 0) { + return MODULE_AES_256_CTR; + } else if (strcmp(type, "AES256_GCM") == 0) { + return MODULE_AES_256_GCM; + } else if (strcmp(type, "SM4_CBC") == 0) { + return MODULE_SM4_CBC; + } else if (strcmp(type, "SM4_CTR") == 0) { + return MODULE_SM4_CTR; + } + + return -1; + +} + +void initCryptoModule(char* crypto_module_params, const char* encrypt_mode) +{ + int ret = 1; + SupportedFeature supportedfeature; + int modulType = 0; + + char errmsg[MAX_ERRMSG_LEN] = {0}; + + if (NULL == encrypt_mode) { + fprintf(stderr, "encrypt mode cannot be NULL."); + exit(1); + } + ret = crypto_module_init_use(crypto_module_params, &supportedfeature); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } + + modulType = transform_type(encrypt_mode); + if (modulType < 0 || supportedfeature.supported_symm[modulType] == 0) { + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } + +} + +void initCryptoSession(void** crypto_module_session) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_module_sess_init_use(NULL, crypto_module_session); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } + +} + +void releaseCryptoSession(void* crypto_module_session) +{ + if (libhandle && crypto_module_session) { + crypto_module_sess_exit_use(crypto_module_session); + crypto_module_session = NULL; + } +} + +void releaseCryptoCtx(void* crypto_module_keyctx) +{ + if (libhandle && crypto_module_keyctx) { + crypto_ctx_clean_use(crypto_module_keyctx); + crypto_module_keyctx = NULL; + } +} + +void clearCrypto(void* crypto_module_session, void* crypto_module_keyctx) +{ + releaseCryptoCtx(crypto_module_keyctx); + releaseCryptoSession(crypto_module_session); + 
unload_crypto_module(); +} + +void CryptoModuleParamsCheck(bool gen_key, char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt) +{ + errno_t rc = 0; + + if (!load_crypto_module_lib()) { + fprintf(stderr, ("load crypto module lib failed\n")); + exit(1); + } + + if (module_encrypt_mode == NULL || params == NULL) { + fprintf(stderr, ("encrypt mode and crypto module params cannot be NULL\n")); + exit(1); + } else { + initCryptoModule(params, module_encrypt_mode); + } + + if (gen_key && NULL != module_encrypt_key) { + fprintf(stderr, ("--gen-key cannot be used with --with-key at the same time.")); + } + + if (module_encrypt_salt == NULL || strlen(module_encrypt_salt) != MAX_IV_LEN) { + fprintf(stderr, ("invalid salt, salt is needed and must be 16 bytes\n")); + exit(1); + } + + if (module_encrypt_key) { + char *tmpkey = NULL; + unsigned int tmpkeylen = 0; + + tmpkey = SEC_decodeBase64(module_encrypt_key, &tmpkeylen); + if (tmpkey == NULL || tmpkeylen > KEY_MAX_LEN) { + if (tmpkey) { + OPENSSL_free(tmpkey); + } + fprintf(stderr, ("invalid key\n")); + exit(1); + } + } +} \ No newline at end of file diff --git a/src/bin/pg_probackup/common_cipher.h b/src/bin/pg_probackup/common_cipher.h new file mode 100644 index 0000000000..66b020cc61 --- /dev/null +++ b/src/bin/pg_probackup/common_cipher.h @@ -0,0 +1,83 @@ +#ifndef COMMON_CIPHER_H +#define COMMON_CIPHER_H +#include +#include +#include +#include +#include +#include +#include "utils/aes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_CRYPTO_MODULE_LEN 8192 +#define CRYPTO_BLOCK_SIZE 16 +#define MAX_ENCRYPT_LEN (MAX_CRYPTO_MODULE_LEN - CRYPTO_BLOCK_SIZE) /*加密算法补pad模式为强补,最多可以补16字节,所以最大加密长度少16字节,则密文最长8192、保证读取时可以整块密文读入*/ + +#define MAX_PROVIDER_NAME_LEN 128 +#define MAX_ERRMSG_LEN 256 +#define MAX_IV_LEN 16 + +typedef enum { + MODULE_AES_128_CBC = 0, + MODULE_AES_128_CTR, + MODULE_AES_128_GCM, + MODULE_AES_256_CBC, + MODULE_AES_256_CTR, + 
MODULE_AES_256_GCM, + MODULE_SM4_CBC, + MODULE_SM4_CTR, + MODULE_HMAC_SHA256, + MODULE_HMAC_SM3, + MODULE_DETERMINISTIC_KEY, + MODULE_ALGO_MAX = 1024 +} ModuleSymmKeyAlgo; + +typedef enum { + MODULE_SHA256 = 0, + MODULE_SM3, + MODULE_DIGEST_MAX = 1024 +} ModuleDigestAlgo; + +typedef enum { + KEY_TYPE_INVALID, + KEY_TYPE_PLAINTEXT, + KEY_TYPE_CIPHERTEXT, + KEY_TYPE_NAMEORIDX, + KEY_TYPE_MAX +} KeyType; + +typedef struct { + char provider_name[MAX_PROVIDER_NAME_LEN]; + KeyType key_type; + int supported_symm[MODULE_ALGO_MAX]; // 不支持算法填入0或者支持算法填入1 + int supported_digest[MODULE_DIGEST_MAX]; // 不支持算法填入0或者支持算法填入1 +} SupportedFeature; + +typedef int (*crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); +typedef int (*crypto_create_symm_key_type)(void *sess, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t *key_id_size); +typedef int (*crypto_get_errmsg_type)(void *sess, char *errmsg); +typedef int (*crypto_ctx_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, int enc, unsigned char *key_id, size_t key_id_size); + +extern crypto_create_symm_key_type crypto_create_symm_key_use; +extern crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use; +extern crypto_get_errmsg_type crypto_get_errmsg_use; +extern crypto_ctx_init_type crypto_ctx_init_use; + +extern int transform_type(const char* type); +extern bool load_crypto_module_lib(); +extern void unload_crypto_module(); +extern void initCryptoModule(char* crypto_module_params, const char* encrypt_mode); +extern void initCryptoSession(void** crypto_module_session); +extern void releaseCryptoSession(void* crypto_module_session); +extern void releaseCryptoCtx(void* crypto_module_keyctx); +extern void clearCrypto(void* crypto_module_session, void* crypto_module_keyctx); +extern void CryptoModuleParamsCheck(bool gen_key, char* params, const char* module_encrypt_mode, const char* 
module_encrypt_key, const char* module_encrypt_salt); + +#ifdef __cplusplus +} +#endif + +#endif /*COMMON_CIPHER_H*/ \ No newline at end of file diff --git a/src/bin/pg_probackup/pg_probackup.cpp b/src/bin/pg_probackup/pg_probackup.cpp index 546a789b46..120f1f510b 100644 --- a/src/bin/pg_probackup/pg_probackup.cpp +++ b/src/bin/pg_probackup/pg_probackup.cpp @@ -26,6 +26,7 @@ #include "storage/dss/dss_adaptor.h" #include #include "oss/include/restore.h" +#include "common_cipher.h" #define MIN_ULIMIT_STACK_SIZE 8388608 // 1024 * 1024 * 8 #define PROG_NAME "gs_probackup" @@ -151,6 +152,20 @@ static pgSetBackupParams *set_backup_params = NULL; pgBackup current; static ProbackupSubcmd backup_subcmd = NO_CMD; +/* encrypt options */ +bool gen_key = false; +char* encrypt_mode = NULL; +char* encrypt_key = NULL; +char* encrypt_salt = NULL; +char* encrypt_dev_params = NULL; +void* crypto_module_session = NULL; +void* crypto_module_keyctx = NULL; + +/* Mark whether encryption and decryption are performed */ +/* only when encryption and decryption are actually performed will it be marked as true */ +/* Avoid specifying encryption in non-encrypted mode to cause accidental deletion of directories */ +bool enc_flag = false; + /* Oss Client*/ void* oss_client = NULL; @@ -200,6 +215,11 @@ static ConfigOption cmd_options[] = { 'b', 154, "skip-block-validation", &skip_block_validation, SOURCE_CMD_STRICT }, { 'b', 156, "skip-external-dirs", &skip_external_dirs, SOURCE_CMD_STRICT }, { 'f', 'I', "incremental-mode", (void *)opt_incr_restore_mode, SOURCE_CMD_STRICT }, + { 's', 187, "with-encryption", &encrypt_mode, SOURCE_CMD_STRICT}, + { 's', 188, "with-key", &encrypt_key, SOURCE_CMD_STRICT}, + { 's', 189, "with-salt", &encrypt_salt, SOURCE_CMD_STRICT}, + { 's', 190, "with-device-params", &encrypt_dev_params, SOURCE_CMD_STRICT}, + { 'b', 191, "gen-key", &gen_key, SOURCE_CMD_STRICT}, { 'b', 145, "wal", &delete_wal, SOURCE_CMD_STRICT }, { 'b', 146, "expired", &delete_expired, 
SOURCE_CMD_STRICT }, { 's', 172, "status", &delete_status, SOURCE_CMD_STRICT }, @@ -519,7 +539,7 @@ static void parse_cmdline_args(int argc, char *argv[], const char *command_name) if (backup_path && !is_absolute_path(backup_path)) elog(ERROR, "-B, --backup-path must be an absolute path"); - parse_instance_name(); + parse_instance_name(); } @@ -617,9 +637,10 @@ static int do_actual_operate() /* Should not happen */ elog(ERROR, "Unknown subcommand"); } - + delete_backup_directory(instance_name); on_cleanup(); release_logfile(); + clearCrypto(crypto_module_session, crypto_module_keyctx); return res; } diff --git a/src/bin/pg_probackup/pg_probackupc.h b/src/bin/pg_probackup/pg_probackupc.h index 095c73ec5a..2b13c9c1c6 100644 --- a/src/bin/pg_probackup/pg_probackupc.h +++ b/src/bin/pg_probackup/pg_probackupc.h @@ -81,6 +81,18 @@ extern char *instance_name; extern bool specify_extdir; extern bool specify_tbsdir; +extern bool gen_key; +extern char* encrypt_mode ; +extern char* encrypt_key ; +extern char* encrypt_salt; +extern char* encrypt_dev_params; +extern void* crypto_module_session; +extern void* crypto_module_keyctx; + +extern bool do_enc; +extern bool enc_flag; + + /* show options */ extern ShowFormat show_format; @@ -98,6 +110,9 @@ extern char** commands_args; /* exclude directory list for $PGDATA file listing */ extern const char *pgdata_exclude_dir[]; +extern bool is_valid_cmd(char *cmd); +extern void delete_backup_directory(char *instance_name); + /* in backup.c */ extern int do_backup(time_t start_time, pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs, bool backup_replslots); -- Gitee From 8da20f24e4353c025194bc9d9ff0474b12912311 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=80=B6=E6=A2=A6=E5=8A=A0=E5=BE=97?= Date: Tue, 3 Sep 2024 18:49:29 +0800 Subject: [PATCH 252/347] =?UTF-8?q?probackup=E7=A1=AC=E4=BB=B6=E5=8A=A0?= =?UTF-8?q?=E5=AF=86=E6=B7=BB=E5=8A=A0hmac=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_probackup/backup.cpp | 46 +++++++++++++++++---- src/bin/pg_probackup/catalog.cpp | 56 ++++++++++++++++++-------- src/bin/pg_probackup/common_cipher.cpp | 28 ++++++++++--- src/bin/pg_probackup/common_cipher.h | 8 +++- src/bin/pg_probackup/pg_probackup.cpp | 3 +- src/bin/pg_probackup/pg_probackupc.h | 1 + 6 files changed, 110 insertions(+), 32 deletions(-) diff --git a/src/bin/pg_probackup/backup.cpp b/src/bin/pg_probackup/backup.cpp index 8c92425974..851055fcab 100644 --- a/src/bin/pg_probackup/backup.cpp +++ b/src/bin/pg_probackup/backup.cpp @@ -2915,6 +2915,7 @@ static void compress_encrypt_directory() int ret = 0; bool res = false; uint key_len = 0; + uint hmac_len = MAX_HMAC_LEN; uint enc_buffer_len = 0; uint out_buffer_len = 0; long int backup_tar_pos = 0; @@ -2923,6 +2924,7 @@ static void compress_encrypt_directory() char sys_cmd[MAXPGPATH] = {0}; char tar_file[MAXPGPATH] = {0}; char enc_file[MAXPGPATH] = {0}; + unsigned char hmac_buffer[MAX_HMAC_LEN + 1] = {0}; unsigned char enc_buffer[MAX_ENCRYPT_LEN + 1] = {0}; unsigned char out_buffer[MAX_ENCRYPT_LEN + 1] = {0}; char errmsg[MAX_ERRMSG_LEN] = {0}; @@ -2969,25 +2971,43 @@ static void compress_encrypt_directory() ret = crypto_create_symm_key_use(crypto_module_session, (ModuleSymmKeyAlgo)algo, (unsigned char*)key, (size_t*)&key_len); if (ret != 1) { crypto_get_errmsg_use(NULL, errmsg); - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); elog(ERROR, "crypto module gen key error, errmsg:%s\n", errmsg); } } else { key = SEC_decodeBase64(encrypt_key, &key_len); if (NULL == key) { - clearCrypto(crypto_module_session, crypto_module_keyctx); - elog(ERROR, "crypto module decode key error, please check --with-key."); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); + elog(ERROR, "crypto module decode key error, please check --with-key.\n"); } 
} + encrypt_key = SEC_encodeBase64(key, (GS_UINT32)key_len); + if (NULL == encrypt_key) { + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); + elog(ERROR, "crypto module encode key error.\n"); + } + + elog(INFO, "crypto module encrypt with key: %s , salt: %s \n", encrypt_key, encrypt_salt); + ret = crypto_ctx_init_use(crypto_module_session, &crypto_module_keyctx, (ModuleSymmKeyAlgo)algo, 1, (unsigned char*)key, key_len); if (ret != 1) { crypto_get_errmsg_use(NULL, errmsg); - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); elog(ERROR, "crypto keyctx init error, errmsg:%s\n", errmsg); } + algo = getHmacType((ModuleSymmKeyAlgo)algo); + + ret = crypto_hmac_init_use(crypto_module_session, &crypto_hmac_keyctx, (ModuleSymmKeyAlgo)algo, (unsigned char*)key, key_len); + if (ret != 1) + { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); + elog(ERROR, "crypto hmac keyctx init error, errmsg:%s\n", errmsg); + } + fseek(backup_tar_fd,0,SEEK_END); backup_tar_length = ftell(backup_tar_fd); fseek(backup_tar_fd,0,SEEK_SET); @@ -3011,25 +3031,35 @@ static void compress_encrypt_directory() ret = memset_s(out_buffer, MAX_ENCRYPT_LEN + 1, '\0', MAX_ENCRYPT_LEN + 1); securec_check(ret, "\0", "\0"); + ret = memset_s(hmac_buffer, MAX_HMAC_LEN + 1, '\0', MAX_HMAC_LEN + 1); + securec_check(ret, "\0", "\0"); + + ret = crypto_hmac_use(crypto_hmac_keyctx, (unsigned char*)enc_buffer, enc_buffer_len, hmac_buffer, (size_t*)&hmac_len); + if (ret != 1) { + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); + elog(ERROR, ("failed to calculate hmac\n")); + } + ret = crypto_encrypt_decrypt_use(crypto_module_keyctx, 1, (unsigned char*)enc_buffer, enc_buffer_len, (unsigned char*)encrypt_salt, MAX_IV_LEN, out_buffer, (size_t*)&out_buffer_len, NULL); if (ret != 1) { - 
clearCrypto(crypto_module_session, crypto_module_keyctx); - elog(ERROR, ("failed to encrypt backup file")); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); + elog(ERROR, ("failed to encrypt backup file\n")); } fwrite(out_buffer, 1, out_buffer_len, enc_backup_fd); + fwrite(hmac_buffer, 1, hmac_len, enc_backup_fd); } fclose(backup_tar_fd); fclose(enc_backup_fd); - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); rc = sprintf_s(sys_cmd, MAXPGPATH, "rm %s %s.tar -rf", current.root_dir, current.root_dir); securec_check_ss_c(rc, "\0", "\0"); if (!is_valid_cmd(sys_cmd)) { - elog(ERROR, "cmd is rejected"); + elog(ERROR, "cmd is rejected\n"); } system(sys_cmd); enc_flag = true; diff --git a/src/bin/pg_probackup/catalog.cpp b/src/bin/pg_probackup/catalog.cpp index d79c7bc2b7..b257429ee4 100644 --- a/src/bin/pg_probackup/catalog.cpp +++ b/src/bin/pg_probackup/catalog.cpp @@ -2884,13 +2884,16 @@ static void uncompress_decrypt_directory(const char *instance_name_str) DIR *data_dir = NULL; struct dirent *data_ent = NULL; uint key_len = 0; + uint hmac_len = MAX_HMAC_LEN; uint key_idx_uint = 0; uint dec_buffer_len = 0; uint out_buffer_len = MAX_CRYPTO_MODULE_LEN; long int enc_file_pos = 0; long int enc_file_len = 0; char sys_cmd[MAXPGPATH] = {0}; - char* key = NULL; + char* key = NULL; + unsigned char hmac_read_buffer[MAX_HMAC_LEN +1] = {0}; + unsigned char hmac_cal_buffer[MAX_HMAC_LEN +1] = {0}; unsigned char dec_buffer[MAX_CRYPTO_MODULE_LEN + 1] = {0}; unsigned char out_buffer[MAX_CRYPTO_MODULE_LEN + 1] = {0}; char enc_backup_file[MAXPGPATH] = {0}; @@ -2912,22 +2915,29 @@ static void uncompress_decrypt_directory(const char *instance_name_str) rc = crypto_create_symm_key_use(crypto_module_session, (ModuleSymmKeyAlgo)algo, (unsigned char*)key, (size_t*)&key_len); if (rc != 1) { crypto_get_errmsg_use(NULL, errmsg); - clearCrypto(crypto_module_session, 
crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); elog(ERROR, "crypto module gen key error, errmsg:%s\n", errmsg); } } else { key = SEC_decodeBase64(encrypt_key, &key_len); if (NULL == key) { - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); elog(ERROR, "crypto module decode key error, please check --with-key."); } } rc = crypto_ctx_init_use(crypto_module_session, &crypto_module_keyctx, (ModuleSymmKeyAlgo)algo, 0, (unsigned char*)key, key_len); - if (rc != 1) - { + if (rc != 1) { crypto_get_errmsg_use(NULL, errmsg); - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); + elog(ERROR, "crypto keyctx init error, errmsg:%s\n", errmsg); + } + + algo = getHmacType((ModuleSymmKeyAlgo)algo); + rc = crypto_hmac_init_use(crypto_module_session, &crypto_hmac_keyctx, (ModuleSymmKeyAlgo)algo, (unsigned char*)key, key_len); + if (rc != 1) { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); elog(ERROR, "crypto keyctx init error, errmsg:%s\n", errmsg); } @@ -2949,7 +2959,7 @@ static void uncompress_decrypt_directory(const char *instance_name_str) FILE* enc_backup_fd = fopen(enc_backup_file,"rb"); if(NULL == enc_backup_fd) { - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); elog(ERROR, ("failed to create or open encrypt backup file.")); return; } @@ -2968,17 +2978,18 @@ static void uncompress_decrypt_directory(const char *instance_name_str) while(enc_file_pos < enc_file_len) { - if(enc_file_pos + MAX_CRYPTO_MODULE_LEN < enc_file_len) { - memset_s(dec_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); - memset_s(out_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); + memset_s(dec_buffer, 
MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); + memset_s(out_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); + + if(enc_file_pos + MAX_CRYPTO_MODULE_LEN + MAX_HMAC_LEN < enc_file_len) { fread(dec_buffer, 1, MAX_CRYPTO_MODULE_LEN, enc_backup_fd); + fread(hmac_read_buffer, 1, MAX_HMAC_LEN, enc_backup_fd); dec_buffer_len = MAX_CRYPTO_MODULE_LEN; - enc_file_pos += MAX_CRYPTO_MODULE_LEN; + enc_file_pos += (MAX_CRYPTO_MODULE_LEN + MAX_HMAC_LEN); } else { - memset_s(dec_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); - memset_s(out_buffer, MAX_CRYPTO_MODULE_LEN, 0, MAX_CRYPTO_MODULE_LEN); - fread(dec_buffer, 1, enc_file_len - enc_file_pos, enc_backup_fd); - dec_buffer_len = enc_file_len - enc_file_pos; + fread(dec_buffer, 1, enc_file_len - (enc_file_pos + MAX_HMAC_LEN), enc_backup_fd); + fread(hmac_read_buffer, 1, MAX_HMAC_LEN, enc_backup_fd); + dec_buffer_len = enc_file_len - (enc_file_pos + MAX_HMAC_LEN); enc_file_pos = enc_file_len; } @@ -2986,16 +2997,27 @@ static void uncompress_decrypt_directory(const char *instance_name_str) (unsigned char*)encrypt_salt, MAX_IV_LEN, (unsigned char*)out_buffer, (size_t*)&out_buffer_len, NULL); if(rc != 1) { crypto_get_errmsg_use(NULL, errmsg); - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); elog(ERROR, ("failed to decrypt enc_backup_file, errmsg: %s"), errmsg); } + rc = crypto_hmac_use(crypto_hmac_keyctx, (unsigned char*)out_buffer, out_buffer_len, hmac_cal_buffer, (size_t*)&hmac_len); + if(rc != 1) { + crypto_get_errmsg_use(NULL, errmsg); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); + elog(ERROR, ("failed to calculate hmac, errmsg: %s"), errmsg); + } + + if (strncmp((char*)hmac_cal_buffer, (char*)hmac_read_buffer, (size_t)hmac_len) != 0) { + elog(ERROR, ("hmac verify failed\n")); + } + fwrite(out_buffer, 1, out_buffer_len, dec_file_fd); } fclose(dec_file_fd); fclose(enc_backup_fd); - 
clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); rc = sprintf_s(sys_cmd, MAXPGPATH, "tar -xPf %s/%s.tar", backup_instance_path, data_ent->d_name); diff --git a/src/bin/pg_probackup/common_cipher.cpp b/src/bin/pg_probackup/common_cipher.cpp index 29ca68922c..c7185a0ca2 100644 --- a/src/bin/pg_probackup/common_cipher.cpp +++ b/src/bin/pg_probackup/common_cipher.cpp @@ -14,9 +14,7 @@ typedef void (*crypto_module_sess_exit_type)(void *sess); typedef int (*crypto_result_size_type)(void *ctx, int enc, size_t data_size); typedef void (*crypto_ctx_clean_type)(void *ctx); typedef int (*crypto_digest_type)(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); -typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); typedef void (*crypto_hmac_clean_type)(void *ctx); -typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); typedef int (*crypto_gen_random_type)(void *sess, char *buffer, size_t size); typedef int (*crypto_deterministic_enc_dec_type)(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); @@ -32,9 +30,9 @@ static crypto_result_size_type crypto_result_size_use = NULL; static crypto_ctx_clean_type crypto_ctx_clean_use = NULL; crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use = NULL; static crypto_digest_type crypto_digest_use = NULL; -static crypto_hmac_init_type crypto_hmac_init_use = NULL; +crypto_hmac_init_type crypto_hmac_init_use = NULL; static crypto_hmac_clean_type crypto_hmac_clean_use = NULL; -static crypto_hmac_type crypto_hmac_use = NULL; +crypto_hmac_type crypto_hmac_use = NULL; static crypto_gen_random_type crypto_gen_random_use = NULL; static 
crypto_deterministic_enc_dec_type crypto_deterministic_enc_dec_use = NULL; crypto_get_errmsg_type crypto_get_errmsg_use = NULL; @@ -127,6 +125,17 @@ int transform_type(const char* type) } +int getHmacType(ModuleSymmKeyAlgo algo) +{ + if (algo >= MODULE_AES_128_CBC && algo <= MODULE_AES_256_GCM) { + return MODULE_HMAC_SHA256; + } else if (algo == MODULE_SM4_CBC || algo == MODULE_SM4_CTR) { + return MODULE_HMAC_SM3; + } + + return MODULE_ALGO_MAX; +} + void initCryptoModule(char* crypto_module_params, const char* encrypt_mode) { int ret = 1; @@ -184,8 +193,17 @@ void releaseCryptoCtx(void* crypto_module_keyctx) } } -void clearCrypto(void* crypto_module_session, void* crypto_module_keyctx) +void releaseHmacCtx(void* crypto_hmac_keyctx) +{ + if (libhandle && crypto_hmac_keyctx) { + crypto_hmac_clean_use(crypto_hmac_keyctx); + crypto_hmac_keyctx = NULL; + } +} + +void clearCrypto(void* crypto_module_session, void* crypto_module_keyctx, void* crypto_hmac_keyctx) { + releaseHmacCtx(crypto_hmac_keyctx); releaseCryptoCtx(crypto_module_keyctx); releaseCryptoSession(crypto_module_session); unload_crypto_module(); diff --git a/src/bin/pg_probackup/common_cipher.h b/src/bin/pg_probackup/common_cipher.h index 66b020cc61..81e52bc515 100644 --- a/src/bin/pg_probackup/common_cipher.h +++ b/src/bin/pg_probackup/common_cipher.h @@ -19,6 +19,7 @@ extern "C" { #define MAX_PROVIDER_NAME_LEN 128 #define MAX_ERRMSG_LEN 256 #define MAX_IV_LEN 16 +#define MAX_HMAC_LEN 32 typedef enum { MODULE_AES_128_CBC = 0, @@ -60,20 +61,25 @@ typedef int (*crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *da typedef int (*crypto_create_symm_key_type)(void *sess, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t *key_id_size); typedef int (*crypto_get_errmsg_type)(void *sess, char *errmsg); typedef int (*crypto_ctx_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, int enc, unsigned char *key_id, size_t key_id_size); +typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, 
ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); +typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); extern crypto_create_symm_key_type crypto_create_symm_key_use; extern crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use; extern crypto_get_errmsg_type crypto_get_errmsg_use; extern crypto_ctx_init_type crypto_ctx_init_use; +extern crypto_hmac_init_type crypto_hmac_init_use; +extern crypto_hmac_type crypto_hmac_use; extern int transform_type(const char* type); +extern int getHmacType(ModuleSymmKeyAlgo algo); extern bool load_crypto_module_lib(); extern void unload_crypto_module(); extern void initCryptoModule(char* crypto_module_params, const char* encrypt_mode); extern void initCryptoSession(void** crypto_module_session); extern void releaseCryptoSession(void* crypto_module_session); extern void releaseCryptoCtx(void* crypto_module_keyctx); -extern void clearCrypto(void* crypto_module_session, void* crypto_module_keyctx); +extern void clearCrypto(void* crypto_module_session, void* crypto_module_keyctx, void* crypto_hmac_keyctx); extern void CryptoModuleParamsCheck(bool gen_key, char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt); #ifdef __cplusplus diff --git a/src/bin/pg_probackup/pg_probackup.cpp b/src/bin/pg_probackup/pg_probackup.cpp index 120f1f510b..c78fec59d7 100644 --- a/src/bin/pg_probackup/pg_probackup.cpp +++ b/src/bin/pg_probackup/pg_probackup.cpp @@ -160,6 +160,7 @@ char* encrypt_salt = NULL; char* encrypt_dev_params = NULL; void* crypto_module_session = NULL; void* crypto_module_keyctx = NULL; +void* crypto_hmac_keyctx = NULL; /* Mark whether encryption and decryption are performed */ /* only when encryption and decryption are actually performed will it be marked as true */ @@ -640,7 +641,7 @@ static int do_actual_operate() delete_backup_directory(instance_name); on_cleanup(); 
release_logfile(); - clearCrypto(crypto_module_session, crypto_module_keyctx); + clearCrypto(crypto_module_session, crypto_module_keyctx, crypto_hmac_keyctx); return res; } diff --git a/src/bin/pg_probackup/pg_probackupc.h b/src/bin/pg_probackup/pg_probackupc.h index 2b13c9c1c6..151fa1898f 100644 --- a/src/bin/pg_probackup/pg_probackupc.h +++ b/src/bin/pg_probackup/pg_probackupc.h @@ -88,6 +88,7 @@ extern char* encrypt_salt; extern char* encrypt_dev_params; extern void* crypto_module_session; extern void* crypto_module_keyctx; +extern void* crypto_hmac_keyctx; extern bool do_enc; extern bool enc_flag; -- Gitee From 307b67f584eb1b988042f06b6fdf386aeff6eb37 Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Sat, 31 Aug 2024 17:49:34 +0800 Subject: [PATCH 253/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=88=97=E4=B8=BA?= =?UTF-8?q?=E5=85=81=E8=AE=B8NULL=E5=80=BC=E6=97=B6=EF=BC=8C=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0=E6=A3=80=E6=B5=8B=E6=98=AF=E5=90=A6=E4=B8=BA=E9=80=BB?= =?UTF-8?q?=E8=BE=91=E5=A4=8D=E5=88=B6=E8=A7=A3=E7=A0=81=E5=88=97=E7=9A=84?= =?UTF-8?q?=E5=8A=9F=E8=83=BD=20=EF=BC=88cherry=20picked=20commit=20from?= =?UTF-8?q?=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/tablecmds.cpp | 14 ++++++++++++++ src/test/regress/expected/replica_identity.out | 8 ++++++++ src/test/regress/sql/replica_identity.sql | 7 +++++++ 3 files changed, 29 insertions(+) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 721eb8b5d1..88e3c4214b 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -13806,6 +13806,7 @@ static ObjectAddress ATExecDropNotNull(Relation rel, const char* colName, LOCKMO List* indexoidlist = NIL; ListCell* indexoidscan = NULL; ObjectAddress address; + Oid replidindex; /* * lookup the attribute @@ -13833,6 +13834,9 @@ static ObjectAddress 
ATExecDropNotNull(Relation rel, const char* colName, LOCKMO /* Loop over all indexes on the relation */ indexoidlist = RelationGetIndexList(rel); + /* replica identity index */ + replidindex = rel->rd_replidindex; + foreach (indexoidscan, indexoidlist) { Oid indexoid = lfirst_oid(indexoidscan); HeapTuple indexTuple; @@ -13863,6 +13867,16 @@ static ObjectAddress ATExecDropNotNull(Relation rel, const char* colName, LOCKMO } } + /* REPLICA IDENTIFY can't drop not null */ + if (replidindex == indexoid) { + for (i = 0; i < indnkeyatts; i++) { + if (indexStruct->indkey.values[i] == attnum) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("column \"%s\" used as replica identity can't drop not null", colName))); + } + } + ReleaseSysCache(indexTuple); } diff --git a/src/test/regress/expected/replica_identity.out b/src/test/regress/expected/replica_identity.out index ab2698531a..ab27a61017 100644 --- a/src/test/regress/expected/replica_identity.out +++ b/src/test/regress/expected/replica_identity.out @@ -80,6 +80,13 @@ SELECT count(*) FROM pg_index WHERE indrelid = 'test_replica_identity'::regclass 1 (1 row) +---- +-- Make sure can't alter replica identity index alown NULL +---- +CREATE UNIQUE INDEX uni_idx_keya ON test_replica_identity (keya); +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX uni_idx_keya; +ALTER TABLE test_replica_identity MODIFY keya NULL; -- fail +ERROR: column "keya" used as replica identity can't drop not null ---- -- Make sure non index cases work ---- @@ -113,6 +120,7 @@ SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; nonkey | text | Indexes: "test_replica_identity_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default + "uni_idx_keya" UNIQUE, btree (keya) TABLESPACE pg_default "test_replica_identity_hash" hash (nonkey) TABLESPACE pg_default "test_replica_identity_keyab" btree (keya, keyb) TABLESPACE pg_default Replica Identity: FULL diff --git 
a/src/test/regress/sql/replica_identity.sql b/src/test/regress/sql/replica_identity.sql index 0bdbfb17ea..6565ef329d 100644 --- a/src/test/regress/sql/replica_identity.sql +++ b/src/test/regress/sql/replica_identity.sql @@ -41,6 +41,13 @@ SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; SELECT count(*) FROM pg_index WHERE indrelid = 'test_replica_identity'::regclass AND indisreplident; +---- +-- Make sure can't alter replica identity index alown NULL +---- +CREATE UNIQUE INDEX uni_idx_keya ON test_replica_identity (keya); +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX uni_idx_keya; +ALTER TABLE test_replica_identity MODIFY keya NULL; -- fail + ---- -- Make sure non index cases work ---- -- Gitee From 8f32c4fc9fe6ed1f663050782845d4c24321b63d Mon Sep 17 00:00:00 2001 From: lyoursly Date: Thu, 29 Aug 2024 14:59:52 +0800 Subject: [PATCH 254/347] =?UTF-8?q?=E6=B7=BB=E5=8A=A0GAUSSHOME=E7=8E=AF?= =?UTF-8?q?=E5=A2=83=E5=8F=98=E9=87=8F=E5=AE=A1=E6=9F=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/common_cipher.cpp | 10 +++++++--- src/bin/psql/common_cipher.cpp | 10 ++++++++-- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/bin/pg_dump/common_cipher.cpp b/src/bin/pg_dump/common_cipher.cpp index 3b1a06333f..e97bc86e81 100644 --- a/src/bin/pg_dump/common_cipher.cpp +++ b/src/bin/pg_dump/common_cipher.cpp @@ -1,5 +1,6 @@ #include "pg_backup_cipher.h" #include "port.h" +#include "libpq/pqcomm.h" #define MAX_PROVIDER_NAME_LEN 128 #define MAX_ERRMSG_LEN 256 @@ -78,9 +79,12 @@ bool load_crypto_module_lib() errno_t rc = 0; char libpath[1024] = {0}; char* gaussHome = gs_getenv_r("GAUSSHOME"); - - rc = snprintf_s(libpath, sizeof(libpath), sizeof(libpath) - 1, "%s/lib/postgresql/common_cipher.so",gaussHome); - securec_check_ss_c(rc, "", ""); + if (check_client_env(gaussHome)){ + rc = snprintf_s(libpath, sizeof(libpath), sizeof(libpath) - 1, 
"%s/lib/postgresql/common_cipher.so",gaussHome); + securec_check_ss_c(rc, "", ""); + } else { + exit_horribly(NULL, "$GAUSSHOME set error or net set\n"); + } libhandle = dlopen(libpath, RTLD_LAZY); if (libhandle == NULL) { diff --git a/src/bin/psql/common_cipher.cpp b/src/bin/psql/common_cipher.cpp index 17232488a0..6066246b63 100644 --- a/src/bin/psql/common_cipher.cpp +++ b/src/bin/psql/common_cipher.cpp @@ -2,6 +2,7 @@ #include "securec.h" #include "securec_check.h" #include "port.h" +#include "libpq/pqcomm.h" #define MAX_PROVIDER_NAME_LEN 128 #define MAX_ERRMSG_LEN 256 @@ -83,8 +84,13 @@ bool load_crypto_module_lib() char libpath[1024] = {0}; char* gaussHome = gs_getenv_r("GAUSSHOME"); - rc = snprintf_s(libpath, sizeof(libpath), sizeof(libpath) - 1, "%s/lib/postgresql/common_cipher.so",gaussHome); - securec_check_ss_c(rc, "", ""); + if (check_client_env(gaussHome)){ + rc = snprintf_s(libpath, sizeof(libpath), sizeof(libpath) - 1, "%s/lib/postgresql/common_cipher.so",gaussHome); + securec_check_ss_c(rc, "", ""); + } else { + fprintf(stderr, "$GAUSSHOME set error or net set\n"); + exit(1); + } libhandle = dlopen(libpath, RTLD_LAZY); if (libhandle == NULL) { -- Gitee From 6e0800f762bdaa768c444c4a98cb67f776ebf22e Mon Sep 17 00:00:00 2001 From: lyoursly Date: Tue, 3 Sep 2024 14:14:07 +0800 Subject: [PATCH 255/347] =?UTF-8?q?=E6=94=AF=E6=8C=81hmac?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/common_cipher.cpp | 59 ++++++++++++++++++++++-- src/bin/pg_dump/pg_backup.h | 1 + src/bin/pg_dump/pg_backup_archiver.cpp | 5 ++- src/bin/pg_dump/pg_backup_cipher.h | 10 ++++- src/bin/pg_dump/pg_backup_directory.cpp | 32 ++++++++++--- src/bin/psql/common_cipher.cpp | 60 +++++++++++++++++++++++-- src/bin/psql/common_cipher.h | 6 +++ src/gausskernel/cbb/utils/aes/aes.cpp | 51 ++++++++++++++++++--- src/include/utils/aes.h | 8 +++- 9 files changed, 210 insertions(+), 22 deletions(-) diff --git 
a/src/bin/pg_dump/common_cipher.cpp b/src/bin/pg_dump/common_cipher.cpp index e97bc86e81..b65a41da06 100644 --- a/src/bin/pg_dump/common_cipher.cpp +++ b/src/bin/pg_dump/common_cipher.cpp @@ -51,7 +51,6 @@ typedef void (*crypto_ctx_clean_type)(void *ctx); typedef int (*crypto_digest_type)(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); typedef void (*crypto_hmac_clean_type)(void *ctx); -typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); typedef int (*crypto_gen_random_type)(void *sess, char *buffer, size_t size); typedef int (*crypto_deterministic_enc_dec_type)(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); typedef int (*crypto_get_errmsg_type)(void *sess, char *errmsg); @@ -69,7 +68,7 @@ crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use = NULL; static crypto_digest_type crypto_digest_use = NULL; static crypto_hmac_init_type crypto_hmac_init_use = NULL; static crypto_hmac_clean_type crypto_hmac_clean_use = NULL; -static crypto_hmac_type crypto_hmac_use = NULL; +crypto_hmac_type crypto_hmac_use = NULL; static crypto_gen_random_type crypto_gen_random_use = NULL; static crypto_deterministic_enc_dec_type crypto_deterministic_enc_dec_use = NULL; static crypto_get_errmsg_type crypto_get_errmsg_use = NULL; @@ -137,7 +136,7 @@ void unload_crypto_module(int code, void* args) } -static int transform_type(char* type) +static ModuleSymmKeyAlgo transform_type(char* type) { if (strcmp(type, "AES128_CBC") == 0) { return MODULE_AES_128_CBC; @@ -157,7 +156,7 @@ static int transform_type(char* type) return MODULE_SM4_CTR; } - return -1; + return MODULE_ALGO_MAX; } @@ -249,6 +248,57 @@ void 
symmEncDec(ArchiveHandle* AH, bool isEnc, char* indata, int inlen, char* ou ret = crypto_encrypt_decrypt_use(AH->publicArc.cryptoModlueCtx.key_ctx, isEnc, (unsigned char*)indata, inlen, AH->publicArc.rand, 16, (unsigned char*)outdata, (size_t*)outlen, NULL); if (ret != 1) { crypto_get_errmsg_use(NULL, errmsg); + releaseHmacCtx(0, AH); + releaseCryptoCtx(0, AH); + releaseCryptoSession(0, AH); + unload_crypto_module(0, NULL); + exit_horribly(NULL, "%s\n", errmsg); + } +} + +static ModuleSymmKeyAlgo getHmacType(ModuleSymmKeyAlgo symmAlgoType) +{ + if (symmAlgoType >= MODULE_AES_128_CBC && symmAlgoType <= MODULE_AES_256_GCM) { + return MODULE_HMAC_SHA256; + } else if (symmAlgoType == MODULE_SM4_CBC || symmAlgoType == MODULE_SM4_CTR){ + return MODULE_HMAC_SM3; + } + + return MODULE_ALGO_MAX; +} + +void initHmacCtx(ArchiveHandle* AH) +{ + int ret = 1; + Archive* fort = (Archive*)AH; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_hmac_init_use(fort->cryptoModlueCtx.moduleSession, &(fort->cryptoModlueCtx.hmac_ctx), getHmacType(transform_type(fort->crypto_type)), fort->Key, fort->keylen); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + crypto_module_sess_exit_use(fort->cryptoModlueCtx.moduleSession); + exit_horribly(NULL, "%s\n", errmsg); + } + +} + +void releaseHmacCtx(int code, void* args) +{ + if (libhandle && ((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.hmac_ctx) { + crypto_hmac_clean_use(((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.hmac_ctx); + ((ArchiveHandle*)args)->publicArc.cryptoModlueCtx.hmac_ctx = NULL; + } +} + +void cryptoHmac(ArchiveHandle* AH, char* indata, int inlen, char* outdata, int* outlen) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_hmac_use(AH->publicArc.cryptoModlueCtx.hmac_ctx, (unsigned char*)indata, inlen, (unsigned char*)outdata, (size_t*)outlen); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + releaseHmacCtx(0, AH); releaseCryptoCtx(0, AH); releaseCryptoSession(0, AH); 
unload_crypto_module(0, NULL); @@ -311,6 +361,7 @@ void CryptoModuleParamsCheck(ArchiveHandle* AH, const char* params, const char* } initCryptoKeyCtx((ArchiveHandle*)fout); + initHmacCtx((ArchiveHandle*)fout); fout->encryptfile = true; diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h index 94956c384a..2d3bf5d4b6 100644 --- a/src/bin/pg_dump/pg_backup.h +++ b/src/bin/pg_dump/pg_backup.h @@ -65,6 +65,7 @@ typedef enum _teSection { typedef struct { void *moduleSession; void *key_ctx; + void *hmac_ctx; }CryptoModuleCtx; /* diff --git a/src/bin/pg_dump/pg_backup_archiver.cpp b/src/bin/pg_dump/pg_backup_archiver.cpp index f762fbf3b6..103f5bb434 100644 --- a/src/bin/pg_dump/pg_backup_archiver.cpp +++ b/src/bin/pg_dump/pg_backup_archiver.cpp @@ -1733,7 +1733,9 @@ int ahwrite(const void* ptr, size_t size, size_t nmemb, ArchiveHandle* AH) AH->publicArc.Key, AH->publicArc.rand, AH->publicArc.cryptoModlueCtx.key_ctx, - crypto_encrypt_decrypt_use); + crypto_encrypt_decrypt_use, + AH->publicArc.cryptoModlueCtx.hmac_ctx, + crypto_hmac_use); if (!encrypt_result) exit_horribly(modulename, "Encryption failed: %s\n", strerror(errno)); } else { @@ -3727,6 +3729,7 @@ void on_exit_close_archive(Archive* AHX) { shutdown_info.AHX = AHX; on_exit_nicely(archive_close_connection, &shutdown_info); + on_exit_nicely(releaseHmacCtx, AHX); on_exit_nicely(releaseCryptoCtx, AHX); on_exit_nicely(releaseCryptoSession, AHX); on_exit_nicely(unload_crypto_module, NULL); diff --git a/src/bin/pg_dump/pg_backup_cipher.h b/src/bin/pg_dump/pg_backup_cipher.h index 089cdaa2da..4b165713ef 100644 --- a/src/bin/pg_dump/pg_backup_cipher.h +++ b/src/bin/pg_dump/pg_backup_cipher.h @@ -14,10 +14,15 @@ extern "C" { #define MAX_CRYPTO_CACHE_LEN 8192 #define CRYPTO_BLOCK_SIZE 16 -#define MAX_WRITE_CACHE_LEN (MAX_CRYPTO_CACHE_LEN - CRYPTO_BLOCK_SIZE) /*加密算法补pad模式为强补,最多可以补16字节,所以写缓存少16字节,则密文最长8192、保证读取时可以整块密文读入*/ +#define CRYPTO_HMAC_SIZE 32 
+/*加密算法补pad模式为强补,最多可以补16字节,所以写缓存少16字节,又因为要带上hmac,需要再少32字节,这样能保证密文最长8192、读取时可以整块密文读入*/ +#define MAX_WRITE_CACHE_LEN (MAX_CRYPTO_CACHE_LEN - CRYPTO_BLOCK_SIZE - CRYPTO_HMAC_SIZE) typedef int (*crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); +typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); + extern crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use; +extern crypto_hmac_type crypto_hmac_use; extern bool load_crypto_module_lib(); extern void unload_crypto_module(int code, void* args); @@ -28,6 +33,9 @@ extern void initCryptoKeyCtx(ArchiveHandle* AH); extern void releaseCryptoCtx(int code, void* args); extern void symmEncDec(ArchiveHandle* AH, bool isEnc, char* indata, int inlen, char* outdata, int* outlen); extern void symmGenerateKey(ArchiveHandle* AH); +extern void initHmacCtx(ArchiveHandle* AH); +extern void releaseHmacCtx(int code, void* args); +extern void cryptoHmac(ArchiveHandle* AH, char* indata, int inlen, char* outdata, int* outlen); extern void CryptoModuleParamsCheck(ArchiveHandle* AH, const char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt, bool is_gen_key); #ifdef __cplusplus diff --git a/src/bin/pg_dump/pg_backup_directory.cpp b/src/bin/pg_dump/pg_backup_directory.cpp index b71ac8169f..f0d998684f 100644 --- a/src/bin/pg_dump/pg_backup_directory.cpp +++ b/src/bin/pg_dump/pg_backup_directory.cpp @@ -803,8 +803,18 @@ static void encryptAndFlushCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCa { char flushData[MAX_CRYPTO_CACHE_LEN] = {0}; int flushLen = MAX_CRYPTO_CACHE_LEN; + int hmacLen = 0; - symmEncDec(AH, true, cryptoCache->cryptoCache.wrCryptoCache.writeCache, cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen, flushData, &flushLen); + /*计算明文hmac,填充到密文头*/ + 
cryptoHmac(AH, cryptoCache->cryptoCache.wrCryptoCache.writeCache, cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen, flushData, &hmacLen); + + /*去掉填充hmac的长度作为输入*/ + flushLen = MAX_CRYPTO_CACHE_LEN - hmacLen; + + symmEncDec(AH, true, cryptoCache->cryptoCache.wrCryptoCache.writeCache, cryptoCache->cryptoCache.wrCryptoCache.writeCacheLen, flushData + hmacLen, &flushLen); + + /*输出密文长度再加上hmac的长度作为最终刷盘长度*/ + flushLen += hmacLen; cfwrite(flushData, flushLen, FH); } @@ -829,12 +839,24 @@ static void fillReadCryptoCache(ArchiveHandle* AH, DFormatCryptoCache* cryptoCac char encData[MAX_CRYPTO_CACHE_LEN] = {0}; int encLen = 0; - /*先读取文件密文,然后解密写入缓存,这里先直接放缓存*/ + /*先读取文件密文,然后解密写入缓存*/ encLen = cfread(encData, MAX_CRYPTO_CACHE_LEN, FH); - if (encLen > 0) { - cryptoCache->cryptoCache.rCryptoCache.readCacheLen = encLen; - symmEncDec(AH, false, encData, encLen, cryptoCache->cryptoCache.rCryptoCache.readCache, &(cryptoCache->cryptoCache.rCryptoCache.readCacheLen)); + if (encLen >= (CRYPTO_BLOCK_SIZE + CRYPTO_HMAC_SIZE)) { + char hmac[CRYPTO_HMAC_SIZE + 1] = {0}; + int hmacLen = 0; + + cryptoCache->cryptoCache.rCryptoCache.readCacheLen = encLen - CRYPTO_HMAC_SIZE; + symmEncDec(AH, false, encData + CRYPTO_HMAC_SIZE, encLen - CRYPTO_HMAC_SIZE, cryptoCache->cryptoCache.rCryptoCache.readCache, &(cryptoCache->cryptoCache.rCryptoCache.readCacheLen)); + + /*对明文做hmac进行校验*/ + cryptoHmac(AH, cryptoCache->cryptoCache.rCryptoCache.readCache, cryptoCache->cryptoCache.rCryptoCache.readCacheLen, hmac, &hmacLen); + + if (hmacLen != CRYPTO_HMAC_SIZE || strncmp(hmac, encData, CRYPTO_HMAC_SIZE) != 0) { + exit_horribly(modulename, "hmac verify failed\n"); + } + } else if (encLen > 0) { + exit_horribly(modulename, "read encrypted data error\n"); } } diff --git a/src/bin/psql/common_cipher.cpp b/src/bin/psql/common_cipher.cpp index 6066246b63..4f75f1a679 100644 --- a/src/bin/psql/common_cipher.cpp +++ b/src/bin/psql/common_cipher.cpp @@ -54,7 +54,6 @@ typedef void (*crypto_ctx_clean_type)(void *ctx); 
typedef int (*crypto_digest_type)(void *sess, ModuleDigestAlgo algo, unsigned char * data, size_t data_size,unsigned char *result, size_t *result_size); typedef int (*crypto_hmac_init_type)(void *sess, void **ctx, ModuleSymmKeyAlgo algo, unsigned char *key_id, size_t key_id_size); typedef void (*crypto_hmac_clean_type)(void *ctx); -typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); typedef int (*crypto_gen_random_type)(void *sess, char *buffer, size_t size); typedef int (*crypto_deterministic_enc_dec_type)(void *sess, int enc, unsigned char *data, unsigned char *key_id, size_t key_id_size, size_t data_size, unsigned char *result, size_t *result_size); typedef int (*crypto_get_errmsg_type)(void *sess, char *errmsg); @@ -73,7 +72,7 @@ crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use = NULL; static crypto_digest_type crypto_digest_use = NULL; static crypto_hmac_init_type crypto_hmac_init_use = NULL; static crypto_hmac_clean_type crypto_hmac_clean_use = NULL; -static crypto_hmac_type crypto_hmac_use = NULL; +crypto_hmac_type crypto_hmac_use = NULL; static crypto_gen_random_type crypto_gen_random_use = NULL; static crypto_deterministic_enc_dec_type crypto_deterministic_enc_dec_use = NULL; static crypto_get_errmsg_type crypto_get_errmsg_use = NULL; @@ -143,7 +142,7 @@ void unload_crypto_module(int code, void* args) } } -static int transform_type(char* type) +static ModuleSymmKeyAlgo transform_type(char* type) { if (strcmp(type, "AES128_CBC") == 0) { return MODULE_AES_128_CBC; @@ -163,7 +162,7 @@ static int transform_type(char* type) return MODULE_SM4_CTR; } - return -1; + return MODULE_ALGO_MAX; } @@ -251,6 +250,57 @@ void symmEncDec(DecryptInfo* pDecryptInfo, bool isEnc, char* indata, int inlen, } } +static ModuleSymmKeyAlgo getHmacType(ModuleSymmKeyAlgo symmAlgoType) +{ + if (symmAlgoType >= MODULE_AES_128_CBC && symmAlgoType <= MODULE_AES_256_GCM) { + return MODULE_HMAC_SHA256; + } else 
if (symmAlgoType == MODULE_SM4_CBC || symmAlgoType == MODULE_SM4_CTR){ + return MODULE_HMAC_SM3; + } + + return MODULE_ALGO_MAX; +} + +void initHmacCtx(DecryptInfo* pDecryptInfo) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_hmac_init_use(pDecryptInfo->moduleSessionCtx, &(pDecryptInfo->moduleHmacCtx), getHmacType(transform_type(pDecryptInfo->crypto_type)), pDecryptInfo->Key, pDecryptInfo->keyLen); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + crypto_module_sess_exit_use(pDecryptInfo->moduleSessionCtx); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } + +} + +void releaseHmacCtx(int code, void* args) +{ + if (libhandle && ((DecryptInfo*)args)->moduleHmacCtx) { + crypto_hmac_clean_use(((DecryptInfo*)args)->moduleHmacCtx); + ((DecryptInfo*)args)->moduleHmacCtx = NULL; + } +} + +void cryptoHmac(DecryptInfo* pDecryptInfo, char* indata, int inlen, char* outdata, int* outlen) +{ + int ret = 1; + char errmsg[MAX_ERRMSG_LEN] = {0}; + + ret = crypto_hmac_use(pDecryptInfo->moduleHmacCtx, (unsigned char*)indata, inlen, (unsigned char*)outdata, (size_t*)outlen); + if (ret != 1) { + crypto_get_errmsg_use(NULL, errmsg); + releaseHmacCtx(0, pDecryptInfo); + releaseCryptoCtx(0, pDecryptInfo); + releaseCryptoSession(0, pDecryptInfo); + unload_crypto_module(0, NULL); + fprintf(stderr, ("%s\n"), errmsg); + exit(1); + } +} + void CryptoModuleParamsCheck(DecryptInfo* pDecryptInfo, const char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt) { errno_t rc = 0; @@ -306,7 +356,9 @@ void CryptoModuleParamsCheck(DecryptInfo* pDecryptInfo, const char* params, cons } initCryptoKeyCtx(pDecryptInfo); + initHmacCtx(pDecryptInfo); pDecryptInfo->encryptInclude = true; pDecryptInfo->clientSymmCryptoFunc = crypto_encrypt_decrypt_use; + pDecryptInfo->clientHmacFunc = crypto_hmac_use; } diff --git a/src/bin/psql/common_cipher.h b/src/bin/psql/common_cipher.h index 0a5d0a56b6..3d9bc68791 100644 --- 
a/src/bin/psql/common_cipher.h +++ b/src/bin/psql/common_cipher.h @@ -17,7 +17,10 @@ extern "C" { #define MAX_WRITE_CACHE_LEN (MAX_CRYPTO_CACHE_LEN - CRYPTO_BLOCK_SIZE) /*加密算法补pad模式为强补,最多可以补16字节,所以写缓存少16字节,则密文最长8192、保证读取时可以整块密文读入*/ typedef int (*crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); +typedef int (*crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); + extern crypto_encrypt_decrypt_type crypto_encrypt_decrypt_use; +extern crypto_hmac_type crypto_hmac_use; extern bool load_crypto_module_lib(); extern void unload_crypto_module(int code, void* args); @@ -28,6 +31,9 @@ extern void initCryptoKeyCtx(DecryptInfo* pDecryptInfo); extern void releaseCryptoCtx(int code, void* args); extern void symmEncDec(DecryptInfo* pDecryptInfo, bool isEnc, char* indata, int inlen, char* outdata, int* outlen); extern void symmGenerateKey(DecryptInfo* pDecryptInfo); +extern void initHmacCtx(DecryptInfo* pDecryptInfo); +extern void releaseHmacCtx(int code, void* args); +extern void cryptoHmac(DecryptInfo* pDecryptInfo, char* indata, int inlen, char* outdata, int* outlen); extern void CryptoModuleParamsCheck(DecryptInfo* pDecryptInfo, const char* params, const char* module_encrypt_mode, const char* module_encrypt_key, const char* module_encrypt_salt); #ifdef __cplusplus diff --git a/src/gausskernel/cbb/utils/aes/aes.cpp b/src/gausskernel/cbb/utils/aes/aes.cpp index e9c14cdd98..d8f2660296 100644 --- a/src/gausskernel/cbb/utils/aes/aes.cpp +++ b/src/gausskernel/cbb/utils/aes/aes.cpp @@ -69,7 +69,7 @@ bool init_aes_vector_random(GS_UCHAR* aes_vector, size_t vector_len) /* inputstrlen must include the terminating '\0' character */ bool writeFileAfterEncryption( - FILE* pf, char* inputstr, int inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* randvalue, void* moduleKeyCtx, 
kernel_crypto_encrypt_decrypt_type encFunc) + FILE* pf, char* inputstr, int inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* randvalue, void* moduleKeyCtx, kernel_crypto_encrypt_decrypt_type encFunc, void* moduleHmacCtx, kernel_crypto_hmac_type hmacFunc) { void* writeBuff = NULL; int64 writeBuffLen; @@ -79,6 +79,7 @@ bool writeFileAfterEncryption( GS_UINT32 outputlen; bool encryptstatus = false; errno_t errorno = EOK; + int cipherstart = 0; if ((inputstr == NULL) || inputstrlen <= 0) { return false; @@ -92,8 +93,13 @@ bool writeFileAfterEncryption( * cipher text len max is plain text len + RANDOM_LEN(aes128) * writeBufflen equals to ciphertextlen + RANDOM_LEN(rand_vector) + RANDOM_LEN(encrypt_salt). * so writeBufflen equals to inputstrlen(palin text len) + 48. + * if use crypto module,writebuff header after cipherlen add hmac,hmac length is 32. */ writeBuffLen = (int64)inputstrlen + RANDOM_LEN * 3; + if (moduleKeyCtx && encFunc && moduleHmacCtx && hmacFunc) { + writeBuffLen += CRYPTO_MODULE_HMAC_LEN; + } + if (writeBuffLen >= MAX_INT_NUM) { printf("invalid value of inputstrlen!\n"); return false; @@ -139,10 +145,21 @@ bool writeFileAfterEncryption( } /* the real encrypt operation */ - if (moduleKeyCtx && encFunc) { + if (moduleKeyCtx && encFunc && moduleHmacCtx && hmacFunc) { int ret = 1; + size_t hmaclen = 0; cipherlen = outputlen; + /*caculate plaint hmac*/ + ret = hmacFunc(moduleHmacCtx, (unsigned char*)inputstr, inputstrlen, (unsigned char*)writeBuff + RANDOM_LEN, &hmaclen); + if (ret != 1) { + free(writeBuff); + writeBuff = NULL; + free(outputstr); + outputstr = NULL; + return false; + } + ret = encFunc(moduleKeyCtx, 1, (unsigned char*)inputstr, inputstrlen, randvalue, 16, (unsigned char*)outputstr, (size_t*)(&cipherlen), NULL); if (ret != 1) { free(writeBuff); @@ -151,6 +168,9 @@ bool writeFileAfterEncryption( outputstr = NULL; return false; } + + cipherlen += CRYPTO_MODULE_HMAC_LEN; + cipherstart = CRYPTO_MODULE_HMAC_LEN + RANDOM_LEN; } 
else { encryptstatus = aes128Encrypt((GS_UCHAR*)inputstr, (GS_UINT32)inputstrlen, @@ -166,6 +186,8 @@ bool writeFileAfterEncryption( outputstr = NULL; return false; } + + cipherstart = RANDOM_LEN; } errorno = sprintf_s(encryptleninfo, sizeof(encryptleninfo), "%u", cipherlen); @@ -173,11 +195,16 @@ bool writeFileAfterEncryption( errorno = memcpy_s((void*)((char*)writeBuff), writeBuffLen, encryptleninfo, RANDOM_LEN); securec_check_c(errorno, "\0", "\0"); /* the ciphertext contains the real cipher and salt vector used for encrypt */ - errorno = memcpy_s((void*)((char*)writeBuff + RANDOM_LEN), writeBuffLen - RANDOM_LEN, outputstr, cipherlen); + /*stored cipherlen include hmac,however hmac has been stored in writeBuffer*/ + if (cipherstart == (CRYPTO_MODULE_HMAC_LEN + RANDOM_LEN)) { + cipherlen -= CRYPTO_MODULE_HMAC_LEN; + } + + errorno = memcpy_s((void*)((char*)writeBuff + cipherstart), writeBuffLen - cipherstart, outputstr, cipherlen); securec_check_c(errorno, "\0", "\0"); /* write the cipherlen info and cipher text into encrypt file. 
*/ - if (fwrite(writeBuff, (unsigned long)(cipherlen + RANDOM_LEN), 1, pf) != 1) { + if (fwrite(writeBuff, (unsigned long)(cipherlen + cipherstart), 1, pf) != 1) { printf("write encrypt file failed.\n"); free(writeBuff); free(outputstr); @@ -219,6 +246,7 @@ void initDecryptInfo(DecryptInfo* pDecryptInfo) securec_check_c(errorno, "\0", "\0"); pDecryptInfo->moduleKeyCtx = NULL; + pDecryptInfo->moduleHmacCtx = NULL; pDecryptInfo->moduleSessionCtx = NULL; } static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) @@ -232,6 +260,7 @@ static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) bool decryptstatus = false; errno_t errorno = EOK; int moduleRet = 1; + bool hmacverified = false; if (!feof(source) && (false == pDecryptInfo->isCurrLineProcess)) { nread = (int)fread((void*)cipherleninfo, 1, RANDOM_LEN, source); @@ -289,8 +318,17 @@ static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) nread = (int)fread((void*)ciphertext, 1, cipherlen, source); if (nread) { if (pDecryptInfo->moduleKeyCtx && pDecryptInfo->clientSymmCryptoFunc) { + unsigned char hmac[CRYPTO_MODULE_HMAC_LEN + 1] = {0}; + size_t hmaclen = 0; plainlen = cipherlen; - moduleRet = pDecryptInfo->clientSymmCryptoFunc(pDecryptInfo->moduleKeyCtx, 0, ciphertext, cipherlen, pDecryptInfo->rand, 16, outputstr,(size_t*)(&plainlen), NULL); + moduleRet = pDecryptInfo->clientSymmCryptoFunc(pDecryptInfo->moduleKeyCtx, 0, ciphertext + CRYPTO_MODULE_HMAC_LEN, cipherlen - CRYPTO_MODULE_HMAC_LEN, + pDecryptInfo->rand, 16, outputstr,(size_t*)(&plainlen), NULL); + + /*verify hmac*/ + moduleRet = pDecryptInfo->clientHmacFunc(pDecryptInfo->moduleHmacCtx, outputstr, plainlen, hmac, &hmaclen); + if (strncmp((char*)hmac, (char*)ciphertext, CRYPTO_MODULE_HMAC_LEN) == 0) { + hmacverified = true; + } } else { decryptstatus = aes128Decrypt(ciphertext, (GS_UINT32)cipherlen, @@ -303,7 +341,8 @@ static bool decryptFromFile(FILE* source, DecryptInfo* pDecryptInfo) } if (!nread || (!decryptstatus && 
(pDecryptInfo->moduleKeyCtx == NULL && pDecryptInfo->clientSymmCryptoFunc == NULL)) - || (moduleRet != 1 && pDecryptInfo->moduleKeyCtx && pDecryptInfo->clientSymmCryptoFunc)) { + || (moduleRet != 1 && pDecryptInfo->moduleKeyCtx && pDecryptInfo->clientSymmCryptoFunc) + || (!hmacverified && pDecryptInfo->moduleHmacCtx && pDecryptInfo->clientHmacFunc)) { errorno = memset_s(ciphertext, cipherlen, '\0', cipherlen); securec_check_c(errorno, "", ""); free(ciphertext); diff --git a/src/include/utils/aes.h b/src/include/utils/aes.h index 5f0aa17407..9f7c1e8753 100644 --- a/src/include/utils/aes.h +++ b/src/include/utils/aes.h @@ -43,9 +43,11 @@ ((inputlen % AES_GROUP_LEN) ? ((inputlen / AES_GROUP_LEN) * AES_GROUP_LEN + AES_GROUP_LEN) : inputlen) typedef int (*kernel_crypto_encrypt_decrypt_type)(void *ctx, int enc, unsigned char *data, size_t data_size, unsigned char *iv, size_t iv_size, unsigned char *result, size_t *result_size, unsigned char *tag); +typedef int (*kernel_crypto_hmac_type)(void *ctx, unsigned char * data, size_t data_size, unsigned char *result, size_t *result_size); #define CRYPTO_MODULE_PARAMS_MAX_LEN 1024 #define CRYPTO_MODULE_ENC_TYPE_MAX_LEN 16 +#define CRYPTO_MODULE_HMAC_LEN 32 typedef struct decrypt_struct { unsigned char* decryptBuff; @@ -56,12 +58,14 @@ typedef struct decrypt_struct { bool encryptInclude; kernel_crypto_encrypt_decrypt_type clientSymmCryptoFunc; + kernel_crypto_hmac_type clientHmacFunc; /* Encrypt gs_dump file through OpenSSL function */ bool randget; unsigned char rand[RANDOM_LEN + 1]; void* moduleSessionCtx; void* moduleKeyCtx; + void* moduleHmacCtx; char crypto_modlue_params[CRYPTO_MODULE_PARAMS_MAX_LEN]; char crypto_type[CRYPTO_MODULE_ENC_TYPE_MAX_LEN]; } DecryptInfo; @@ -69,7 +73,9 @@ typedef struct decrypt_struct { extern void initDecryptInfo(DecryptInfo* pDecryptInfo); extern char* getLineFromAesEncryptFile(FILE* source, DecryptInfo* pDecryptInfo); extern bool writeFileAfterEncryption( - FILE* pf, char* inputstr, int 
inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* rand, void* moduleKeyCtx = NULL, kernel_crypto_encrypt_decrypt_type encFunc = NULL); + FILE* pf, char* inputstr, int inputstrlen, int writeBufflen, unsigned char Key[], unsigned char* rand, + void* moduleKeyCtx = NULL, kernel_crypto_encrypt_decrypt_type encFunc = NULL, + void* moduleHmacCtx = NULL, kernel_crypto_hmac_type hmacFunc = NULL); extern bool check_key(const char* key, int NUM); extern void aesEncrypt(char* inputstr, unsigned long inputstrlen, char* outputstr, unsigned char Key[]); extern void aesDecrypt(char* inputstr, unsigned long inputstrlen, char* outputstr, unsigned char Key[], bool isBinary); -- Gitee From 9ed9edd1626d7902378d1e26e85bb83c60521844 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Wed, 4 Sep 2024 14:59:25 +0800 Subject: [PATCH 256/347] =?UTF-8?q?=E5=AE=8C=E5=96=84out=5Fparam=5Ffunc=5F?= =?UTF-8?q?overload=E5=9C=BA=E6=99=AF=E4=B8=8B=E4=B8=8D=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E7=9A=84=E5=87=BD=E6=95=B0=E7=9A=84=E6=8F=90=E7=A4=BA=E4=BF=A1?= =?UTF-8?q?=E6=81=AF=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/gram.y | 4 +++- src/test/regress/expected/out_param_func_overload.out | 8 ++++++++ src/test/regress/sql/out_param_func_overload.sql | 5 +++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 0183dfc3db..1ee23aff6a 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -7249,7 +7249,9 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate InsertErrorMessage(message, plpgsql_yylloc); ereport(errstate, (errcode(ERRCODE_DUPLICATE_FUNCTION), - errmsg("function \"%s\" isn't exclusive ", sqlstart))); + errmsg("function \"%s\" isn't exclusive ", sqlstart), + errdetail("The overload function must be package function or 
function with PACKAGE keyword.And do not mix overload functions of O style and PG style."), + errcause("The overload function must be package function."))); } } diff --git a/src/test/regress/expected/out_param_func_overload.out b/src/test/regress/expected/out_param_func_overload.out index dd1512266a..a74eef8cf5 100644 --- a/src/test/regress/expected/out_param_func_overload.out +++ b/src/test/regress/expected/out_param_func_overload.out @@ -327,6 +327,14 @@ drop view v1; drop package pac_test_1; NOTICE: drop cascades to function out_param_func_overload.f_test_1(integer,integer,integer) drop table t1; +create or replace procedure proc_test is +begin +perform count(1); +end; +/ +ERROR: function "count" isn't exclusive +DETAIL: The overload function must be package function or function with PACKAGE keyword.And do not mix overload functions of O style and PG style. +CONTEXT: compilation of PL/pgSQL function "proc_test" near line 1 --clean reset behavior_compat_options; drop schema out_param_func_overload cascade; diff --git a/src/test/regress/sql/out_param_func_overload.sql b/src/test/regress/sql/out_param_func_overload.sql index 1710d96dbc..4e3796d570 100644 --- a/src/test/regress/sql/out_param_func_overload.sql +++ b/src/test/regress/sql/out_param_func_overload.sql @@ -262,6 +262,11 @@ drop view v1; drop package pac_test_1; drop table t1; +create or replace procedure proc_test is +begin +perform count(1); +end; +/ --clean reset behavior_compat_options; -- Gitee From a4018fa9ade31f0fd5176d0fee479e12cf5d9ae4 Mon Sep 17 00:00:00 2001 From: z00848344 Date: Wed, 4 Sep 2024 17:29:57 +0800 Subject: [PATCH 257/347] =?UTF-8?q?=E4=BF=AE=E6=AD=A3run=5Fha=5Fsingle.sh?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On branch hacheck_single_double Your branch is ahead of 'origin/hacheck_single_double' by 1 commit. 
Changes to be committed: modified: src/test/ha/ha_schedule_single --- src/test/ha/ha_schedule_single | 42 +++++++++---------- src/test/ha/pgxc_psd_single.py | 7 ++++ .../datareplica_cstore_big_cu.sh | 4 +- .../datareplica_cstore_rstore.sh | 3 +- .../datareplica_with_xlogreplica_status.sh | 1 + .../llt_single/disable_stream_replication.sh | 3 +- src/test/ha/testcase/llt_single/xlog_redo.sh | 4 +- .../failover_copy_col_rewind_standby.sh | 2 +- 8 files changed, 35 insertions(+), 31 deletions(-) diff --git a/src/test/ha/ha_schedule_single b/src/test/ha/ha_schedule_single index 407394e6c5..7bc9163cd9 100644 --- a/src/test/ha/ha_schedule_single +++ b/src/test/ha/ha_schedule_single @@ -5,7 +5,7 @@ data_replication_single/datareplica_cstore_build data_replication_single/datareplica_cstore_catchup data_replication_single/datareplica_cstore_multi_catchup data_replication_single/datareplica_cstore_slow_catchup -data_replication_single/datareplica_cstore_failover +#data_replication_single/datareplica_cstore_failover data_replication_single/datareplica_cstore_rstore data_replication_single/datareplica_failover_consistency data_replication_single/datareplica_with_xlogreplica_status @@ -18,33 +18,33 @@ data_replication_single/switchover dataqueue_single/dataqueue_concurrent_many_tables dataqueue_single/dataqueue_concurrent_one_table dataqueue_single/dataqueue_data_larger_than_queuesize -secondary_single/copy_ks_copy_kp_ss_failover +#secondary_single/copy_ks_copy_kp_ss_failover #secondary_single/dummy_abormal_failover -secondary_single/dummy_slot_choose_startpoint -secondary_single/dummystandby_no_catchup -secondary_single/failover_switchover -secondary_single/kill_ds_start_ds_ss_failover -secondary_single/kill_ds_with_masterstandby_normal -secondary_single/kill_ds_with_no_standby -secondary_single/standby_failover_consistency_check -secondary_single/standby_failover -secondary_single/standby_failover_connect_standby -secondary_single/dummystandby_crc 
+#secondary_single/dummy_slot_choose_startpoint +#secondary_single/dummystandby_no_catchup +#secondary_single/failover_switchover +#secondary_single/kill_ds_start_ds_ss_failover +#secondary_single/kill_ds_with_masterstandby_normal +#secondary_single/kill_ds_with_no_standby +#secondary_single/standby_failover_consistency_check +#secondary_single/standby_failover +#secondary_single/standby_failover_connect_standby +#secondary_single/dummystandby_crc slot_single/replication_slot -catchup_single/full_catchup +#catchup_single/full_catchup catchup_single/incremental_catchup catchup_single/switchover_fast -pg_rewind_single/failover_rewind_standby +#pg_rewind_single/failover_rewind_standby pg_rewind_single/failover_rewind_primary -pg_rewind_single/failover_copy_row_rewind_standby -pg_rewind_single/failover_copy_col_rewind_standby -pg_rewind_single/failover_copy_row_vacfull_rewind_standby -pg_rewind_single/failover_copy_row_vacuum_rewind_standby -pg_rewind_single/failover_full_incremental_build +#pg_rewind_single/failover_copy_row_rewind_standby +#pg_rewind_single/failover_copy_col_rewind_standby +#pg_rewind_single/failover_copy_row_vacfull_rewind_standby +#pg_rewind_single/failover_copy_row_vacuum_rewind_standby +#pg_rewind_single/failover_full_incremental_build pg_rewind_single/buildquery_normal -pg_rewind_single/gs_rewind_llt +#pg_rewind_single/gs_rewind_llt #pg_rewind_single/enhance_rewind_test -pg_rewind_single/rewind_primary_connect_dummy +#pg_rewind_single/rewind_primary_connect_dummy llt_single/disable_stream_replication llt_single/validate_pool_connection llt_single/walreplica_rcvbuffer diff --git a/src/test/ha/pgxc_psd_single.py b/src/test/ha/pgxc_psd_single.py index d44f794804..879fe93792 100644 --- a/src/test/ha/pgxc_psd_single.py +++ b/src/test/ha/pgxc_psd_single.py @@ -261,11 +261,18 @@ class Pterodb(): print datanode_cmd os.system(datanode_cmd) + datanode_cmd = g_valgrind + install_path + "/bin/gs_ctl" + " build "+ " -D " + self.data_dir + "/" + 
self.dname_prefix + str(i) + "_standby" + " > " + self.data_dir + "/" + self.dname_prefix + str(i) +"_standby"+ "/logdn" + str(i) + ".log 2>&1 &" + print datanode_cmd + os.system(datanode_cmd) time.sleep(3) datanode_cmd = g_valgrind + install_path + "/bin/gaussdb --single_node" + " -M standby -R " + " -D " + self.data_dir + "/" + self.dname_prefix + str(i) + "_dummystandby" + " > " + self.data_dir + "/" + self.dname_prefix + str(i) +"_dummystandby"+ "/logdn" + str(i) + ".log 2>&1 &" print datanode_cmd os.system(datanode_cmd) + + datanode_cmd = g_valgrind + install_path + "/bin/gs_ctl" + " build " + " -D " + self.data_dir + "/" + self.dname_prefix + str(i) + "_dummystandby" + " > " + self.data_dir + "/" + self.dname_prefix + str(i) +"_dummystandby"+ "/logdn" + str(i) + ".log 2>&1 &" + print datanode_cmd + os.system(datanode_cmd) time.sleep(1) def __stop_server(self): diff --git a/src/test/ha/testcase/data_replication_single/datareplica_cstore_big_cu.sh b/src/test/ha/testcase/data_replication_single/datareplica_cstore_big_cu.sh index 67ab89ff87..4b7c41b41a 100644 --- a/src/test/ha/testcase/data_replication_single/datareplica_cstore_big_cu.sh +++ b/src/test/ha/testcase/data_replication_single/datareplica_cstore_big_cu.sh @@ -25,7 +25,7 @@ fi cstore_rawdata_lines=15000 #create table -gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists big_cu_table; create table big_cu_table (c_id bigint NOT NULL,c_d_id int NOT NULL,c_w_id int NOT NULL,c_first varchar(16) NOT NULL,c_middle varchar NOT NULL,c_last TEXT NOT NULL, c_street_1 varchar(20) NOT NULL,c_street_2 varchar(20) NOT NULL,c_city varchar(20) NOT NULL,c_state char(2) NOT NULL,c_zip char(9) NOT NULL,c_phone char(16) NOT NULL, c_since timestamp NOT NULL,c_credit char(2) NOT NULL, c_credit_lim numeric(12,2) NOT NULL, c_discount numeric(4,4) NOT NULL,c_balance numeric(12,2) NOT NULL,c_ytd_payment numeric(12,2) NOT NULL,c_payment_cnt int NOT NULL,c_delivery_cnt int NOT NULL, c_data varchar(500) NOT NULL , partial 
cluster key(c_id)) with (orientation=column) distribute by hash (c_d_id);" +gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists big_cu_table; create table big_cu_table (c_id bigint NOT NULL,c_d_id int NOT NULL,c_w_id int NOT NULL,c_first varchar(16) NOT NULL,c_middle varchar NOT NULL,c_last TEXT NOT NULL, c_street_1 varchar(20) NOT NULL,c_street_2 varchar(20) NOT NULL,c_city varchar(20) NOT NULL,c_state char(2) NOT NULL,c_zip char(9) NOT NULL,c_phone char(16) NOT NULL, c_since timestamp NOT NULL,c_credit char(2) NOT NULL, c_credit_lim numeric(12,2) NOT NULL, c_discount numeric(4,4) NOT NULL,c_balance numeric(12,2) NOT NULL,c_ytd_payment numeric(12,2) NOT NULL,c_payment_cnt int NOT NULL,c_delivery_cnt int NOT NULL, c_data varchar(500) NOT NULL , partial cluster key(c_id)) with (orientation=column);" gsql -d $db -p $dn1_primary_port -c "set enable_data_replicate=on; copy big_cu_table from '$scripts_dir/data/customer0_0' with csv null '';" @@ -72,7 +72,7 @@ fi cstore_rawdata_lines=15000 #create table -gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists big_cu_table; create table big_cu_table (c_id bigint NOT NULL,c_d_id int NOT NULL,c_w_id int NOT NULL,c_first varchar(16) NOT NULL,c_middle varchar NOT NULL,c_last TEXT NOT NULL, c_street_1 varchar(20) NOT NULL,c_street_2 varchar(20) NOT NULL,c_city varchar(20) NOT NULL,c_state char(2) NOT NULL,c_zip char(9) NOT NULL,c_phone char(16) NOT NULL, c_since timestamp NOT NULL,c_credit char(2) NOT NULL, c_credit_lim numeric(12,2) NOT NULL, c_discount numeric(4,4) NOT NULL,c_balance numeric(12,2) NOT NULL,c_ytd_payment numeric(12,2) NOT NULL,c_payment_cnt int NOT NULL,c_delivery_cnt int NOT NULL, c_data varchar(500) NOT NULL , partial cluster key(c_id)) with (orientation=column) distribute by hash (c_d_id);" +gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists big_cu_table; create table big_cu_table (c_id bigint NOT NULL,c_d_id int NOT NULL,c_w_id int NOT NULL,c_first varchar(16) NOT NULL,c_middle varchar NOT 
NULL,c_last TEXT NOT NULL, c_street_1 varchar(20) NOT NULL,c_street_2 varchar(20) NOT NULL,c_city varchar(20) NOT NULL,c_state char(2) NOT NULL,c_zip char(9) NOT NULL,c_phone char(16) NOT NULL, c_since timestamp NOT NULL,c_credit char(2) NOT NULL, c_credit_lim numeric(12,2) NOT NULL, c_discount numeric(4,4) NOT NULL,c_balance numeric(12,2) NOT NULL,c_ytd_payment numeric(12,2) NOT NULL,c_payment_cnt int NOT NULL,c_delivery_cnt int NOT NULL, c_data varchar(500) NOT NULL , partial cluster key(c_id)) with (orientation=column);" gsql -d $db -p $dn1_primary_port -c "set enable_data_replicate=on; copy big_cu_table from '$scripts_dir/data/customer0_0' with csv null '';" diff --git a/src/test/ha/testcase/data_replication_single/datareplica_cstore_rstore.sh b/src/test/ha/testcase/data_replication_single/datareplica_cstore_rstore.sh index 3455b2203f..d6d897ef86 100644 --- a/src/test/ha/testcase/data_replication_single/datareplica_cstore_rstore.sh +++ b/src/test/ha/testcase/data_replication_single/datareplica_cstore_rstore.sh @@ -29,8 +29,7 @@ gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists mpp_test2; CREATE TABL , L_SHIPMODE CHAR(10) , L_COMMENT VARCHAR(44) ) - with (orientation = column) - distribute by hash(L_ORDERKEY);" + with (orientation = column);" #copy data(25M) to standby 4 times gsql -d $db -p $dn1_primary_port -c "set enable_data_replicate=on; copy mpp_test1 from '$scripts_dir/data/data5';" & diff --git a/src/test/ha/testcase/data_replication_single/datareplica_with_xlogreplica_status.sh b/src/test/ha/testcase/data_replication_single/datareplica_with_xlogreplica_status.sh index c8ba9989bc..8b6dae587a 100644 --- a/src/test/ha/testcase/data_replication_single/datareplica_with_xlogreplica_status.sh +++ b/src/test/ha/testcase/data_replication_single/datareplica_with_xlogreplica_status.sh @@ -61,6 +61,7 @@ fi function tear_down() { sleep 1 +start_primary gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists cstore_copy_t1;" } diff --git 
a/src/test/ha/testcase/llt_single/disable_stream_replication.sh b/src/test/ha/testcase/llt_single/disable_stream_replication.sh index 3bc9a74f7b..a39fc15e1d 100644 --- a/src/test/ha/testcase/llt_single/disable_stream_replication.sh +++ b/src/test/ha/testcase/llt_single/disable_stream_replication.sh @@ -32,8 +32,7 @@ gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists mpp_test2; CREATE TABL , L_SHIPMODE CHAR(10) , L_COMMENT VARCHAR(44) ) - with (orientation = column) - distribute by hash(L_ORDERKEY);" + with (orientation = column);" gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists mpp_test3;" #data replication test for row store diff --git a/src/test/ha/testcase/llt_single/xlog_redo.sh b/src/test/ha/testcase/llt_single/xlog_redo.sh index 7b5843016f..319d8482b6 100644 --- a/src/test/ha/testcase/llt_single/xlog_redo.sh +++ b/src/test/ha/testcase/llt_single/xlog_redo.sh @@ -26,9 +26,7 @@ gsql -d $db -p $dn1_primary_port -c "CREATE unlogged TABLE table_replication_12 , R_NAME CHAR(25) NOT NULL , R_COMMENT VARCHAR(152) ) - with (orientation = column) - distribute by hash(R_REGIONKEY) - ; + with (orientation = column); vacuum full table_replication_12;" gsql -d $db -p $dn1_primary_port -c "checkpoint;" diff --git a/src/test/ha/testcase/pg_rewind_single/failover_copy_col_rewind_standby.sh b/src/test/ha/testcase/pg_rewind_single/failover_copy_col_rewind_standby.sh index bec5aedb96..8f3602c443 100644 --- a/src/test/ha/testcase/pg_rewind_single/failover_copy_col_rewind_standby.sh +++ b/src/test/ha/testcase/pg_rewind_single/failover_copy_col_rewind_standby.sh @@ -9,7 +9,7 @@ check_instance cstore_rawdata_lines=15000 #create table on cn, dn_primary, dn_standby -gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists big_cu_table; create table big_cu_table (c_id bigint NOT NULL,c_d_id int NOT NULL,c_w_id int NOT NULL,c_first varchar(16) NOT NULL,c_middle varchar NOT NULL,c_last TEXT NOT NULL, c_street_1 varchar(20) NOT NULL,c_street_2 varchar(20) NOT NULL,c_city 
varchar(20) NOT NULL,c_state char(2) NOT NULL,c_zip char(9) NOT NULL,c_phone char(16) NOT NULL, c_since timestamp NOT NULL,c_credit char(2) NOT NULL, c_credit_lim numeric(12,2) NOT NULL, c_discount numeric(4,4) NOT NULL,c_balance numeric(12,2) NOT NULL,c_ytd_payment numeric(12,2) NOT NULL,c_payment_cnt int NOT NULL,c_delivery_cnt int NOT NULL, c_data varchar(500) NOT NULL , partial cluster key(c_id)) with (orientation=column) distribute by hash (c_d_id);" +gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists big_cu_table; create table big_cu_table (c_id bigint NOT NULL,c_d_id int NOT NULL,c_w_id int NOT NULL,c_first varchar(16) NOT NULL,c_middle varchar NOT NULL,c_last TEXT NOT NULL, c_street_1 varchar(20) NOT NULL,c_street_2 varchar(20) NOT NULL,c_city varchar(20) NOT NULL,c_state char(2) NOT NULL,c_zip char(9) NOT NULL,c_phone char(16) NOT NULL, c_since timestamp NOT NULL,c_credit char(2) NOT NULL, c_credit_lim numeric(12,2) NOT NULL, c_discount numeric(4,4) NOT NULL,c_balance numeric(12,2) NOT NULL,c_ytd_payment numeric(12,2) NOT NULL,c_payment_cnt int NOT NULL,c_delivery_cnt int NOT NULL, c_data varchar(500) NOT NULL , partial cluster key(c_id)) with (orientation=column);" #copy data to primary stop_primary -- Gitee From 21ac1baedbdf5b2830e4f0d48593b755f4f6cf6c Mon Sep 17 00:00:00 2001 From: lyanna <1016943941@qq.com> Date: Thu, 5 Sep 2024 10:29:59 +0800 Subject: [PATCH 258/347] =?UTF-8?q?1=E3=80=81=E4=BF=AE=E5=A4=8Dtoast?= =?UTF-8?q?=E8=A1=A8=E5=AD=98=E5=82=A8=E7=B1=BB=E5=9E=8B=E5=92=8C=E7=B4=A2?= =?UTF-8?q?=E5=BC=95=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98?= =?UTF-8?q?=202=E3=80=81=E4=BF=AE=E5=A4=8D=E5=9F=BA=E7=A1=80=E8=A1=A8?= =?UTF-8?q?=E5=92=8Ctoast=E8=A1=A8=E6=98=A5=E5=88=9D=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/toasting.cpp | 2 +- .../storage/access/common/reloptions.cpp | 26 
++--- .../storage/access/heap/heapam.cpp | 3 + .../storage/access/nbtree/nbtinsert.cpp | 2 +- .../access/ustore/knl_uextremeredo.cpp | 1 + .../storage/access/ustore/knl_utuptoaster.cpp | 105 ++---------------- src/include/access/heapam.h | 2 +- src/include/access/ustore/knl_upage.h | 6 +- 8 files changed, 28 insertions(+), 119 deletions(-) diff --git a/src/common/backend/catalog/toasting.cpp b/src/common/backend/catalog/toasting.cpp index 394d73e528..9cbfd6a428 100644 --- a/src/common/backend/catalog/toasting.cpp +++ b/src/common/backend/catalog/toasting.cpp @@ -135,10 +135,10 @@ static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Da Oid toast_relid; Oid toast_typid = InvalidOid; Oid namespaceid; - Oid index_am_oid = BTREE_AM_OID; char toast_relname[NAMEDATALEN]; char toast_idxname[NAMEDATALEN]; IndexInfo* indexInfo = NULL; + Oid index_am_oid = BTREE_AM_OID; Oid collationObjectId[2]; Oid classObjectId[2]; int16 coloptions[2]; diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index 72623bb219..3c3b356977 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -985,25 +985,21 @@ Datum transformRelOptions(Datum oldOptions, List *defList, const char *namspace, } } - if (namspace != NULL && pg_strcasecmp(namspace, "toast") == 0 && toastStorageType != NULL) { - const char *actualStorageType = NULL; - if (storageType == NULL) { - actualStorageType = u_sess->attr.attr_sql.enable_default_ustore_table ? 
"ustore" : "astore"; - } else { - actualStorageType = storageType; - } - - if (pg_strcasecmp(actualStorageType, "astore") == 0 || pg_strcasecmp(actualStorageType, "ustore") == 0) { - if (pg_strcasecmp(actualStorageType, toastStorageType) != 0) { + /* we did not specify a storage type for toast, so use the same storage type as its parent */ + if (namspace != NULL && pg_strcasecmp(namspace, "toast") == 0) { + if (toastStorageType != NULL) { + const char *parentStorageType = (storageType == NULL) + ? (u_sess->attr.attr_sql.enable_default_ustore_table ? "ustore" : "astore") + : storageType; + if (parentStorageType != toastStorageType) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("toast cannot be set for %s with storage_type=%s", actualStorageType, toastStorageType))); + errmsg("parent storage type is %s but toast storage type is %s, toast should use the " + "same storage type as its parent", + parentStorageType, toastStorageType))); } } - } - /* we did not specify a storage type for toast, so use the same storage type as its parent */ - if (namspace != NULL && pg_strcasecmp(namspace, "toast") == 0 && !toastStorageTypeSet) { - if (storageType != NULL) { + if (!toastStorageTypeSet && storageType != NULL) { Size len = VARHDRSZ + strlen("storage_type") + 1 + strlen(storageType); /* +1 leaves room for sprintf's trailing null */ text *t = (text *)palloc(len + 1); diff --git a/src/gausskernel/storage/access/heap/heapam.cpp b/src/gausskernel/storage/access/heap/heapam.cpp index 18f5f46e52..5e863fa11a 100755 --- a/src/gausskernel/storage/access/heap/heapam.cpp +++ b/src/gausskernel/storage/access/heap/heapam.cpp @@ -2640,6 +2640,9 @@ bool heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot, bool bool result = false; Buffer buffer; HeapTupleData heap_tuple; + if (RelationIsUstoreFormat(relation)) { + ereport(ERROR, (errmsg("heap_hot_search relation type is ustore!"))); + } buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid)); 
LockBuffer(buffer, BUFFER_LOCK_SHARE); diff --git a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp index 4227aaeec7..8e75c88979 100644 --- a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp @@ -2584,7 +2584,7 @@ static bool CheckItemIsAlive(ItemPointer tid, Relation relation, Snapshot snapsh bool* all_dead, CUDescScan* cudescScan) { if (!RelationIsCUFormat(relation)) { - return TableIndexFetchTupleCheck(relation, tid, snapshot, all_dead); + return heap_hot_search(tid, relation, snapshot, all_dead); } else { return cudescScan->CheckItemIsAlive(tid); } diff --git a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp index ec72f15af0..757f1ea1e6 100644 --- a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp @@ -1533,6 +1533,7 @@ void UHeap2XlogFreezeOperatorPage(RedoBufferInfo *buffer, void *recorddata, void offsets++; } } + PageSetLSN(page, buffer->lsn); } void UHeap2XlogExtendTDSlotsOperatorPage(RedoBufferInfo *buffer, void *recorddata) diff --git a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp index 130da8f175..e3b58a83e6 100644 --- a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp +++ b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp @@ -46,22 +46,20 @@ static Datum UHeapToastSaveDatum(Relation rel, Datum value, struct varlena *olde static Datum UHeapToastCompressDatum(Datum value); static bool UHeapToastIdValueIdExists(Oid toastrelid, Oid valueid, int2 bucketid); static bool UHeapToastRelValueidExists(Relation toastrel, Oid valueid); -static Oid UHeapGetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn, - bool *inconsistent); +static Oid UHeapGetNewOidWithIndex(Relation relation, Oid indexId, 
AttrNumber oidcolumn); static Datum UHeapToastCompressDatum(Datum value) { return toast_compress_datum(value); } -Oid UHeapGetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn, bool *inconsistent) +Oid UHeapGetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn) { Oid newOid; SysScanDesc scan; ScanKeyData key; bool collides = false; Assert(RelationIsUstoreFormat(relation) || RelationIsToast(relation)); - Assert(inconsistent != NULL); TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), false, relation->rd_tam_ops); /* Generate new OIDs until we find one not in the table */ do { @@ -72,40 +70,12 @@ Oid UHeapGetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn * chunk_id for toast datum to prevent wrap around. */ newOid = GetNewObjectId(IsToastNamespace(RelationGetNamespace(relation))); - *inconsistent = false; ScanKeyInit(&key, oidcolumn, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(newOid)); /* see notes above about using SnapshotAny */ scan = systable_beginscan(relation, indexId, true, SnapshotAny, ATTR_FIRST, &key); - while (UHeapSysIndexGetnextSlot(scan, ForwardScanDirection, slot)) { - bool isnull = false; - UHeapTuple ttup = ExecGetUHeapTupleFromSlot(slot); - Oid chunk_id = DatumGetObjectId(UHeapFastGetAttr(ttup, ATTR_FIRST, RelationGetDescr(relation), &isnull)); - Assert(!isnull); - if (chunk_id == newOid) { - collides = true; - break; - } else { - *inconsistent = true; - if (scan->iscan != NULL && (!scan->iscan->xactStartedInRecovery)) { - scan->iscan->kill_prior_tuple = true; - BTScanOpaque so = (BTScanOpaque)scan->iscan->opaque; - if (so != NULL) { - BTScanPosItem indexItem = so->currPos.items[so->currPos.itemIndex]; - OffsetNumber indexOffset = indexItem.indexOffset; - ItemPointerData heapTid = indexItem.heapTid; - ereport(LOG, (errcode(ERRCODE_UNEXPECTED_CHUNK_VALUE), - errmsg("found toast chunk %u is not scan toast value %u of toast relation %u, will skip." 
- "toast index tuple at offset %hu with ctid (%u, %u) is marked dead.", - chunk_id, newOid, relation->rd_node.relNode, indexOffset, - ItemPointerGetBlockNumber(&heapTid), ItemPointerGetOffsetNumber(&heapTid)), - errcause("found toast chunk is not scan toast value."), - erraction("Check the toast chunk."))); - } - } - } - } + collides = UHeapSysIndexGetnextSlot(scan, ForwardScanDirection, slot); systable_endscan(scan); } while (collides); ExecDropSingleTupleTableSlot(slot); @@ -695,7 +665,6 @@ static Datum UHeapToastSaveDatum(Relation rel, Datum value, struct varlena *olde Pointer dval = DatumGetPointer(value); errno_t rc; int2 bucketid = InvalidBktId; - bool inconsistent = false; Assert(!VARATT_IS_EXTERNAL(value)); rc = memset_s(&chunkData, sizeof(chunkData), 0, sizeof(chunkData)); securec_check(rc, "", ""); @@ -767,7 +736,7 @@ static Datum UHeapToastSaveDatum(Relation rel, Datum value, struct varlena *olde */ if (!OidIsValid(rel->rd_toastoid)) { /* normal case: just choose an unused OID */ - toastPointer.va_valueid = UHeapGetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1, &inconsistent); + toastPointer.va_valueid = UHeapGetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1); } else { /* rewrite case: check to see if value was in old toast table */ toastPointer.va_valueid = InvalidOid; @@ -812,7 +781,7 @@ static Datum UHeapToastSaveDatum(Relation rel, Datum value, struct varlena *olde * old or new toast table */ do { - toastPointer.va_valueid = UHeapGetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1, &inconsistent); + toastPointer.va_valueid = UHeapGetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1); } while (UHeapToastIdValueIdExists(rel->rd_toastoid, toastPointer.va_valueid, bucketid)); } } @@ -855,7 +824,7 @@ static Datum UHeapToastSaveDatum(Relation rel, Datum value, struct varlena *olde * the TOAST table, since we don't bother to update anything else. 
*/ (void)index_insert(toastidx, tValues, tIsnull, &(toasttup->ctid), toastrel, - (toastidx->rd_index->indisunique && !inconsistent) ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); + toastidx->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); /* * Free memory @@ -913,8 +882,6 @@ static void UHeapToastDeleteDatum(Relation rel, Datum value, int options) SysScanDesc toastscan; UHeapTuple toasttup; int2 bucketid; - bool found = false; - bool isnull = false; if (!VARATT_IS_EXTERNAL_ONDISK_B(attr)) return; @@ -958,14 +925,6 @@ static void UHeapToastDeleteDatum(Relation rel, Datum value, int options) * Have a chunk, delete it */ toasttup = ExecGetUHeapTupleFromSlot(slot); - Oid chunk_id = DatumGetObjectId(UHeapFastGetAttr(toasttup, ATTR_FIRST, RelationGetDescr(toastrel), &isnull)); - Assert(!isnull); - if (chunk_id != toastPointer.va_valueid) { - ereport(LOG, (errmsg("Delete toast chunk %u is not scan toast chunk %u of toast relation is %u, will skip", - chunk_id, toastPointer.va_valueid, toastPointer.va_toastrelid))); - continue; - } - found = true; SimpleUHeapDelete(toastrel, &toasttup->ctid, SnapshotToast); Datum values[INDEX_MAX_KEYS]; @@ -979,11 +938,6 @@ static void UHeapToastDeleteDatum(Relation rel, Datum value, int options) index_delete(toastidx, values, isnulls, &toasttup->ctid, false); } - if (!found) { - ereport(LOG, (errmsg("Toast chunk %u of toast relation is %u delete 0 rows", toastPointer.va_valueid, - toastPointer.va_toastrelid))); - } - /* * End scan and close relations */ @@ -1052,31 +1006,6 @@ struct varlena *UHeapInternalToastFetchDatum(struct varatt_external toastPointer * Have a chunk, extract the sequence number and the data */ ttup = ExecGetUHeapTupleFromSlot(slot); - Oid chunk_id = DatumGetObjectId(UHeapFastGetAttr(ttup, ATTR_FIRST, toastTupDesc, &isnull)); - Assert(!isnull); - if (chunk_id != toastPointer.va_valueid) { - if (toastscan->iscan != NULL && (!toastscan->iscan->xactStartedInRecovery)) { - toastscan->iscan->kill_prior_tuple = true; - 
BTScanOpaque so = (BTScanOpaque)toastscan->iscan->opaque; - if (so != NULL) { - BTScanPosItem indexItem = so->currPos.items[so->currPos.itemIndex]; - OffsetNumber indexOffset = indexItem.indexOffset; - ItemPointerData heapTid = indexItem.heapTid; - ereport(LOG, (errcode(ERRCODE_UNEXPECTED_CHUNK_VALUE), - errmsg("UHeapInternalToastFetchDatum found toast chunk %u is not scan toast chunk %u of " - "toast relation %u toast size detail (%d, %d), will skip." - "toast index tuple at offset %hu with ctid (%u, %u) is marked dead," - "toast tuple ctid is (%u, %u).", - chunk_id, toastPointer.va_valueid, toastPointer.va_toastrelid, - toastPointer.va_rawsize, toastPointer.va_extsize, indexOffset, - ItemPointerGetBlockNumber(&heapTid), ItemPointerGetOffsetNumber(&heapTid), - ItemPointerGetBlockNumber(&(ttup->ctid)), ItemPointerGetOffsetNumber(&(ttup->ctid))), - errcause("found toast chunk is not scan toast value."), - erraction("Check the toast chunk."))); - } - } - continue; - } residx = DatumGetInt32(UHeapFastGetAttr(ttup, ATTR_SECOND, toastTupDesc, &isnull)); Assert(!isnull); chunk = DatumGetPointer(UHeapFastGetAttr(ttup, ATTR_THIRD, toastTupDesc, &isnull)); @@ -1263,13 +1192,6 @@ struct varlena *UHeapInternalToastFetchDatumSlice(struct varatt_external toastPo * Have a chunk, extract the sequence number and the data */ ttup = ExecGetUHeapTupleFromSlot(slot); - Oid chunk_id = DatumGetObjectId(UHeapFastGetAttr(ttup, ATTR_FIRST, toastTupDesc, &isnull)); - Assert(!isnull); - if (chunk_id != toastPointer.va_valueid) { - ereport(LOG, (errmsg("UHeapInternalToastFetchDatumSlice find toast chunk %u is not scan toast chunk %u of " - "toast relation %u, will skip", chunk_id, toastPointer.va_valueid, toastPointer.va_toastrelid))); - continue; - } residx = DatumGetInt32(UHeapFastGetAttr(ttup, CHUNK_ID_ATTR, toastTupDesc, &isnull)); Assert(!isnull); chunk = DatumGetPointer(UHeapFastGetAttr(ttup, CHUNK_DATA_ATTR, toastTupDesc, &isnull)); @@ -1380,20 +1302,7 @@ static bool 
UHeapToastRelValueidExists(Relation toastrel, Oid valueid) * Is there any such chunk? */ toastscan = systable_beginscan(toastrel, toastrel->rd_rel->reltoastidxid, true, SnapshotAny, 1, &toastkey); - while (UHeapSysIndexGetnextSlot(toastscan, ForwardScanDirection, slot)) { - bool isnull = false; - UHeapTuple ttup = ExecGetUHeapTupleFromSlot(slot); - Oid chunk_id = DatumGetObjectId(UHeapFastGetAttr(ttup, ATTR_FIRST, RelationGetDescr(toastrel), &isnull)); - Assert(!isnull); - if (chunk_id == valueid) { - result = true; - break; - } - else { - ereport(LOG, (errmsg("UHeapToastRelValueidExists find toast chunk %u is not scan toast chunk %u of toast " - "relation %u, will skip", chunk_id, valueid, toastrel->rd_id))); - } - } + result = UHeapSysIndexGetnextSlot(toastscan, ForwardScanDirection, slot); systable_endscan(toastscan); ExecDropSingleTupleTableSlot(slot); diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index fe22661513..55590a6867 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -196,7 +196,7 @@ static const struct { "%s, PageHeaderInfo: pd_lsn:%X/%X, pd_checksum:%u, pd_flags:%u, " \ "pd_lower:%u, pd_upper:%u, pd_special:%u, pd_pagesize_version:%u, pd_prune_xid:%u", \ _msg, pageHeader->pd_lsn.xlogid, \ - pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF + pageHeader->pd_lsn.xrecoff, \ + ((uint64)pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF) + pageHeader->pd_lsn.xrecoff, \ pageHeader->pd_checksum, pageHeader->pd_flags, pageHeader->pd_lower, pageHeader->pd_upper, \ pageHeader->pd_special, pageHeader->pd_pagesize_version, pageHeader->pd_prune_xid); \ } while (0) diff --git a/src/include/access/ustore/knl_upage.h b/src/include/access/ustore/knl_upage.h index fc492f8cf6..dbae60e162 100644 --- a/src/include/access/ustore/knl_upage.h +++ b/src/include/access/ustore/knl_upage.h @@ -222,9 +222,9 @@ "%s, UPageHeaderInfo: pd_lsn:%X/%X, pd_checksum:%u, " \ "pd_flags:%u, pd_lower:%u, " \ "pd_upper:%u, 
pd_special:%u, pd_pagesize_version:%u, potential_freespace:%u, td_count:%u, " \ - "pd_prune_xid:%lu, pd_xid_base:%lu, pd_multi_base:%lu" _msg, \ - pageHeader->pd_lsn.xlogid, \ - pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF + pageHeader->pd_lsn.xrecoff, \ + "pd_prune_xid:%lu, pd_xid_base:%lu, pd_multi_base:%lu", \ + _msg, pageHeader->pd_lsn.xlogid, \ + ((uint64)pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF) + pageHeader->pd_lsn.xrecoff, \ pageHeader->pd_checksum, pageHeader->pd_flags, pageHeader->pd_lower, pageHeader->pd_upper, \ pageHeader->pd_special, pageHeader->pd_pagesize_version, pageHeader->potential_freespace, \ pageHeader->td_count, pageHeader->pd_prune_xid, pageHeader->pd_xid_base, pageHeader->pd_multi_base); \ -- Gitee From 870f395b783a872962118dcf575688e55517d2b0 Mon Sep 17 00:00:00 2001 From: wangpingyun <2418191738@qq.com> Date: Thu, 5 Sep 2024 16:09:52 +0800 Subject: [PATCH 259/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dgs=5Fdump=E5=AF=BC?= =?UTF-8?q?=E5=87=BAB=E5=BA=93=E5=BC=80=E5=90=AFquote=5Fall=5Fidentifiers?= =?UTF-8?q?=E5=AF=BC=E8=87=B4=E5=88=97=E5=B1=9E=E6=80=A7=E4=B8=A2=E5=A4=B1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/pg_dump.cpp | 18 ++++++++++++++++++ src/bin/pg_dump/pg_dump.h | 1 + 2 files changed, 19 insertions(+) diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 6ce030f0a4..f32ecc84fe 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -20050,6 +20050,7 @@ static void dumpTableSchema(Archive* fout, TableInfo* tbinfo) appendPQExpBuffer(q, " DEFAULT %s", default_value); if (hasOnUpdateFeature) { + RemoveQuotes(onUpdate_value); if (pg_strcasecmp(onUpdate_value, "") != 0) { if (pg_strcasecmp(onUpdate_value, "pg_systimestamp()") == 0) { appendPQExpBuffer(q, " ON UPDATE CURRENT_TIMESTAMP"); @@ -24096,3 +24097,20 @@ bool TabExists(Archive* fout, const char* schemaName, const char* tabName) exist = 
isExistsSQLResult(AH->connection, query); return exist; } + +void RemoveQuotes(char *str) { + int len = strlen(str); + int readPtr = 0; + int writePtr = 0; + + while (readPtr < len) { + if (str[readPtr] == '"') { + readPtr++; // Skip the escape character '"' + } else { + str[writePtr] = str[readPtr]; + writePtr++; + readPtr++; + } + } + str[writePtr] = '\0'; // Add null terminator at the end +} diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index d32cf51f46..ea3562b5ab 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -644,6 +644,7 @@ extern void getSubscriptions(Archive *fout); extern EventTriggerInfo *getEventTriggers(Archive *fout, int *numEventTriggers); bool FuncExists(Archive* fout, const char* funcNamespace, const char* funcName); bool TabExists(Archive* fout, const char* schemaName, const char* tabName); +void RemoveQuotes(char *str); #ifdef GSDUMP_LLT void stopLLT(); -- Gitee From ca86a61c83e443994c8b6ed1efce0340c793d018 Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Mon, 2 Sep 2024 20:50:24 +0800 Subject: [PATCH 260/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=AE=9E=E6=97=B6?= =?UTF-8?q?=E6=9E=84=E5=BB=BA=E5=A4=84=E7=90=86ddl=E6=97=A5=E5=BF=97?= =?UTF-8?q?=E6=97=B6=EF=BC=8C=E5=8F=91=E7=94=9Ffailover=E5=8D=A1=E4=BD=8F?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=EF=BC=9B=E8=A7=A3=E5=86=B3=E5=AE=9E?= =?UTF-8?q?=E6=97=B6=E6=9E=84=E5=BB=BA=E6=9A=82=E5=81=9C=E5=9C=BA=E6=99=AF?= =?UTF-8?q?=E4=B8=8B=EF=BC=8C=E5=A6=82=E6=9E=9C=E5=8F=91=E7=94=9Ffailover?= =?UTF-8?q?=EF=BC=8C=E5=87=BA=E7=8E=B0=E7=9A=84=E6=97=A5=E5=BF=97=E6=BC=8F?= =?UTF-8?q?=E5=9B=9E=E6=94=BE=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ondemand_extreme_rto/page_redo.cpp | 57 +++++++++++++++---- .../ondemand_extreme_rto/xlog_read.cpp | 20 ++----- 2 files changed, 51 insertions(+), 26 deletions(-) diff --git 
a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index ac912e0602..51c0e69541 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -1441,22 +1441,28 @@ static bool WaitPrimaryDoCheckpointAndAllPRTrackEmpty(XLogRecParseState *preStat } while (!pg_atomic_compare_exchange_u64(&g_dispatcher->syncRecordPtr, &syncRecordPtr, ddlSyncPtr)); do { - if (pg_atomic_read_u32(&g_redoWorker->currentHtabBlockNum) == 0) { - // exit if hashmap manager already clear all hashmap + XLogRecPtr ckptRedoPtr = pg_atomic_read_u64(&g_dispatcher->ckptRedoPtr); + if (XLByteLE(ddlSyncPtr, ckptRedoPtr) && (pg_atomic_read_u32(&g_redoWorker->currentHtabBlockNum) == 0)) { + /* + * exit if primary redo loc is bigger than ddl loc and hashmap manager already clear all hashmap, + * do not wait seg queue empty because seg queue support concurrent modify + */ waitDone = true; break; } else if (SS_ONDEMAND_REALTIME_BUILD_FAILOVER) { - // exit if primary node crash - waitDone = false; + // exit if primary node crash, need refresh ckptRedoPtr + ckptRedoPtr = pg_atomic_read_u64(&g_dispatcher->ckptRedoPtr); + if (XLByteLE(ddlSyncPtr, ckptRedoPtr)) { + waitDone = true; + } else { + waitDone = false; + } break; } PageManagerProcHashmapPrune(); pg_usleep(100000L); /* 100 ms */ } while (true); - // clear all blocks in hashmap - g_redoWorker->nextPrunePtr = pg_atomic_read_u64(&g_dispatcher->ckptRedoPtr); - return waitDone; } @@ -1519,7 +1525,6 @@ static void OnDemandPageManagerRedoSegParseState(XLogRecParseState *preState) void PageManagerRedoParseState(XLogRecParseState *preState) { - PageManagerPruneIfRealtimeBuildFailover(); if (XLByteLT(preState->blockparse.blockhead.end_ptr, g_redoWorker->nextPrunePtr)) { ReleaseBlockParseStateIfNotReplay(preState); return; @@ -1532,6 +1537,7 @@ void 
PageManagerRedoParseState(XLogRecParseState *preState) return; } + PageManagerPruneIfRealtimeBuildFailover(); switch (preState->blockparse.blockhead.block_valid) { case BLOCK_DATA_MAIN_DATA_TYPE: case BLOCK_DATA_UNDO_TYPE: @@ -4064,9 +4070,9 @@ static XLogRecPtr RequestPrimaryCkptAndUpdateCkptRedoPtr() return ckptRedoPtr; } -const char *PauseStatus2Str(ondemand_recovery_pause_status_t pauseState) +const char *PauseStatus2Str(ondemand_recovery_pause_status_t status) { - switch (pauseState) { + switch (status) { case NOT_PAUSE: return "not_pause"; break; @@ -4088,6 +4094,30 @@ const char *PauseStatus2Str(ondemand_recovery_pause_status_t pauseState) } } +const char *RealtimeBuildStatus2Str(ondemand_realtime_build_status_t status) +{ + switch (status) { + case DISABLED: + return "DISABLED"; + break; + case BUILD_NORMAL: + return "BUILD_NORMAL"; + break; + case READY_TO_BUILD: + return "READY_TO_BUILD"; + break; + case BUILD_TO_DISABLED: + return "BUILD_TO_DISABLED"; + break; + case BUILD_TO_REDO: + return "BUILD_TO_REDO"; + break; + default: + return "UNKOWN"; + break; + } +} + static void OndemandPauseRedoAndRequestPrimaryDoCkpt(OndemandCheckPauseCB activatePauseFunc, OndemandCheckPauseCB continuePauseFunc, OndemandProcPauseStatusCB refreshPauseStatusFunc, OndemandProcPauseStatusCB logPauseStatusFunc, ondemand_recovery_pause_status_t pauseState, @@ -4107,6 +4137,13 @@ static void OndemandPauseRedoAndRequestPrimaryDoCkpt(OndemandCheckPauseCB activa // other redo workers will proc pause state directly if primary node crash if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { (void)RequestPrimaryCkptAndUpdateCkptRedoPtr(); + } else if (unlikely(onlyInRealtimeBuild && !SS_ONDEMAND_REALTIME_BUILD_NORMAL)) { + ereport(LOG, (errcode(ERRCODE_LOG), + errmsg("[On-demand] Ondemand realtime build status change to %s, give up request primary node do " + "checkpoint for pause type %s", + RealtimeBuildStatus2Str(g_instance.dms_cxt.SSRecoveryInfo.ondemand_realtime_build_status), + 
PauseStatus2Str(pauseState)))); + break; } if (refreshPauseStatusFunc != NULL) { diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp index 246cd10e71..200996195e 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp @@ -731,25 +731,13 @@ XLogRecord *XLogParallelReadNextRecord(XLogReaderState *xlogreader) /* In ondemand realtime build mode, loop back to retry. Otherwise, give up. */ if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { - xlogreader->preReadStartPtr = InvalidXlogPreReadStartPtr; - /* No valid record available from this source */ - streamFailCount++; - if (streamFailCount > SS_WAIT_TIME) { - XLogRecPtr primaryRedoLsn = InvalidXLogRecPtr; - primaryRedoLsn = SSOndemandRequestPrimaryCkptAndGetRedoLsn(); - streamFailCount = 0; - if (XLByteLT(t_thrd.xlog_cxt.ReadRecPtr, primaryRedoLsn)) { - ereport(WARNING, - (errmsg("read xlog record for %uth times at %X/%X", streamFailCount, - (uint32)(t_thrd.xlog_cxt.ReadRecPtr >> 32), (uint32)t_thrd.xlog_cxt.ReadRecPtr))); - } - } - retry = 0; - } else if (SS_ONDEMAND_REALTIME_BUILD_SHUTDOWN){ - // directly exit when ondemand_realtime_build_status = BUILD_TO_DISABLED, do not send endMark to dispatcher. 
xlogreader->preReadStartPtr = InvalidXlogPreReadStartPtr; retry = 0; + } else if (SS_ONDEMAND_REALTIME_BUILD_FAILOVER) { + xlogreader->preReadStartPtr = InvalidXlogPreReadStartPtr; + } else if (unlikely(SS_ONDEMAND_REALTIME_BUILD_SHUTDOWN)) { RedoInterruptCallBack(); + retry = 0; } if (retry <= 3) { -- Gitee From 03ff529a9f9c6a71f8c4de4ed0a0abca33b38408 Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Thu, 5 Sep 2024 19:56:01 +0800 Subject: [PATCH 261/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=9E=81=E8=87=B4RTO=E5=9C=BA=E6=99=AF=E6=9C=89?= =?UTF-8?q?=E6=A6=82=E7=8E=87=E9=81=87=E5=88=B0=E7=A9=BA=E6=8C=87=E9=92=88?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms_recovery.cpp | 5 +++-- .../access/transam/ondemand_extreme_rto/dispatcher.cpp | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp index ddfcb38234..9f96dbdbab 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_recovery.cpp @@ -141,8 +141,9 @@ bool SSRecoveryApplyDelay() if (SS_DISASTER_STANDBY_CLUSTER) { return true; } - - OnDemandNotifyHashMapPruneIfNeed(); + if (IsOndemandExtremeRtoMode) { + OnDemandNotifyHashMapPruneIfNeed(); + } while (g_instance.dms_cxt.SSRecoveryInfo.recovery_pause_flag || SS_ONDEMAND_RECOVERY_PAUSE) { /* might change the trigger file's location */ RedoInterruptCallBack(); diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp index a4aa050ffe..3c96cc7b10 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp @@ -723,6 +723,7 @@ static void 
StopRecoveryWorkers(int code, Datum arg) g_instance.dms_cxt.SSRecoveryInfo.ondemand_realtime_build_status = DISABLED; ereport(LOG, (errmsg("[On-demand] realtime build shutdown, set status to DISABLED."))); } + g_instance.dms_cxt.SSRecoveryInfo.ondemand_recovery_pause_status = NOT_PAUSE; SSDestroyRecoveryWorkers(); g_startupTriggerState = TRIGGER_NORMAL; g_readManagerTriggerFlag = TRIGGER_NORMAL; -- Gitee From 0e63729052f1391871c121ae1f77cd5af474c665 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Wed, 4 Sep 2024 10:37:39 +0800 Subject: [PATCH 262/347] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=8F=8C=E9=9B=86?= =?UTF-8?q?=E7=BE=A4build=20tblspc=20=EF=BC=88cherry=20picked=20commit=20f?= =?UTF-8?q?rom=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_ctl/backup.cpp | 43 +++++++++++------- src/bin/pg_ctl/pg_build.cpp | 15 +++---- .../storage/replication/basebackup.cpp | 44 +++++++++++++------ 3 files changed, 64 insertions(+), 38 deletions(-) diff --git a/src/bin/pg_ctl/backup.cpp b/src/bin/pg_ctl/backup.cpp index 458c3bb23a..82b50a0cb1 100755 --- a/src/bin/pg_ctl/backup.cpp +++ b/src/bin/pg_ctl/backup.cpp @@ -937,7 +937,9 @@ static bool ReceiveAndUnpackTarFile(PGconn* conn, PGresult* res, int rownum) */ filename[strlen(filename) - 1] = '\0'; /* Remove trailing slash */ if (is_dss_file(filename)) { - continue; + if (strstr(filename, "pg_replication") != NULL || strstr(filename, "pg_xlog") != NULL) { + continue; + } } if (symlink(©buf[bufOffset + 1], filename) != 0) { if (!streamwal || strcmp(filename + strlen(filename) - len, "/pg_xlog") != 0) { @@ -953,13 +955,13 @@ static bool ReceiveAndUnpackTarFile(PGconn* conn, PGresult* res, int rownum) * Symbolic link for relative tablespace. 
please refer to function _tarWriteHeader */ filename[strlen(filename) - 1] = '\0'; /* Remove trailing slash */ - - nRet = snprintf_s(absolut_path, - sizeof(absolut_path), - sizeof(absolut_path) - 1, - "%s/%s", - basedir, - ©buf[bufOffset + 1]); + if (is_dss_file(absolut_path)) { + nRet = snprintf_s(absolut_path, sizeof(absolut_path), sizeof(absolut_path) - 1, "%s", + ©buf[bufOffset + 1]); + } else { + nRet = snprintf_s(absolut_path, sizeof(absolut_path), sizeof(absolut_path) - 1, "%s/%s", + basedir, ©buf[bufOffset + 1]); + } securec_check_ss_c(nRet, "\0", "\0"); if (symlink(absolut_path, filename) != 0) { @@ -1512,8 +1514,13 @@ static bool BaseBackup(const char* dirname, uint32 term) char* relative = PQgetvalue(res, i, 3); char prefix[MAXPGPATH] = {'\0'}; if (*relative == '1') { - nRet = snprintf_s(prefix, MAXPGPATH, strlen(basedir) + 1, "%s/", basedir); - securec_check_ss_c(nRet, "\0", "\0"); + if (ss_instance_config.dss.enable_dss) { + nRet = snprintf_s(prefix, MAXPGPATH, strlen(dssdir) + 1, "%s/", dssdir); + securec_check_ss_c(nRet, "\0", "\0"); + } else { + nRet = snprintf_s(prefix, MAXPGPATH, strlen(basedir) + 1, "%s/", basedir); + securec_check_ss_c(nRet, "\0", "\0"); + } } nRet = snprintf_s(nodetablespaceparentpath, MAXPGPATH, @@ -1522,13 +1529,15 @@ static bool BaseBackup(const char* dirname, uint32 term) prefix, tablespacepath); securec_check_ss_c(nRet, "\0", "\0"); - nRet = snprintf_s(nodetablespacepath, - MAXPGPATH, - sizeof(nodetablespacepath) - 1, - "%s/%s_%s", - nodetablespaceparentpath, - TABLESPACE_VERSION_DIRECTORY, - pgxcnodename); + + if (ss_instance_config.dss.enable_dss) { + nRet = snprintf_s(nodetablespacepath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", + nodetablespaceparentpath, TABLESPACE_VERSION_DIRECTORY); + } else { + nRet = snprintf_s(nodetablespacepath, MAXPGPATH, MAXPGPATH - 1, "%s/%s_%s", + nodetablespaceparentpath, TABLESPACE_VERSION_DIRECTORY, + pgxcnodename); + } securec_check_ss_c(nRet, "\0", "\0"); bool varifySuccess = 
verify_dir_is_empty_or_create(nodetablespacepath); diff --git a/src/bin/pg_ctl/pg_build.cpp b/src/bin/pg_ctl/pg_build.cpp index 2e3c13f030..88c57145a6 100755 --- a/src/bin/pg_ctl/pg_build.cpp +++ b/src/bin/pg_ctl/pg_build.cpp @@ -1427,14 +1427,13 @@ void delete_datadir(const char* dirname) /* */ if (stat(linkpath, &st) == 0 && S_ISDIR(st.st_mode)) { - - nRet = snprintf_s(nodepath, - MAXPGPATH, - sizeof(nodepath) - 1, - "%s/%s_%s", - linkpath, - TABLESPACE_VERSION_DIRECTORY, - pgxcnodename); + if (ss_instance_config.dss.enable_dss) { + nRet = snprintf_s(nodepath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", + linkpath, TABLESPACE_VERSION_DIRECTORY); + } else { + nRet = snprintf_s(nodepath, MAXPGPATH, MAXPGPATH - 1, "%s/%s_%s", + linkpath, TABLESPACE_VERSION_DIRECTORY, pgxcnodename); + } securec_check_ss_c(nRet, "", ""); if (!rmtree(nodepath, true, true)) { diff --git a/src/gausskernel/storage/replication/basebackup.cpp b/src/gausskernel/storage/replication/basebackup.cpp index 395bc08d9b..4296d957ac 100755 --- a/src/gausskernel/storage/replication/basebackup.cpp +++ b/src/gausskernel/storage/replication/basebackup.cpp @@ -1174,8 +1174,13 @@ int64 sendTablespace(const char *path, bool sizeonly) /* If the tablespace went away while scanning, it's no error. 
*/ return 0; } - if (!sizeonly) - _tarWriteHeader(relativedirname, NULL, &statbuf); + if (!sizeonly) { + if (ENABLE_DSS) { + _tarWriteHeader(pathbuf, NULL, &statbuf); + } else { + _tarWriteHeader(relativedirname, NULL, &statbuf); + } + } size = BUILD_PATH_LEN; /* Size of the header just added */ /* Send all the files in the tablespace version directory */ @@ -1297,10 +1302,15 @@ bool IsSkipPath(const char * pathName) if (t_thrd.walsender_cxt.is_obsmode == true && strcmp(pathName, "./pg_replslot") == 0) return true; - /* skip pg_control in dss */ - if (ENABLE_DSS && strcmp(pathName, "+data/pg_control") == 0) { - return true; + if (ENABLE_DSS) { + char full_path[MAXPGPATH]; + int rc = snprintf_s(full_path, sizeof(full_path), sizeof(full_path) - 1, "%s/pg_control", + g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name); + securec_check_ss(rc, "\0", "\0"); + if (strcmp(pathName, full_path) == 0) { + return true; + } } return false; @@ -1397,7 +1407,7 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab struct stat statbuf; int64 size = 0; int rc = 0; - + char *dssdir = g_instance.attr.attr_storage.dss_attr.ss_dss_data_vg_name; DIR *dir = AllocateDir(path); while ((de = ReadDir(dir, path)) != NULL) { /* Skip special stuff */ @@ -1533,9 +1543,11 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab */ /* when ss dorado replication enabled, "+data/pg_replication/" also need to copy when backup */ - int pathNameLen = strlen("+data/pg_xlog"); - if (strcmp(pathbuf, "./pg_xlog") == 0 || strncmp(pathbuf, "+data/pg_xlog", pathNameLen) == 0 || - strcmp(pathbuf, "+data/pg_replication") == 0 || strcmp(pathbuf, "+data/pg_tblspc") == 0) { + if (strcmp(pathbuf, "./pg_xlog") == 0 || + (ENABLE_DSS && strncmp(pathbuf, dssdir, strlen(dssdir)) == 0 && + strstr(pathbuf + strlen(dssdir), "/pg_xlog") != NULL) || + (ENABLE_DSS && strcmp(pathbuf, dssdir) == 0 && + strstr(pathbuf + strlen(dssdir), "/pg_replication") != NULL)) { 
if (!sizeonly) { /* If pg_xlog is a symlink, write it as a directory anyway */ #ifndef WIN32 @@ -1635,7 +1647,9 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab } /* Allow symbolic links in pg_tblspc only */ - if (strcmp(path, "./pg_tblspc") == 0 && + if ((strcmp(path, "./pg_tblspc") == 0 || + (ENABLE_DSS && strncmp(pathbuf, dssdir, strlen(dssdir)) == 0 && + strstr(pathbuf + strlen(dssdir), "/pg_tblspc") != NULL)) && #ifndef WIN32 S_ISLNK(statbuf.st_mode) #else @@ -1652,8 +1666,8 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab ereport(ERROR, (errcode(ERRCODE_NAME_TOO_LONG), errmsg("symbolic link \"%s\" target is too long", pathbuf))); linkpath[rllen] = '\0'; - if (!sizeonly){ - if (ENABLE_DSS && is_dss_file(pathbuf)) { + if (!sizeonly || ENABLE_DSS){ + if (is_dss_file(pathbuf)) { _tarWriteHeader(pathbuf, linkpath, &statbuf); } else { _tarWriteHeader(pathbuf + basepathlen + 1, linkpath, &statbuf); @@ -1710,8 +1724,12 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab /* * skip sending directories inside pg_tblspc, if not required. 
*/ - if (strcmp(pathbuf, "./pg_tblspc") == 0 && !sendtblspclinks) + if (strcmp(pathbuf, "./pg_tblspc") == 0 || + (ENABLE_DSS && strcmp(pathbuf, dssdir) == 0 && + strstr(pathbuf + strlen(dssdir), "/pg_tblspc") != NULL && + !sendtblspclinks)) { skip_this_dir = true; + } if (!skip_this_dir) size += sendDir(pathbuf, basepathlen, sizeonly, tablespaces, sendtblspclinks); } else if (S_ISREG(statbuf.st_mode)) { -- Gitee From 2cb3c26242d2e773df921bb3c2d8c0f258a486ac Mon Sep 17 00:00:00 2001 From: zhang_xubo <2578876417@qq.com> Date: Thu, 5 Sep 2024 21:14:41 +0800 Subject: [PATCH 263/347] =?UTF-8?q?=E6=9B=B4=E6=94=B9=E5=AE=B9=E5=99=A8?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E6=9F=A5=E8=AF=A2=E5=AE=9E=E6=97=B6=E7=9A=84?= =?UTF-8?q?=E5=AE=9E=E4=BE=8B=E7=8A=B6=E6=80=81=E5=92=8C=E8=A7=92=E8=89=B2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docker/upgrade/upgrade_common.sh | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docker/upgrade/upgrade_common.sh b/docker/upgrade/upgrade_common.sh index 87dff17d60..3d601921cd 100644 --- a/docker/upgrade/upgrade_common.sh +++ b/docker/upgrade/upgrade_common.sh @@ -731,12 +731,8 @@ function start_dbnode() { } function query_dn_role() { - if [ -f "$GAUSS_TMP_PATH/temp_dn_role" ]; then - dn_role=$(grep local_role "${GAUSS_TMP_PATH}/temp_dn_role" | head -1 | awk '{print $3}') - else - gs_ctl query -D ${GAUSSDATA} > ${GAUSS_TMP_PATH}/temp_dn_role - dn_role=$(grep local_role "${GAUSS_TMP_PATH}/temp_dn_role" | head -1 | awk '{print $3}') - fi + gs_ctl query -D ${GAUSSDATA} > ${GAUSS_TMP_PATH}/temp_dn_role + dn_role=$(grep local_role "${GAUSS_TMP_PATH}/temp_dn_role" | head -1 | awk '{print $3}') if [[ "$dn_role" = "Normal" ]]; then dn_role="normal" -- Gitee From 45af8e8b6d8c73faebf393b4598d80e8f04b3a96 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Fri, 6 Sep 2024 09:47:03 +0800 Subject: [PATCH 264/347] =?UTF-8?q?9.6=20600=E6=8E=A8=E7=82=B9?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index c4ef2e8f87..73fdda081d 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ dms_commit_id=8b64ce46c8cfa9a978604b346b0d32b264c8ee6c -dss_commit_id=5d3e2aefdb4b51430a576bfdde057c1c08383afb -cbb_commit_id=7878a1919d2c0304a19f398aeddf0093dc147a37 +dss_commit_id=621eb9d6aac34726db404446511be2de9ae32a3f +cbb_commit_id=2ea0e4ea6349f00ca85793480ee1ced952c3c8c7 -- Gitee From 08adf068a52a7cd9795837c510f0f7eb3fe696f9 Mon Sep 17 00:00:00 2001 From: humengyao Date: Thu, 5 Sep 2024 20:16:35 -0700 Subject: [PATCH 265/347] =?UTF-8?q?5805=20=E4=BF=AE=E6=94=B9gms=5Fstats?= =?UTF-8?q?=E9=AB=98=E7=BA=A7=E5=8C=85cha=E6=94=B6=E9=9B=86=E4=B8=80?= =?UTF-8?q?=E5=8D=83=E5=BC=A0=E8=A1=A8=E6=8A=A5=E9=94=99bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/gms_stats/expected/gms_stats.out | 2 +- contrib/gms_stats/gms_stats.cpp | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/contrib/gms_stats/expected/gms_stats.out b/contrib/gms_stats/expected/gms_stats.out index 24df6425b5..0453fdeb38 100644 --- a/contrib/gms_stats/expected/gms_stats.out +++ b/contrib/gms_stats/expected/gms_stats.out @@ -17,7 +17,7 @@ end; / NOTICE: PL/SQL procedure successfully completed. 
CONTEXT: SQL statement "CALL gms_stats.gather_schema_stats('gms_stats_test')" -PL/pgSQL function inline_code_block line 3 at PERFORM +PL/pgSQL function inline_code_block line 2 at PERFORM select schemaname, tablename, attname, avg_width, most_common_vals, most_common_freqs from pg_stats where schemaname='gms_stats_test' order by tablename, attname; schemaname | tablename | attname | avg_width | most_common_vals | most_common_freqs ----------------+-----------------+---------+-----------+------------------+------------------- diff --git a/contrib/gms_stats/gms_stats.cpp b/contrib/gms_stats/gms_stats.cpp index fef7cd5915..ba589aff85 100644 --- a/contrib/gms_stats/gms_stats.cpp +++ b/contrib/gms_stats/gms_stats.cpp @@ -48,8 +48,9 @@ static List* GetRelationsInSchema(char *namespc) ScanKeyData skey[1]; SysScanDesc sysscan; HeapTuple tuple; - char* relname; + char* relname = NULL; List* tbl_relnames = NIL; + int len; Oid nspid; nspid = get_namespace_oid(namespc, true); @@ -62,7 +63,10 @@ static List* GetRelationsInSchema(char *namespc) while (HeapTupleIsValid(tuple = systable_getnext(sysscan))) { Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tuple); if (reltup->relkind == RELKIND_RELATION || reltup->relkind == RELKIND_MATVIEW) { - relname = reltup->relname.data; + len = strlen(reltup->relname.data); + relname = (char *) palloc(len + 1); + errno_t rc = strcpy_s(relname, len + 1, reltup->relname.data); + securec_check(rc, "\0", "\0"); tbl_relnames = lappend(tbl_relnames, relname); } } @@ -90,6 +94,7 @@ static void analyze_tables(char *namespc, List *relnames_list) stmt = (VacuumStmt*)parsetree; } vacuum(stmt, InvalidOid, true, NULL, true); + pfree_ext(relnames); list_free(parsetree_list); resetStringInfo(execute_sql); } -- Gitee From 6bfff953a180a1c6ab6f2285c76229d86f5482d2 Mon Sep 17 00:00:00 2001 From: wofanzheng <2399541971@qq.com> Date: Fri, 6 Sep 2024 17:38:38 +0800 Subject: [PATCH 266/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=9F=BA=E7=A1=80?= 
=?UTF-8?q?=E8=A1=A8=E5=92=8Ctoast=E8=A1=A8=E7=9A=84=E5=AD=98=E5=82=A8?= =?UTF-8?q?=E5=BC=95=E6=93=8E=E4=B8=8D=E4=B8=80=E8=87=B4=E6=AF=94=E8=BE=83?= =?UTF-8?q?=E9=94=99=E8=AF=AF=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/common/reloptions.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index 3c3b356977..1db05d8fcd 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -991,7 +991,7 @@ Datum transformRelOptions(Datum oldOptions, List *defList, const char *namspace, const char *parentStorageType = (storageType == NULL) ? (u_sess->attr.attr_sql.enable_default_ustore_table ? "ustore" : "astore") : storageType; - if (parentStorageType != toastStorageType) { + if (pg_strcasecmp(parentStorageType, toastStorageType) != 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("parent storage type is %s but toast storage type is %s, toast should use the " "same storage type as its parent", -- Gitee From 864a80feebc09a6da62ea0f25b57b7300aaff16c Mon Sep 17 00:00:00 2001 From: zhangting Date: Fri, 6 Sep 2024 17:51:48 +0800 Subject: [PATCH 267/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9memset=5Fs=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E8=AF=AD=E6=B3=95=E9=94=99=E8=AF=AFCIPHER=5FLEN=20?= =?UTF-8?q?=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/libpq/be-secure.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/backend/libpq/be-secure.cpp b/src/common/backend/libpq/be-secure.cpp index 9504fa20c3..7121597d24 100644 --- a/src/common/backend/libpq/be-secure.cpp +++ b/src/common/backend/libpq/be-secure.cpp @@ -1403,7 +1403,7 @@ static void 
init_server_ssl_passwd(SSL_CTX* pstContext, bool enc) #endif char *keyfile; errno_t rc = 0; - rc = memset_s(u_sess->libpq_cxt.server_key, CIPHER_H + 1, 0, CIPHER_H + 1); + rc = memset_s(u_sess->libpq_cxt.server_key, CIPHER_LEN + 1, 0, CIPHER_LEN + 1); securec_check(rc, "\0", "\0"); #ifdef USE_TASSL -- Gitee From a40c22e0da5b3382b9c413dc4c7b1ce52760fa14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Fri, 6 Sep 2024 17:13:31 +0800 Subject: [PATCH 268/347] =?UTF-8?q?revert=20xstore=20deferrable=20constrai?= =?UTF-8?q?nts=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/indexcmds.cpp | 9 +++++++++ src/gausskernel/optimizer/commands/tablecmds.cpp | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/src/gausskernel/optimizer/commands/indexcmds.cpp b/src/gausskernel/optimizer/commands/indexcmds.cpp index 07bd4f351b..a7c6bde967 100644 --- a/src/gausskernel/optimizer/commands/indexcmds.cpp +++ b/src/gausskernel/optimizer/commands/indexcmds.cpp @@ -839,6 +839,15 @@ ObjectAddress DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, if (has_dedup_opt) { elog(ERROR, "Index deduplication is not supported for ustore."); } + if (stmt->deferrable == true) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_EXECUTOR), + errmsg("Ustore table does not support to set deferrable."), + errdetail("N/A"), + errcause("feature not supported"), + erraction("check constraints of columns"))); + } } if (strcmp(stmt->accessMethod, "ubtree") == 0 && diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 88e3c4214b..779ce0c8fc 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -14936,6 +14936,16 @@ static ObjectAddress ATExecAddConstraint(List** 
wqueue, AlteredTableInfo* tab, R (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("column store unsupport constraint \"%s\"", GetConstraintType(newConstraint->contype)))); + if (rel->rd_tam_ops == TableAmUstore && newConstraint->deferrable == true) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COMMAND), + errmsg("Ustore table does not support to set deferrable."), + errdetail("N/A"), + errcause("feature not supported"), + erraction("check constraints of columns"))); + } + /* * Currently, we only expect to see CONSTR_CHECK and CONSTR_FOREIGN nodes * arriving here (see the preprocessing done in parse_utilcmd.c). Use a -- Gitee From 23dfdb252ce449f2c8e0b624f491fca5e4af08fb Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Wed, 4 Sep 2024 20:48:52 +0800 Subject: [PATCH 269/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E5=AF=B9=E8=B1=A1=E4=B8=ADsubstring=E7=9A=84=E7=94=A8=E6=B3=95?= =?UTF-8?q?=E4=BB=A5=E9=80=82=E9=85=8D=E6=8F=92=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/information_schema.sql | 4 ++-- src/common/backend/utils/init/globals.cpp | 2 +- .../rollback_catalog_maindb_92_952.sql | 11 +++++++++++ .../rollback_catalog_otherdb_92_952.sql | 11 +++++++++++ .../upgrade_catalog_maindb_92_952.sql | 11 +++++++++++ .../upgrade_catalog_otherdb_92_952.sql | 11 +++++++++++ 6 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_952.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_952.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_952.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_952.sql diff --git a/src/common/backend/catalog/information_schema.sql 
b/src/common/backend/catalog/information_schema.sql index 6a5b03d6ab..c8b24cbea0 100644 --- a/src/common/backend/catalog/information_schema.sql +++ b/src/common/backend/catalog/information_schema.sql @@ -179,7 +179,7 @@ CREATE FUNCTION _pg_interval_type(typid oid, mod int4) RETURNS text AS $$SELECT CASE WHEN $1 IN (1186) /* interval */ - THEN pg_catalog.upper(substring(pg_catalog.format_type($1, $2) from 'interval[()0-9]* #"%#"' for '#')) + THEN pg_catalog.upper(pg_catalog.substring(pg_catalog.format_type($1, $2), 'interval[()0-9]* #"%#"', '#')) ELSE null::text END$$; @@ -1558,7 +1558,7 @@ INSERT INTO sql_implementation_info VALUES ('10003', 'CATALOG NAME', NULL, 'Y', INSERT INTO sql_implementation_info VALUES ('10004', 'COLLATING SEQUENCE', NULL, (SELECT default_collate_name FROM character_sets), NULL); INSERT INTO sql_implementation_info VALUES ('23', 'CURSOR COMMIT BEHAVIOR', 1, NULL, 'close cursors and retain prepared statements'); INSERT INTO sql_implementation_info VALUES ('2', 'DATA SOURCE NAME', NULL, '', NULL); -INSERT INTO sql_implementation_info VALUES ('17', 'DBMS NAME', NULL, (select trim(trailing ' ' from substring(pg_catalog.version() from '^[^0-9]*'))), NULL); +INSERT INTO sql_implementation_info VALUES ('17', 'DBMS NAME', NULL, (select trim(trailing ' ' from pg_catalog.substring(pg_catalog.version(), '^[^0-9]*'))), NULL); INSERT INTO sql_implementation_info VALUES ('18', 'DBMS VERSION', NULL, '???', NULL); -- filled by initdb INSERT INTO sql_implementation_info VALUES ('26', 'DEFAULT TRANSACTION ISOLATION', 2, NULL, 'READ COMMITTED; user-settable'); INSERT INTO sql_implementation_info VALUES ('28', 'IDENTIFIER CASE', 3, NULL, 'stored in mixed case - case sensitive'); diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 08248b6ef7..e8fb0239df 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,7 +76,7 @@ bool will_shutdown = false; * 
********************************************/ -const uint32 GRAND_VERSION_NUM = 92951; +const uint32 GRAND_VERSION_NUM = 92952; /******************************************** * 2.VERSION NUM FOR EACH FEATURE diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_952.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_952.sql new file mode 100644 index 0000000000..b086e75b32 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_952.sql @@ -0,0 +1,11 @@ +CREATE OR REPLACE FUNCTION information_schema._pg_interval_type(typid oid, mod int4) RETURNS text + LANGUAGE sql + IMMUTABLE + NOT FENCED + RETURNS NULL ON NULL INPUT + AS +$$SELECT + CASE WHEN $1 IN (1186) /* interval */ + THEN pg_catalog.upper(substring(pg_catalog.format_type($1, $2) from 'interval[()0-9]* #"%#"' for '#')) + ELSE null::text + END$$; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_952.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_952.sql new file mode 100644 index 0000000000..b086e75b32 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_952.sql @@ -0,0 +1,11 @@ +CREATE OR REPLACE FUNCTION information_schema._pg_interval_type(typid oid, mod int4) RETURNS text + LANGUAGE sql + IMMUTABLE + NOT FENCED + RETURNS NULL ON NULL INPUT + AS +$$SELECT + CASE WHEN $1 IN (1186) /* interval */ + THEN pg_catalog.upper(substring(pg_catalog.format_type($1, $2) from 'interval[()0-9]* #"%#"' for '#')) + ELSE null::text + END$$; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_952.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_952.sql new file mode 100644 index 0000000000..4452b4b451 --- /dev/null +++ 
b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_952.sql @@ -0,0 +1,11 @@ +CREATE OR REPLACE FUNCTION information_schema._pg_interval_type(typid oid, mod int4) RETURNS text + LANGUAGE sql + IMMUTABLE + NOT FENCED + RETURNS NULL ON NULL INPUT + AS +$$SELECT + CASE WHEN $1 IN (1186) /* interval */ + THEN pg_catalog.upper(pg_catalog.substring(pg_catalog.format_type($1, $2), 'interval[()0-9]* #"%#"', '#')) + ELSE null::text + END$$; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_952.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_952.sql new file mode 100644 index 0000000000..4452b4b451 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_952.sql @@ -0,0 +1,11 @@ +CREATE OR REPLACE FUNCTION information_schema._pg_interval_type(typid oid, mod int4) RETURNS text + LANGUAGE sql + IMMUTABLE + NOT FENCED + RETURNS NULL ON NULL INPUT + AS +$$SELECT + CASE WHEN $1 IN (1186) /* interval */ + THEN pg_catalog.upper(pg_catalog.substring(pg_catalog.format_type($1, $2), 'interval[()0-9]* #"%#"', '#')) + ELSE null::text + END$$; \ No newline at end of file -- Gitee From 67702f955dcebeb9b798e52a72ed060ef1ec52ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Sat, 7 Sep 2024 17:25:46 +0800 Subject: [PATCH 270/347] =?UTF-8?q?fix=20upgrade=20diff=20=EF=BC=88cherry?= =?UTF-8?q?=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../rollback_catalog_maindb_92_935.sql | 17 +++++++++-------- .../rollback_catalog_otherdb_92_935.sql | 17 +++++++++-------- .../upgrade_catalog_maindb_92_935.sql | 17 +++++++---------- .../upgrade_catalog_otherdb_92_935.sql | 17 +++++++---------- 4 files changed, 32 insertions(+), 36 deletions(-) diff --git 
a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql index 5ab58ef599..87e1cb7d16 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_935.sql @@ -1,14 +1,15 @@ -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +/*------ add sys fuction gs_undo_translot_dump_slot ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4541; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_slot'; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(IN zone_id int4, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_slot'; -DROP FUNCTION IF EXISTS 
pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +/*------ add sys fuction gs_undo_translot_dump_xid ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(IN zone_id xid, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_xid'; /*------ add sys fuction gs_stat_undo ------*/ DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql index 5ab58ef599..87e1cb7d16 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_935.sql @@ -1,14 +1,15 @@ -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, 
OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +/*------ add sys fuction gs_undo_translot_dump_slot ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4541; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_slot'; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(IN zone_id int4, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_slot'; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +/*------ add sys fuction gs_undo_translot_dump_xid ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT 
text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(IN zone_id xid, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_xid'; /*------ add sys fuction gs_stat_undo ------*/ DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql index f98c1eba3e..e9afb6f727 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_935.sql @@ -1,18 +1,15 @@ /*------ add sys fuction gs_undo_translot_dump_slot ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT 
slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4541; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_slot'; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(IN zone_id int4, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_slot'; /*------ add sys fuction gs_undo_translot_dump_xid ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid 
text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(IN zone_id xid, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_xid'; /*------ add sys fuction gs_stat_undo ------*/ DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql index f98c1eba3e..e9afb6f727 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_935.sql @@ -1,18 +1,15 @@ /*------ add sys fuction gs_undo_translot_dump_slot ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, 
int4, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4541; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(int4, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_slot'; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_slot(IN zone_id int4, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_slot'; /*------ add sys fuction gs_undo_translot_dump_xid ------*/ -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT lsn text, OUT gs_undo_translot oid) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT text, OUT oid) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4438; -CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot_dump_xid(xid, boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT gs_undo_translot oid) -RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot_dump_xid'; +CREATE OR REPLACE 
FUNCTION pg_catalog.gs_undo_translot_dump_xid(IN zone_id xid, IN read_memory boolean, OUT zone_id oid, OUT slot_xid text, OUT start_undoptr text, OUT end_undoptr text, OUT slot_ptr text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 1 as 'gs_undo_translot_dump_xid'; /*------ add sys fuction gs_stat_undo ------*/ DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); -- Gitee From e13da19b10a776a02fa31644856612fb51a1b87e Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Fri, 6 Sep 2024 15:19:59 +0800 Subject: [PATCH 271/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dhashmap=E4=B8=8D?= =?UTF-8?q?=E8=B6=B3=E6=83=85=E5=86=B5=E4=B8=8B=EF=BC=8C=E5=AE=9E=E6=97=B6?= =?UTF-8?q?=E6=9E=84=E5=BB=BA=E5=8F=AF=E8=83=BD=E5=8D=A1=E4=BD=8F=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ondemand_extreme_rto/page_redo.cpp | 8 ++- .../ondemand_extreme_rto/redo_utils.cpp | 69 ++++++++++++++----- .../access/ondemand_extreme_rto/redo_utils.h | 1 + 3 files changed, 56 insertions(+), 22 deletions(-) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index fff2cb4d4f..e263dad3f3 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -1216,8 +1216,6 @@ void PageManagerProcLsnForwarder(RedoItem *lsnForwarder) PageManagerAddRedoItemToSegWorkers(lsnForwarder); PageManagerAddRedoItemToHashMapManager(lsnForwarder); - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - PageManagerPruneIfRealtimeBuildFailover(); /* wait hashmapmng prune and segworker distribute segrecord to hashmap */ uint32 refCount; @@ -3421,6 +3419,7 @@ void HashMapManagerMain() SPSCBlockingQueuePop(g_dispatcher->segQueue); } + PageRedoPipeline *myRedoLine = 
&g_dispatcher->pageLines[g_redoWorker->slotId]; /** * step2: prune idle hashmap * @@ -3431,7 +3430,7 @@ void HashMapManagerMain() // the head of redoItem hashmap linked list ondemand_htab_ctrl_t *nextHtabCtrl = g_instance.comm_cxt.predo_cxt.redoItemHashCtrl[g_redoWorker->slotId]; // the tail of redoItem hashmap linked list - ondemand_htab_ctrl_t *targetHtabCtrl = g_dispatcher->pageLines[g_redoWorker->slotId].managerThd->redoItemHashCtrl; + ondemand_htab_ctrl_t *targetHtabCtrl = myRedoLine->managerThd->redoItemHashCtrl; // the processing redoItem hashmap ondemand_htab_ctrl_t *procHtabCtrl = nextHtabCtrl; while (nextHtabCtrl != targetHtabCtrl) { @@ -3458,6 +3457,9 @@ void HashMapManagerMain() } CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + // step4: release for global xlog memory + OndemandGlobalXLogMemReleaseIfNeed(&myRedoLine->batchThd->parseManager.memctl); + RedoInterruptCallBack(); ADD_ABNORMAL_POSITION(12); pg_usleep(500000L); /* 500 ms */ diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp index b066c3bec6..a8fb1ceeb2 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp @@ -121,44 +121,75 @@ static RedoMemSlot *OndemandGlobalXLogMemAlloc() return NULL; } -static void OndemandGlobalXLogMemReleaseIfNeed(RedoMemManager *memctl) +static RedoMemSlot *GetTailSlot(RedoMemManager *memctl, Buffer headSlotBuffer) +{ + RedoMemSlot *tailSlot = &memctl->memslot[headSlotBuffer - 1]; + while (tailSlot->freeNext != InvalidBuffer) { + tailSlot = &memctl->memslot[tailSlot->freeNext - 1]; + } + return tailSlot; +} + +static void InsertBatchXLogMemToSlot(RedoMemManager *memctl, Buffer *targetSlot, Buffer headSlotBuffer) +{ + RedoMemSlot *tailSlot = GetTailSlot(memctl, headSlotBuffer); + Buffer oldFirst = AtomicReadBuffer(targetSlot); + 
pg_memory_barrier(); + do { + AtomicWriteBuffer(&tailSlot->freeNext, oldFirst); + } while (!AtomicCompareExchangeBuffer(targetSlot, &oldFirst, headSlotBuffer)); +} + +// used in hashmap manager, global firstreleaseslot should not be InvalidBuffer in healthy condition +void OndemandGlobalXLogMemReleaseIfNeed(RedoMemManager *memctl) { RedoMemManager *glbmemctl = &ondemand_extreme_rto::g_dispatcher->parseManager.memctl; if (AtomicReadBuffer(&glbmemctl->firstreleaseslot) == InvalidBuffer) { - Buffer firstreleaseslot = AtomicExchangeBuffer(&memctl->firstreleaseslot, InvalidBuffer); - Buffer invalidbuffer = InvalidBuffer; - if (!AtomicCompareExchangeBuffer(&glbmemctl->firstreleaseslot, &invalidbuffer, firstreleaseslot)) { - AtomicWriteBuffer(&memctl->firstreleaseslot, firstreleaseslot); + Buffer firstReleaseSlotBuffer = AtomicExchangeBuffer(&memctl->firstreleaseslot, InvalidBuffer); + if (firstReleaseSlotBuffer == InvalidBuffer) { + // set pipeline firstfreeslot to pipeline firstreleaseslot, for next loop to global firstreleaseslot + Buffer firstFreeSlotBuffer = AtomicExchangeBuffer(&memctl->firstfreeslot, InvalidBuffer); + if (firstFreeSlotBuffer != InvalidBuffer) { + InsertBatchXLogMemToSlot(memctl, &memctl->firstreleaseslot, firstFreeSlotBuffer); + } + } else { + // set pipeline firstreleaseslot for global firstreleaseslot + Buffer invalidBuffer = InvalidBuffer; + if (!AtomicCompareExchangeBuffer(&glbmemctl->firstreleaseslot, &invalidBuffer, firstReleaseSlotBuffer)) { + // exchange failed, give back + InsertBatchXLogMemToSlot(memctl, &memctl->firstreleaseslot, firstReleaseSlotBuffer); + } } } } RedoMemSlot *OndemandXLogMemAlloc(RedoMemManager *memctl) { - RedoMemSlot *nextfreeslot = NULL; + RedoMemSlot *nextFreeSlot = NULL; do { - if (memctl->firstfreeslot == InvalidBuffer) { - memctl->firstfreeslot = AtomicExchangeBuffer(&memctl->firstreleaseslot, InvalidBuffer); + if (AtomicReadBuffer(&memctl->firstfreeslot) == InvalidBuffer) { + 
AtomicWriteBuffer(&memctl->firstfreeslot, AtomicExchangeBuffer(&memctl->firstreleaseslot, InvalidBuffer)); pg_read_barrier(); } - if (memctl->firstfreeslot != InvalidBuffer) { - nextfreeslot = &(memctl->memslot[memctl->firstfreeslot - 1]); - memctl->firstfreeslot = nextfreeslot->freeNext; - nextfreeslot->freeNext = InvalidBuffer; + Buffer firstFreeSlotBuffer = AtomicExchangeBuffer(&memctl->firstfreeslot, InvalidBuffer); + if (firstFreeSlotBuffer != InvalidBuffer) { + nextFreeSlot = &(memctl->memslot[firstFreeSlotBuffer - 1]); + AtomicWriteBuffer(&memctl->firstfreeslot, nextFreeSlot->freeNext); + nextFreeSlot->freeNext = InvalidBuffer; } - if (nextfreeslot == NULL) { - nextfreeslot = OndemandGlobalXLogMemAlloc(); + if (nextFreeSlot == NULL) { + nextFreeSlot = OndemandGlobalXLogMemAlloc(); } if (memctl->doInterrupt != NULL) { memctl->doInterrupt(); } - } while (nextfreeslot == NULL); + } while (nextFreeSlot == NULL); pg_atomic_fetch_add_u32(&memctl->usedblknum, 1); - return nextfreeslot; + return nextFreeSlot; } void OndemandXLogMemRelease(RedoMemManager *memctl, Buffer bufferid) @@ -181,8 +212,6 @@ void OndemandXLogMemRelease(RedoMemManager *memctl, Buffer bufferid) AtomicWriteBuffer(&bufferslot->freeNext, oldFirst); } while (!AtomicCompareExchangeBuffer(&releasememctl->firstreleaseslot, &oldFirst, bufferid)); pg_atomic_fetch_sub_u32(&memctl->usedblknum, 1); - - OndemandGlobalXLogMemReleaseIfNeed(memctl); } @@ -261,6 +290,7 @@ XLogRecParseState *OndemandXLogParseBufferAllocList(RedoParseManager *parsemanag void OndemandXLogParseBufferRelease(XLogRecParseState *recordstate) { if (recordstate->distributeStatus == XLOG_SKIP_DISTRIBUTE) { + Assert(!SS_ONDEMAND_REALTIME_BUILD_NORMAL); // alloc in pageRedoWorker or backends pfree(recordstate); return; @@ -335,7 +365,8 @@ long RedoRelationForOndemandExtremeRTO(Relation relation) { * @param dbId the dbNode of database * @return the redoEntry num of target database */ -long RedoDatabaseForOndemandExtremeRTO(Oid dbNode) { 
+long RedoDatabaseForOndemandExtremeRTO(Oid dbNode) +{ long entryNum = 0; Assert(OidIsValid(dbNode)); diff --git a/src/include/access/ondemand_extreme_rto/redo_utils.h b/src/include/access/ondemand_extreme_rto/redo_utils.h index 3dcbc83ee3..07d72b51ca 100644 --- a/src/include/access/ondemand_extreme_rto/redo_utils.h +++ b/src/include/access/ondemand_extreme_rto/redo_utils.h @@ -54,5 +54,6 @@ void OnDemandUpdateRealtimeBuildPrunePtr(); void OnDemandNotifyHashMapPruneIfNeed(); XLogRecParseType GetCurrentXLogRecParseType(XLogRecParseState *preState); bool IsRecParseStateHaveChildState(XLogRecParseState *checkState); +void OndemandGlobalXLogMemReleaseIfNeed(RedoMemManager *memctl); #endif /* ONDEMAND_EXTREME_RTO_REDO_UTILS_H */ \ No newline at end of file -- Gitee From fc9f6133d6223be932a610f3c12d039a669df076 Mon Sep 17 00:00:00 2001 From: zzh Date: Mon, 9 Sep 2024 09:56:08 +0800 Subject: [PATCH 272/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dcreate=5Fmaster=5Fsla?= =?UTF-8?q?ve.sh=E8=84=9A=E6=9C=AC=E6=89=A7=E8=A1=8C=E9=94=99=E8=AF=AF?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitattributes | 1 + docker/dockerfiles/create_master_slave.sh | 392 +++++++++++----------- 2 files changed, 197 insertions(+), 196 deletions(-) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..9503a00878 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +docker/dockerfiles/create_master_slave.sh text=auto \ No newline at end of file diff --git a/docker/dockerfiles/create_master_slave.sh b/docker/dockerfiles/create_master_slave.sh index f09f36ee46..7a2ab7050b 100644 --- a/docker/dockerfiles/create_master_slave.sh +++ b/docker/dockerfiles/create_master_slave.sh @@ -1,196 +1,196 @@ -#!/bin/bash -# create master and slave -# Copyright (c) Huawei Technologies Co., Ltd. 2020-2028. All rights reserved. -# -#openGauss is licensed under Mulan PSL v2. 
-#You can use this software according to the terms and conditions of the Mulan PSL v2. -#You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -#------------------------------------------------------------------------- -# -# create_master_slave.sh -# create master and slave -# -# IDENTIFICATION -# GaussDBKernel/server/docker/dockerfiles/create_master_slave.sh -# -#------------------------------------------------------------------------- - -#set OG_SUBNET,GS_PASSWORD,MASTER_IP,SLAVE_1_IP,MASTER_HOST_PORT,MASTER_LOCAL_PORT,SLAVE_1_HOST_PORT,SLAVE_1_LOCAL_PORT,MASTER_NODENAME,SLAVE_NODENAME - -# Define default values -NETWORK_NAME="opengaussnetwork" -OG_SUBNET="172.11.0.0/24" -GS_PASSWORD="Enmo@123" -MASTER_IP="172.11.0.101" -MASTER_HOST_PORT="5432" -MASTER_NODENAME="dn_6001" -VERSION="5.0.0" -# Define default values for slaves -SLAVE_IP=("172.11.0.102" "172.11.0.103" "172.11.0.104" "172.11.0.105" "172.11.0.106" "172.11.0.107" "172.11.0.108" "172.11.0.109") -SLAVE_HOST_PORT=("6432" "7432" "8432" "9432" "10432" "11432" "12432" "13432") -SLAVE_NODENAME=("dn_6002" "dn_6003" "dn_6004" "dn_6005" "dn_6006" "dn_6007" "dn_6008" "dn_6009") -SLAVE_COUNT=1 - -ARGS=$(getopt -o h --long OG_SUBNET:,GS_PASSWORD:,MASTER_IP:,MASTER_HOST_PORT:,MASTER_LOCAL_PORT:,MASTER_NODENAME:,VERSION:,SLAVE_COUNT:,NETWORK_NAME: -- "$@") -if [ $? 
!= 0 ]; then - echo "参数解析错误" - exit 1 -fi -eval set -- "$ARGS" - -# Use getopts to process command line arguments -while true; do - case "$1" in - -h) - echo "Usage: $0 [--OG_SUBNET value] [--GS_PASSWORD value] [--MASTER_IP value] [--MASTER_HOST_PORT value] [--MASTER_NODENAME value] [--VERSION value] [--SLAVE_COUNT value] [--SLAVE_NODENAME value] [--SLAVE_IP value] [--SLAVE_HOST_PORT value] [--NETWORK_NAME value]" - shift - ;; - --OG_SUBNET) - OG_SUBNET="$2" - shift 2 - ;; - --GS_PASSWORD) - GS_PASSWORD="$2" - shift 2 - ;; - --MASTER_IP) - MASTER_IP="$2" - shift 2 - ;; - --MASTER_HOST_PORT) - MASTER_HOST_PORT="$2" - shift 2 - ;; - --MASTER_LOCAL_PORT) - MASTER_LOCAL_PORT="$2" - shift 2 - ;; - --MASTER_NODENAME) - MASTER_NODENAME="$2" - shift 2 - ;; - --VERSION) - VERSION="$2" - shift 2 - ;; - --SLAVE_COUNT) - SLAVE_COUNT="$2" - shift 2 - ;; - --NETWORK_NAME) - NETWORK_NAME="$2" - shift 2 - ;; - --) - shift - break - ;; - *) - echo "Invalid option: -$OPTARG" >&2 - exit 1 - ;; - esac -done - -# Output the set values -echo "OG_SUBNET set $OG_SUBNET" -echo "GS_PASSWORD set $GS_PASSWORD" -echo "MASTER_IP set $MASTER_IP" -echo "MASTER_HOST_PORT set $MASTER_HOST_PORT" -echo "MASTER_NODENAME set $MASTER_NODENAME" -echo "openGauss VERSION set $VERSION" -echo "SLAVE_COUNT set $SLAVE_COUNT" -echo "SLAVE_NODENAME set $SLAVE_NODENAME" -echo "SLAVE_IP set $SLAVE_IP" -echo "SLAVE_HOST_PORT set $SLAVE_HOST_PORT" -echo "NETWORK_NAME set $NETWORK_NAME" - -# Loop through and process each slave's information -for (( i=0; i&2 + exit 1 + ;; + esac +done + +# Output the set values +echo "OG_SUBNET set $OG_SUBNET" +echo "GS_PASSWORD set $GS_PASSWORD" +echo "MASTER_IP set $MASTER_IP" +echo "MASTER_HOST_PORT set $MASTER_HOST_PORT" +echo "MASTER_NODENAME set $MASTER_NODENAME" +echo "openGauss VERSION set $VERSION" +echo "SLAVE_COUNT set $SLAVE_COUNT" +echo "SLAVE_NODENAME set $SLAVE_NODENAME" +echo "SLAVE_IP set $SLAVE_IP" +echo "SLAVE_HOST_PORT set $SLAVE_HOST_PORT" +echo "NETWORK_NAME set 
$NETWORK_NAME" + +# Loop through and process each slave's information +for (( i=0; i Date: Mon, 9 Sep 2024 10:52:53 +0800 Subject: [PATCH 273/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9E=81=E8=87=B4RTO?= =?UTF-8?q?=E5=A4=87=E6=9C=BA=E8=AF=BB=E6=8A=A5=E9=94=99=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/redo/standby_read/block_info_proc.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/gausskernel/storage/access/redo/standby_read/block_info_proc.cpp b/src/gausskernel/storage/access/redo/standby_read/block_info_proc.cpp index c5bc9a5c9b..a364d964d0 100644 --- a/src/gausskernel/storage/access/redo/standby_read/block_info_proc.cpp +++ b/src/gausskernel/storage/access/redo/standby_read/block_info_proc.cpp @@ -302,11 +302,6 @@ bool get_page_lsn_info(const BufferTag& buf_tag, BufferAccessStrategy strategy, get_lsn_info_for_read(buf_tag, block_meta_info->base_page_info_list.prev, lsn_info, read_lsn); UnlockReleaseBuffer(buf); - if (lsn_info->lsn_num == 0 && XLogRecPtrIsInvalid(lsn_info->base_page_lsn)) { - ereport(ERROR, ((errmsg("cannot find valid lsn info %u/%u/%u %d %u read lsn %lu, min lsn %lu", - buf_tag.rnode.spcNode, buf_tag.rnode.dbNode, buf_tag.rnode.relNode, buf_tag.forkNum, - buf_tag.blockNum, read_lsn, block_meta_info->min_lsn)))); - } return true; } -- Gitee From b539c916e4dedb5eabd7eab521cfe1ed231dafe6 Mon Sep 17 00:00:00 2001 From: xushukun Date: Thu, 5 Sep 2024 07:40:43 +0000 Subject: [PATCH 274/347] =?UTF-8?q?=E3=80=90CMIT=E3=80=91gsql=20core=20whe?= =?UTF-8?q?n=20execute=20copy=20with=20header?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/psql/copy.cpp | 10 +++++----- src/test/regress/expected/copy2.out | 10 ++++++++++ src/test/regress/sql/copy2.sql | 9 +++++++++ 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/bin/psql/copy.cpp b/src/bin/psql/copy.cpp index 401616cc99..2c52063fd3 
100644 --- a/src/bin/psql/copy.cpp +++ b/src/bin/psql/copy.cpp @@ -226,12 +226,12 @@ static bool ParseParallelOption(struct copy_options* result, char** errToken) xstrcat(&result->after_tofrom, token); token = strtokx(nullptr, whitespace, ",()", NULL, 0, false, false, pset.encoding); - if (pg_strcasecmp(token, "true") == 0 || pg_strcasecmp(token, "on") == 0) { - result->hasHeader = true; - xstrcat(&result->after_tofrom, " false"); - } else { + if (token != nullptr) { + bool res = (pg_strcasecmp(token, "true") == 0 || pg_strcasecmp(token, "on") == 0); + result->hasHeader = (result->hasHeader || res); + const char *resToken = res ? "false" : token; xstrcat(&result->after_tofrom, " "); - xstrcat(&result->after_tofrom, token); + xstrcat(&result->after_tofrom, resToken); } } else { xstrcat(&result->after_tofrom, " "); diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out index 68f2fdb3b2..cb87764270 100644 --- a/src/test/regress/expected/copy2.out +++ b/src/test/regress/expected/copy2.out @@ -363,3 +363,13 @@ drop trigger insert_measurement_trigger on measurement; drop function measurement_insert_trigger; drop table measurement; drop table measurement_movement; +create table copy_header_src (c1 int); +create table copy_header_dest (c1 int); +insert into copy_header_src select generate_series(1,10); +\copy copy_header_src to '~/copy_header_src.csv' with csv header; +\copy copy_header_dest from '~/copy_header_src.csv' with csv header'on'; +ERROR: syntax error at or near "'on'" +LINE 1: COPY copy_header_dest FROM STDIN csv header'on'; + ^ +drop table copy_header_src; +drop table copy_header_dest; diff --git a/src/test/regress/sql/copy2.sql b/src/test/regress/sql/copy2.sql index df3ad4292b..b77eba045f 100644 --- a/src/test/regress/sql/copy2.sql +++ b/src/test/regress/sql/copy2.sql @@ -271,3 +271,12 @@ drop trigger insert_measurement_trigger on measurement; drop function measurement_insert_trigger; drop table measurement; drop table 
measurement_movement; + +create table copy_header_src (c1 int); +create table copy_header_dest (c1 int); +insert into copy_header_src select generate_series(1,10); +\copy copy_header_src to '~/copy_header_src.csv' with csv header; +\copy copy_header_dest from '~/copy_header_src.csv' with csv header'on'; + +drop table copy_header_src; +drop table copy_header_dest; -- Gitee From ed878bad84c30d6f187131e8f8c6229678fecc01 Mon Sep 17 00:00:00 2001 From: wangpingyun <2418191738@qq.com> Date: Mon, 9 Sep 2024 14:25:12 +0800 Subject: [PATCH 275/347] =?UTF-8?q?gs=5Fdump=E5=AF=BC=E5=87=BAB=E5=BA=93?= =?UTF-8?q?=E6=97=B6=E8=AE=BE=E7=BD=AEGUC=E5=8F=82=E6=95=B0=20quote=5Fall?= =?UTF-8?q?=5Fidentifiers=E5=BC=80=E5=90=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_dump/pg_dump.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index f32ecc84fe..a71b4a0735 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -2126,6 +2126,10 @@ static void setup_connection(Archive* AH) /* * Quote all identifiers, if requested. 
*/ + if (findDBCompatibility(AH, PQdb(conn)) && hasSpecificExtension(AH, "dolphin")) { + quote_all_identifiers = true; + } + if (quote_all_identifiers && AH->remoteVersion >= 90100) ExecuteSqlStatement(AH, "SET quote_all_identifiers = true"); } -- Gitee From 05c0c713c89b7b06f3b98472a936c57ed40f58be Mon Sep 17 00:00:00 2001 From: dongning12 Date: Fri, 6 Sep 2024 15:58:41 +0800 Subject: [PATCH 276/347] =?UTF-8?q?=E3=80=906.0.0=E5=88=86=E6=94=AF?= =?UTF-8?q?=E3=80=91=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0=E5=8C=96=E3=80=91?= =?UTF-8?q?=E3=80=90=E5=90=8C=E6=AD=A5DMS=E7=82=B9=E3=80=919.6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 2 +- src/include/ddes/dms/dms_api.h | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 73fdda081d..da57c1e167 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=8b64ce46c8cfa9a978604b346b0d32b264c8ee6c +dms_commit_id=6de342c050a9ff2ac5cb5b462a699e46c88bd156 dss_commit_id=621eb9d6aac34726db404446511be2de9ae32a3f cbb_commit_id=2ea0e4ea6349f00ca85793480ee1ced952c3c8c7 diff --git a/src/include/ddes/dms/dms_api.h b/src/include/ddes/dms/dms_api.h index c5b98f264f..9b9bf83898 100644 --- a/src/include/ddes/dms/dms_api.h +++ b/src/include/ddes/dms/dms_api.h @@ -34,7 +34,7 @@ extern "C" { #define DMS_LOCAL_MINOR_VER_WEIGHT 1000 #define DMS_LOCAL_MAJOR_VERSION 0 #define DMS_LOCAL_MINOR_VERSION 0 -#define DMS_LOCAL_VERSION 164 +#define DMS_LOCAL_VERSION 166 #define DMS_SUCCESS 0 #define DMS_ERROR (-1) @@ -618,6 +618,7 @@ typedef enum en_dms_wait_event { DMS_EVT_REQ_CKPT, DMS_EVT_PROC_GENERIC_REQ, DMS_EVT_PROC_REFORM_REQ, + DMS_EVT_DCS_TRANSTER_PAGE_LSNDWAIT, // add new enum at tail, or make adaptations to openGauss DMS_EVT_COUNT, @@ -899,6 +900,7 @@ typedef void 
(*dms_log_output)(dms_log_id_t log_type, dms_log_level_t log_level, unsigned int code_line_num, const char *module_name, const char *format, ...); typedef int (*dms_log_flush)(void *db_handle, unsigned long long *lsn); typedef int (*dms_log_conditional_flush)(void *db_handle, unsigned long long lfn, unsigned long long *lsn); +typedef void (*dms_lsnd_wait)(void *db_handle, unsigned long long lfn); typedef int(*dms_process_edp)(void *db_handle, dms_edp_info_t *pages, unsigned int count); typedef void (*dms_clean_ctrl_edp)(void *db_handle, dms_buf_ctrl_t *dms_ctrl); typedef char *(*dms_display_pageid)(char *display_buf, unsigned int count, char *pageid); @@ -978,15 +980,17 @@ typedef void (*dms_set_current_point)(void *db_handle); typedef void (*dms_get_db_role)(void *db_handle, unsigned int *role); typedef void (*dms_check_lrpl_takeover)(void *db_handle, unsigned int *need_takeover); typedef void (*dms_reset_link)(void *db_handle); -typedef void (*dms_set_online_list)(void *db_handle, unsigned long long online_list); +typedef void (*dms_set_online_list)(void *db_handle, unsigned long long online_list, unsigned int reformer_id); typedef int (*dms_standby_update_remove_node_ctrl)(void *db_handle, unsigned long long online_list); -typedef int (*dms_standby_stop_thread)(void *db_handle, unsigned long long online_list, unsigned int reformer_id); +typedef int (*dms_standby_stop_thread)(void *db_handle); typedef int (*dms_standby_reload_node_ctrl)(void *db_handle); typedef int (*dms_standby_stop_server)(void *db_handle); typedef int (*dms_standby_resume_server)(void *db_handle); typedef int (*dms_start_lrpl)(void *db_handle, int is_reformer); typedef int (*dms_stop_lrpl)(void *db_handle, int is_reformer); typedef int (*dms_az_switchover_demote_phase1)(void *db_handle); +typedef int (*dms_az_switchover_demote_update_node_ctrl)(void *db_handle, unsigned long long online_list); +typedef int (*dms_az_switchover_demote_change_role)(void *db_handle); typedef int 
(*dms_az_switchover_demote_approve)(void *db_handle); typedef int (*dms_az_switchover_demote_phase2)(void *db_handle); typedef int (*dms_az_switchover_promote_phase1)(void *db_handle); @@ -1105,6 +1109,7 @@ typedef struct st_dms_callback { dms_log_output log_output; dms_log_flush log_flush; dms_log_conditional_flush log_conditional_flush; + dms_lsnd_wait lsnd_wait; dms_process_edp ckpt_edp; dms_process_edp clean_edp; dms_ckpt_session ckpt_session; @@ -1180,6 +1185,8 @@ typedef struct st_dms_callback { // for az switchover and az failover dms_az_switchover_demote_phase1 az_switchover_demote_phase1; + dms_az_switchover_demote_update_node_ctrl az_switchover_demote_update_node_ctrl; + dms_az_switchover_demote_change_role az_switchover_demote_change_role; dms_az_switchover_demote_approve az_switchover_demote_approve; dms_az_switchover_demote_phase2 az_switchover_demote_phase2; dms_az_switchover_promote_phase1 az_switchover_promote_phase1; @@ -1402,6 +1409,13 @@ typedef struct st_mes_task_priority_stats_info { unsigned long long msgitem_free_num; } mes_task_priority_stats_info_t; +typedef struct st_mem_info_stat { + const char *area; + unsigned long long total; + unsigned long long used; + double used_percentage; +} mem_info_stat_t; + #ifdef __cplusplus } #endif -- Gitee From 4dc73206169af80db7e06d79b2c2f28a28ba5deb Mon Sep 17 00:00:00 2001 From: leiziwei Date: Fri, 30 Aug 2024 18:10:28 +0800 Subject: [PATCH 277/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2=E6=97=B6=E4=BD=BF=E7=94=A8=E7=9A=84typmod?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/pl_exec.cpp | 9 ++++----- .../regress/expected/plpgsql_cursor_rowtype.out | 12 ++++++++++++ src/test/regress/sql/plpgsql_cursor_rowtype.sql | 13 +++++++++++++ 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index 
f3cbd2f333..29201304c3 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -1268,6 +1268,7 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d int32 valtypmod; Form_pg_attribute tattr = TupleDescAttr(new_tupdesc, fnum); Form_pg_attribute attr = TupleDescAttr(rec->tupdesc, anum); + Oid reqtypemod = tattr->atttypmod; Oid reqtype = tattr->atttypid; while (anum < new_natts && attr->attisdropped) { @@ -1276,7 +1277,6 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d if (anum < new_natts) { value = SPI_getbinval(rec->tup, rec->tupdesc, anum + 1, &isnull); valtype = attr->atttypid; - valtypmod = attr->atttypmod; anum++; } else { /* When source value is missing */ @@ -1287,7 +1287,7 @@ static void exec_cursor_rowtype_init(PLpgSQL_execstate *estate, PLpgSQL_datum *d errhint("Make sure the query returns the exact list of columns."))); } rowtype_column_len_check(tattr, rec->tup, rec->tupdesc, valtype, anum); - newvalues[fnum] = exec_simple_cast_value(estate, value, valtype, reqtype, valtypmod, isnull); + newvalues[fnum] = exec_simple_cast_value(estate, value, valtype, reqtype, reqtypemod, isnull); newnulls[fnum] = isnull; } @@ -9585,7 +9585,7 @@ static void exec_move_row_from_fields(PLpgSQL_execstate *estate, PLpgSQL_datum * Datum value; bool isnull; Oid valtype; - int32 valtypmod; + int32 reqtypmod = TupleDescAttr(tupdesc, anum)->atttypmod; Oid reqtype = TupleDescAttr(tupdesc, anum)->atttypid; while (anum < td_natts && TupleDescAttr(tupdesc, anum)->attisdropped) @@ -9594,7 +9594,6 @@ static void exec_move_row_from_fields(PLpgSQL_execstate *estate, PLpgSQL_datum * if (anum < td_natts) { value = SPI_getbinval(var_tup, var_tupdesc, fnum + 1, &isnull); valtype = attr->atttypid; - valtypmod = TupleDescAttr(var_tupdesc, fnum)->atttypmod; anum++; } else { /* When source value is missing */ @@ -9605,7 +9604,7 @@ static void exec_move_row_from_fields(PLpgSQL_execstate 
*estate, PLpgSQL_datum * errhint("Make sure the query returns the exact list of columns."))); } rowtype_column_len_check(tattr, var_tup, var_tupdesc, valtype, fnum); - newvalues[fnum] = exec_simple_cast_value(estate, value, valtype, reqtype, valtypmod, isnull); + newvalues[fnum] = exec_simple_cast_value(estate, value, valtype, reqtype, reqtypmod, isnull); newnulls[fnum] = isnull; } diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 01b310b9a8..0069d6d18c 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -1383,6 +1383,18 @@ end; call pro_cs_trans_1(); --?.* CONTEXT: PL/pgSQL function pro_cs_trans_1() line 10 at CLOSE +create table numeric_test(col1 numeric(10,3)); +insert into numeric_test values(100.1111); +declare + cursor cur1 is select * from numeric_test; + source cur1%rowtype := (100.2345); +begin + raise info 'col1 : %',source.col1; + insert into numeric_test values (source.col1); +end; +/ +INFO: col1 : 100.235 +drop table numeric_test; create or replace procedure pro_cs_trans_1() as cursor c1 is select * from cs_trans_1 order by 1; rec_1 cs_trans_1%rowtype; diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index 14b27a2152..72006957d2 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -1082,6 +1082,19 @@ end; call pro_cs_trans_1(); +create table numeric_test(col1 numeric(10,3)); +insert into numeric_test values(100.1111); + +declare + cursor cur1 is select * from numeric_test; + source cur1%rowtype := (100.2345); +begin + raise info 'col1 : %',source.col1; + insert into numeric_test values (source.col1); +end; +/ +drop table numeric_test; + create or replace procedure pro_cs_trans_1() as cursor c1 is select * from cs_trans_1 order by 1; rec_1 cs_trans_1%rowtype; -- Gitee From 
6daf703e8b8d15e13caa55a0daf463db3f1c7405 Mon Sep 17 00:00:00 2001 From: z00848344 Date: Mon, 9 Sep 2024 19:23:18 +0800 Subject: [PATCH 278/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dpassword=5Fmin=5Fleng?= =?UTF-8?q?th=E8=AE=BE=E7=BD=AE=E4=B8=BA999=E5=A4=B1=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../backend/utils/misc/guc/guc_security.cpp | 22 ++----------------- .../process/postmaster/postmaster.cpp | 12 ++++++++++ 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/src/common/backend/utils/misc/guc/guc_security.cpp b/src/common/backend/utils/misc/guc/guc_security.cpp index cb51b95c31..23ece64b5d 100755 --- a/src/common/backend/utils/misc/guc/guc_security.cpp +++ b/src/common/backend/utils/misc/guc/guc_security.cpp @@ -161,8 +161,6 @@ static bool check_ssl(bool* newval, void** extra, GucSource source); /* Database Security: Support password complexity */ static bool check_int_parameter(int* newval, void** extra, GucSource source); static bool check_ssl_ciphers(char** newval, void** extra, GucSource source); -static bool check_password_min_length(int* newval, void** extra, GucSource source); -static bool check_password_max_length(int* newval, void** extra, GucSource source); static void InitSecurityConfigureNamesBool(); static void InitSecurityConfigureNamesInt(); @@ -627,7 +625,7 @@ static void InitSecurityConfigureNamesInt() 8, 6, MAX_PASSWORD_LENGTH, - check_password_min_length, + check_int_parameter, NULL, NULL}, @@ -642,7 +640,7 @@ static void InitSecurityConfigureNamesInt() 32, 6, MAX_PASSWORD_LENGTH, - check_password_max_length, + check_int_parameter, NULL, NULL}, @@ -1448,19 +1446,3 @@ static bool check_ssl_ciphers(char** newval, void** extra, GucSource) pfree_ext(ciphers_list); return true; } - -static bool check_password_min_length(int* newval, void** extra, GucSource source) -{ - if (*newval >= 0 && *newval <= u_sess->attr.attr_security.Password_max_length) { - return true; - } - return 
false; -} - -static bool check_password_max_length(int* newval, void** extra, GucSource source) -{ - if (*newval >= 0 && *newval >= u_sess->attr.attr_security.Password_min_length) { - return true; - } - return false; -} \ No newline at end of file diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index e56a4976bc..912578f78c 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -66,6 +66,7 @@ */ #include "postgres.h" #include "knl/knl_variable.h" +#include "knl/knl_guc/knl_session_attr_security.h" #ifdef ENABLE_BBOX #include "gs_bbox.h" #endif @@ -3591,6 +3592,16 @@ static void CheckShareStorageConfigConflicts(void) } } +static void CheckPasswordLenConfigConflics(void) +{ + if (u_sess->attr.attr_security.Password_min_length > u_sess->attr.attr_security.Password_max_length) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("password_min_length (%d) should be no more than password_max_length (%d).", + u_sess->attr.attr_security.Password_min_length, + u_sess->attr.attr_security.Password_max_length))); + } +} + /* * Check for invalid combinations of GUC settings during starting up. 
*/ @@ -3654,6 +3665,7 @@ static void CheckGUCConflicts(void) } CheckExtremeRtoGUCConflicts(); CheckShareStorageConfigConflicts(); + CheckPasswordLenConfigConflics(); #if ((defined(USE_SSL)) && (defined(USE_TASSL))) CheckSSLConflict(); #endif -- Gitee From 5c3b91c6140cc2f4e629b66dadea4d15ffea06d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Mon, 9 Sep 2024 21:05:05 +0800 Subject: [PATCH 279/347] =?UTF-8?q?readfuncs=E6=96=B0=E5=B1=9E=E6=80=A7?= =?UTF-8?q?=E6=A0=B9=E6=8D=AE=E6=98=AF=E5=90=A6=E6=9C=89=E5=80=BC=E8=BF=9B?= =?UTF-8?q?=E8=A1=8C=E5=8F=8D=E5=BA=8F=E5=88=97=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/readfuncs.cpp | 60 +++++++++++++++----------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 7df5fdb85d..de7553112d 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -2904,10 +2904,11 @@ static MinMaxExpr* _readMinMaxExpr(void) READ_ENUM_FIELD(op, MinMaxOp); READ_NODE_FIELD(args); READ_LOCATION_FIELD(location); - if (t_thrd.proc->workingVersionNum >= MINMAXEXPR_CMPTYPE_VERSION_NUM) { + IF_EXIST (cmptype) { READ_OID_FIELD(cmptype); + } + IF_EXIST (cmpargs) { READ_NODE_FIELD(cmpargs); - READ_TYPEINFO_FIELD(cmptype); } READ_TYPEINFO_FIELD(minmaxtype); @@ -3067,7 +3068,7 @@ static SetToDefault* _readSetToDefault(void) READ_INT_FIELD(typeMod); READ_OID_FIELD(collation); READ_LOCATION_FIELD(location); - if (t_thrd.proc->workingVersionNum >= UNION_NULL_VERSION_NUM) { + IF_EXIST (lrchild_unknown) { READ_BOOL_FIELD(lrchild_unknown); } @@ -3526,14 +3527,20 @@ static Plan* _readPlan(Plan* local_node) READ_BOOL_FIELD(recursive_union_controller); READ_INT_FIELD(control_plan_nodeid); READ_BOOL_FIELD(is_sync_plannode); - if (t_thrd.proc->workingVersionNum >= ML_OPT_MODEL_VERSION_NUM) { + IF_EXIST 
(pred_rows) { READ_FLOAT_FIELD(pred_rows); + } + IF_EXIST (pred_startup_time) { READ_FLOAT_FIELD(pred_startup_time); + } + IF_EXIST (pred_total_time) { READ_FLOAT_FIELD(pred_total_time); + } + IF_EXIST (pred_max_memory) { READ_LONG_FIELD(pred_max_memory); } #ifdef USE_SPQ - if (t_thrd.proc->workingVersionNum >= SPQ_VERSION_NUM) { + IF_EXIST (spq_scan_partial) { READ_BOOL_FIELD(spq_scan_partial); } #endif @@ -3616,7 +3623,9 @@ static Agg* _readAgg(Agg* local_node) READ_OPERATOROID_ARRAY(grpOperators, numCols); #ifndef ENABLE_MULTIPLE_NODES if (!IS_SPQ_RUNNING && t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) { - READ_OPERATOROID_ARRAY(grp_collations, numCols); + IF_EXIST (grp_collations) { + READ_OPERATOROID_ARRAY(grp_collations, numCols); + } } #endif @@ -3650,7 +3659,7 @@ static WindowAgg* _readWindowAgg(WindowAgg* local_node) READ_ATTR_ARRAY(partColIdx, partNumCols); READ_OPERATOROID_ARRAY(partOperators, partNumCols); #ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) { + IF_EXIST (part_collations) { READ_OPERATOROID_ARRAY(part_collations, partNumCols); } #endif @@ -3658,7 +3667,7 @@ static WindowAgg* _readWindowAgg(WindowAgg* local_node) READ_ATTR_ARRAY(ordColIdx, ordNumCols); READ_OPERATOROID_ARRAY(ordOperators, ordNumCols); #ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) { + IF_EXIST (ord_collations) { READ_OPERATOROID_ARRAY(ord_collations, ordNumCols); } #endif @@ -3730,7 +3739,6 @@ static CStoreIndexAnd* _readCStoreIndexAnd(CStoreIndexAnd* local_node) static PruningResult* _readPruningResult(PruningResult* local_node) { - const int num = 92267; READ_LOCALS_NULL(PruningResult); READ_TEMP_LOCALS(); @@ -3743,7 +3751,7 @@ static PruningResult* _readPruningResult(PruningResult* local_node) IF_EXIST(ls_selectedSubPartitions) { READ_NODE_FIELD(ls_selectedSubPartitions); } - if (t_thrd.proc->workingVersionNum >= num) { + IF_EXIST(expr) { 
READ_NODE_FIELD(expr); } IF_EXIST(isPbeSinlePartition) { @@ -4229,7 +4237,7 @@ static Unique* _readUnique(Unique* local_node) READ_ATTR_ARRAY(uniqColIdx, numCols); READ_OPERATOROID_ARRAY(uniqOperators, numCols); #ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) { + IF_EXIST (uniq_collations) { READ_OPERATOROID_ARRAY(uniq_collations, numCols); } #endif @@ -4430,7 +4438,7 @@ static ModifyTable* _readModifyTable(ModifyTable* local_node) READ_NODE_FIELD(rowMarks); READ_INT_FIELD(epqParam); READ_BOOL_FIELD(partKeyUpdated); - if (t_thrd.proc->workingVersionNum >= REPLACE_INTO_VERSION_NUM) { + IF_EXIST (isReplace) { READ_BOOL_FIELD(isReplace); } #ifdef PGXC @@ -4476,15 +4484,13 @@ static ModifyTable* _readModifyTable(ModifyTable* local_node) IF_EXIST(targetlists) { READ_NODE_FIELD(targetlists); } - if (t_thrd.proc->workingVersionNum >= SUPPORT_VIEW_AUTO_UPDATABLE) { + IF_EXIST (withCheckOptionLists) { READ_NODE_FIELD(withCheckOptionLists); } #ifdef USE_SPQ - if (t_thrd.proc->workingVersionNum >= SPQ_VERSION_NUM) { - IF_EXIST(isSplitUpdates) { - READ_NODE_FIELD(isSplitUpdates); + IF_EXIST (isSplitUpdates) { + READ_NODE_FIELD(isSplitUpdates); } - } #endif READ_DONE(); } @@ -4623,7 +4629,7 @@ static Group* _readGroup(Group* local_node) READ_ATTR_ARRAY(grpColIdx, numCols); READ_OPERATOROID_ARRAY(grpOperators, numCols); #ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) { + IF_EXIST (grp_collations) { READ_OPERATOROID_ARRAY(grp_collations, numCols); } #endif @@ -4689,7 +4695,7 @@ static HashJoin* _readHashJoin(HashJoin* local_node) read_mem_info(&local_node->mem_info); #ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) { + IF_EXIST (hash_collations) { READ_NODE_FIELD(hash_collations); } #endif @@ -5014,7 +5020,7 @@ static SetOp* _readSetOp(SetOp* local_node) READ_ATTR_ARRAY(dupColIdx, numCols); READ_OPERATOROID_ARRAY(dupOperators, numCols); 
#ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) { + IF_EXIST (dup_collations) { READ_OPERATOROID_ARRAY(dup_collations, numCols); } #endif @@ -6016,8 +6022,10 @@ static IndexStmt* _readIndexStmt() READ_STRING_FIELD(accessMethod); READ_STRING_FIELD(tableSpace); READ_NODE_FIELD(indexParams); - if (t_thrd.proc->workingVersionNum >= SUPPORT_GPI_VERSION_NUM) { + IF_EXIST (indexIncludingParams) { READ_NODE_FIELD(indexIncludingParams); + } + IF_EXIST (isGlobal) { READ_BOOL_FIELD(isGlobal); } READ_NODE_FIELD(options); @@ -6074,7 +6082,7 @@ static Constraint* _readConstraint() } else if (MATCH_TYPE("DEFAULT")) { local_node->contype = CONSTR_DEFAULT; READ_NODE_FIELD(raw_expr); - if (t_thrd.proc->workingVersionNum >= ON_UPDATE_TIMESTAMP_VERSION_NUM) { + IF_EXIST (update_expr) { READ_NODE_FIELD(update_expr); } READ_STRING_FIELD(cooked_expr); @@ -6086,7 +6094,7 @@ static Constraint* _readConstraint() } else if (MATCH_TYPE("PRIMARY_KEY")) { local_node->contype = CONSTR_PRIMARY; READ_NODE_FIELD(keys); - if (t_thrd.proc->workingVersionNum >= SUPPORT_GPI_VERSION_NUM) { + IF_EXIST (including) { READ_NODE_FIELD(including); } READ_NODE_FIELD(options); @@ -6095,7 +6103,7 @@ static Constraint* _readConstraint() } else if (MATCH_TYPE("UNIQUE")) { local_node->contype = CONSTR_UNIQUE; READ_NODE_FIELD(keys); - if (t_thrd.proc->workingVersionNum >= SUPPORT_GPI_VERSION_NUM) { + IF_EXIST (including) { READ_NODE_FIELD(including); } READ_NODE_FIELD(options); @@ -6104,7 +6112,7 @@ static Constraint* _readConstraint() } else if (MATCH_TYPE("EXCLUSION")) { local_node->contype = CONSTR_EXCLUSION; READ_NODE_FIELD(exclusions); - if (t_thrd.proc->workingVersionNum >= SUPPORT_GPI_VERSION_NUM) { + IF_EXIST (including) { READ_NODE_FIELD(including); } READ_NODE_FIELD(options); @@ -6124,7 +6132,7 @@ static Constraint* _readConstraint() READ_NODE_FIELD(old_pktable_oid); READ_BOOL_FIELD(skip_validation); READ_BOOL_FIELD(initially_valid); - if 
(t_thrd.proc->workingVersionNum >= DISABLE_CONSTRAINT_VERSION_NUM) { + IF_EXIST (isdisable) { READ_BOOL_FIELD(isdisable); } } else if (MATCH_TYPE("CLUSTER")) { -- Gitee From f1bb6ed4cfdbae234154547f071f5f16313e0868 Mon Sep 17 00:00:00 2001 From: sundechao Date: Tue, 10 Sep 2024 10:46:16 +0800 Subject: [PATCH 280/347] =?UTF-8?q?=E4=BF=AE=E6=94=B9ustore=5Fverify=5Flev?= =?UTF-8?q?el=E9=BB=98=E8=AE=A4=E5=80=BC=E4=B8=BAfast,ustore=5Fverify=5Fmo?= =?UTF-8?q?dule=E4=B8=BAupage&ubtree=20=EF=BC=88cherry=20picked=20commit?= =?UTF-8?q?=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/misc/guc.cpp | 4 ++-- src/common/backend/utils/misc/guc/guc_sql.cpp | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index affd0be0bf..506e393fb0 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -14165,8 +14165,8 @@ static void InitUStoreAttr() u_sess->attr.attr_storage.enable_candidate_buf_usage_count = DEFAULT_CAND_LIST_USAGE_COUNT; u_sess->attr.attr_storage.ustats_tracker_naptime = DEFAULT_USTATS_TRACKER_NAPTIME; u_sess->attr.attr_storage.umax_search_length_for_prune = DEFAULT_UMAX_PRUNE_SEARCH_LEN; - u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_DEFAULT; - u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_INVALID; + u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_FAST; + u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_UPAGE | USTORE_VERIFY_MOD_UBTREE; u_sess->attr.attr_storage.enable_ustore_sync_rollback = DEFAULT_SYNC_ROLLBACK; u_sess->attr.attr_storage.enable_ustore_async_rollback = DEFAULT_ASYNC_ROLLBACK; u_sess->attr.attr_storage.enable_ustore_page_rollback = DEFAULT_PAGE_ROLLBACK; diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp 
index c6e45d8e57..eb1840f0bb 100755 --- a/src/common/backend/utils/misc/guc/guc_sql.cpp +++ b/src/common/backend/utils/misc/guc/guc_sql.cpp @@ -4013,7 +4013,7 @@ static void ParseUstoreVerifyLevel(int* mLevel, char* ptoken, const char* pdelim } else if (strcasecmp(ptoken, "COMPLETE") == 0) { setVal = (int) USTORE_VERIFY_COMPLETE; } else { - setVal = USTORE_VERIFY_DEFAULT; + setVal = USTORE_VERIFY_FAST; ereport(LOG, (errmodule(MOD_GUC), errmsg("Invalid parameter settings, only support none, fast and complete value."))); } @@ -4043,7 +4043,7 @@ static void ParseUstoreVerifyModule(int* moduleVal, char* ptoken, const char* pd if (strcasecmp(ptoken, "ALL") == 0) { setVal = USTORE_VERIFY_MOD_MASK; } else if (strcasecmp(ptoken, "NULL") == 0) { - setVal = USTORE_VERIFY_MOD_INVALID; + setVal = USTORE_VERIFY_MOD_UPAGE | USTORE_VERIFY_MOD_UBTREE; } else if (strcasecmp(ptoken, "UPAGE") == 0) { setVal |= USTORE_VERIFY_MOD_UPAGE; } else if (strcasecmp(ptoken, "UBTREE") == 0) { @@ -4175,8 +4175,8 @@ static void ResetUstoreAttrValues() u_sess->attr.attr_storage.enable_ustore_sync_rollback = true; u_sess->attr.attr_storage.enable_ustore_async_rollback = true; u_sess->attr.attr_storage.enable_ustore_page_rollback = true; - u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_DEFAULT; - u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_INVALID; + u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_FAST; + u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_UPAGE | USTORE_VERIFY_MOD_UBTREE; u_sess->attr.attr_storage.index_trace_level = TRACE_NO; u_sess->attr.attr_storage.enable_log_tuple = false; } @@ -4205,10 +4205,10 @@ static void ResetPrevUstoreAttrSettings(bool status[]) u_sess->attr.attr_storage.enable_ustore_page_rollback = true; } if (!status[ENABLE_USTORE_VERIFY_LEVEL_IDX]) { - u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_DEFAULT; + u_sess->attr.attr_storage.ustore_verify_level = USTORE_VERIFY_FAST; } if 
(!status[ENABLE_USTORE_VERIFY_MODULE_IDX]) { - u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_INVALID; + u_sess->attr.attr_storage.ustore_verify_module = USTORE_VERIFY_MOD_UPAGE | USTORE_VERIFY_MOD_UBTREE; } if (!status[ENABLE_USTORE_TRACE_LEVEL_IDX]) { u_sess->attr.attr_storage.index_trace_level = TRACE_NO; -- Gitee From 8db8872ab3f99ab65cb04f369ed060f92334f18d Mon Sep 17 00:00:00 2001 From: lukeman Date: Tue, 10 Sep 2024 20:00:53 +0800 Subject: [PATCH 281/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9Ags=5Fpr?= =?UTF-8?q?obackup=E6=81=A2=E5=A4=8D=E5=AF=B9=E8=B1=A1=E5=AD=98=E5=82=A8?= =?UTF-8?q?=E4=B8=8A=E7=9A=84=E5=A4=87=E4=BB=BD=E9=9B=86=E6=97=B6=E6=95=B0?= =?UTF-8?q?=E6=8D=AE=E5=A4=87=E4=BB=BD=E6=96=87=E4=BB=B6=E6=81=A2=E5=A4=8D?= =?UTF-8?q?=E9=A1=BA=E5=BA=8F=E5=92=8C=E5=A4=87=E4=BB=BD=E9=A1=BA=E5=BA=8F?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_probackup/oss/thread.cpp | 45 +++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/src/bin/pg_probackup/oss/thread.cpp b/src/bin/pg_probackup/oss/thread.cpp index ea06bdfa9a..dd9e6a7256 100644 --- a/src/bin/pg_probackup/oss/thread.cpp +++ b/src/bin/pg_probackup/oss/thread.cpp @@ -407,6 +407,18 @@ int getFreeReaderThread() return slot; } +static int PbkFilenameCompare(const void *f1, const void *f2) +{ + char* filename1 = *(char**)f1; + char* filename2 = *(char**)f2; + size_t len1 = strlen(filename1); + size_t len2 = strlen(filename2); + if (len1 == len2) { + return strcmp(filename1, filename2); + } + return len1 - len2; +} + void* restoreReaderThreadMain(void* arg) { restoreReaderThreadArgs* args = (restoreReaderThreadArgs*)arg; @@ -420,34 +432,43 @@ void* restoreReaderThreadMain(void* arg) elog(ERROR, "bucket %s not found, please create it first", bucket_name ? 
bucket_name : "null"); } parray* objects = parray_new(); + parray* pbkObjects = parray_new(); oss->ListObjectsWithPrefix(bucket_name, prefix_name, objects); size_t objects_num = parray_num(objects); - size_t pbk_objects_num = 0; - for(size_t i = 0; i < objects_num; ++i) { + for (size_t i = 0; i < objects_num; ++i) { object_name = (char*)parray_get(objects, i); if (strncmp(object_name + strlen(object_name) - object_suffix_len, ".pbk", object_suffix_len) == 0) { - pbk_objects_num++; + parray_append(pbkObjects, object_name); } } - args->bufferCxt->fileNum = pbk_objects_num; + size_t pbkObjectsNum = parray_num(pbkObjects); + /* Sort by filename for restoring order */ + parray_qsort(pbkObjects, PbkFilenameCompare); + args->bufferCxt->fileNum = pbkObjectsNum; elog(INFO, "the total number of backup %s's file objects is %d, and pbk file objects is %d", - base36enc(args->dest_backup->start_time), objects_num, pbk_objects_num); - for(size_t i = 0; i < objects_num; ++i) { - if (args->bufferCxt->earlyExit) { - break; - } + base36enc(args->dest_backup->start_time), objects_num, pbkObjectsNum); + for (size_t i = 0; i < objects_num; ++i) { object_name = (char*)parray_get(objects, i); - elog(INFO, "download object: %s from s3", object_name); if (strncmp(object_name + strlen(object_name) - object_suffix_len, ".pbk", object_suffix_len) == 0) { - args->bufferCxt->fileEnd = false; - oss->GetObject(bucket_name, object_name, (void*)args->bufferCxt); + continue; } else { + elog(INFO, "download object: %s from s3", object_name); char file_name[MAXPGPATH]; int rc = snprintf_s(file_name, MAXPGPATH, MAXPGPATH - 1, "/%s", object_name); securec_check_ss_c(rc, "\0", "\0"); oss->GetObject(bucket_name, object_name, (char*)file_name); } } + for (size_t i = 0; i < pbkObjectsNum; ++i) { + if (args->bufferCxt->earlyExit) { + break; + } + object_name = (char*)parray_get(pbkObjects, i); + elog(INFO, "download object: %s from s3", object_name); + args->bufferCxt->fileEnd = false; + 
oss->GetObject(bucket_name, object_name, (void*)args->bufferCxt); + } parray_free(objects); + parray_free(pbkObjects); return NULL; } \ No newline at end of file -- Gitee From 67f332fbe75a9a6cff302738e28328b69a5cd941 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Mon, 9 Sep 2024 21:39:17 +0800 Subject: [PATCH 282/347] =?UTF-8?q?=E6=94=AF=E6=8C=81FOREIGN=5FKEY=5FCHECK?= =?UTF-8?q?S=E5=8F=82=E6=95=B0=20=EF=BC=88cherry=20picked=20commit=20from?= =?UTF-8?q?=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/cluster_guc.conf | 1 + src/common/backend/utils/misc/guc.cpp | 14 ++++++++++++++ src/include/knl/knl_guc/knl_session_attr_common.h | 1 + 3 files changed, 16 insertions(+) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index fc3e2166b4..4ff580696a 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -573,6 +573,7 @@ ustore_attr|string|0,0|NULL|NULL| enable_ustore|bool|0,0|NULL|Enable to create ustore table| enable_default_ustore_table|bool|0,0|NULL|Enable to create ustore table by default| enable_gtt_concurrent_truncate|bool|0,0|NULL|Enable concurrent truncate table for GTT| +foreign_key_checks|bool|0,0|NULL|Enable foreign key check on insert, update or drop operation,only applicable to b-format db.| reserve_space_for_nullable_atts|bool|0,0|NULL|Enable reserve space for nullable attributes, only applicable to ustore| undo_space_limit_size|int|819200,17179869184|kB|Maximum physical space of the undo command| undo_limit_size_per_transaction|int|2048,17179869184|kB|Maximum space for allocating undo resources in a transaction| diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index 506e393fb0..8731090217 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -2112,6 +2112,20 @@ static void InitConfigureNamesBool() NULL, NULL }, + {{"foreign_key_checks", + 
PGC_USERSET, + NODE_ALL, + QUERY_TUNING, + gettext_noop("Enable foreign key check on insert, update or drop operation," + "only applicable to b-format db." + ), + NULL}, + &u_sess->attr.attr_common.foreign_key_checks, + true, + NULL, + NULL, + NULL + }, /* End-of-list marker */ {{NULL, (GucContext)0, diff --git a/src/include/knl/knl_guc/knl_session_attr_common.h b/src/include/knl/knl_guc/knl_session_attr_common.h index 0ba000ec68..9e90891187 100644 --- a/src/include/knl/knl_guc/knl_session_attr_common.h +++ b/src/include/knl/knl_guc/knl_session_attr_common.h @@ -249,6 +249,7 @@ typedef struct knl_session_attr_common { bool b_compatibility_user_host_auth; int time_record_level; bool enable_record_nettime; + bool foreign_key_checks; } knl_session_attr_common; #endif /* SRC_INCLUDE_KNL_KNL_SESSION_ATTR_COMMON_H_ */ -- Gitee From 091ed11fe7ef5f653df43987a48a707f9d43043a Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Tue, 10 Sep 2024 23:34:01 +0800 Subject: [PATCH 283/347] =?UTF-8?q?=E8=B0=83=E6=95=B4=E6=8C=89=E9=9C=80?= =?UTF-8?q?=E5=9B=9E=E6=94=BE=E6=9C=80=E5=A4=A7=E7=9A=84maxredoitem?= =?UTF-8?q?=E6=95=B0=E9=87=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/transam/ondemand_extreme_rto/dispatcher.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp index 3c96cc7b10..de1db0ab24 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp @@ -101,7 +101,7 @@ static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */ static const int32 MAX_PENDING = 1; static const int32 MAX_PENDING_STANDBY = 1; -static const int32 ITEM_QUQUE_SIZE_RATIO = 16; +static const int32 ITEM_QUQUE_SIZE_RATIO = 2; 
static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */ uint32 g_readManagerTriggerFlag = TRIGGER_NORMAL; -- Gitee From 494b4e135401ab87c0c4ba40aa68529ab2e98c6a Mon Sep 17 00:00:00 2001 From: zzh Date: Wed, 11 Sep 2024 09:03:56 +0800 Subject: [PATCH 284/347] =?UTF-8?q?pg=5Fhba.conf=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E7=BC=BA=E5=B0=91=E7=99=BD=E5=90=8D=E5=8D=95=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docker/dockerfiles/5.0.0/entrypoint.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/dockerfiles/5.0.0/entrypoint.sh b/docker/dockerfiles/5.0.0/entrypoint.sh index d795277818..c1477bf11b 100644 --- a/docker/dockerfiles/5.0.0/entrypoint.sh +++ b/docker/dockerfiles/5.0.0/entrypoint.sh @@ -259,6 +259,7 @@ opengauss_setup_hba_conf() { echo "host replication repuser $OG_SUBNET trust" fi } >> "$PGDATA/pg_hba.conf" + sed -i "/# IPv6 local connections:/a host all omm $OG_SUBNET trust" $PGDATA/pg_hba.conf } # append parameter to postgres.conf for connections -- Gitee From e6640f753775f0926908d87a35d7cfeecf6eacd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=BE=BE=E6=A0=87?= <848833284@qq.com> Date: Wed, 11 Sep 2024 09:49:33 +0800 Subject: [PATCH 285/347] =?UTF-8?q?fix=20analyze=20verify=20error=20?= =?UTF-8?q?=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/commands/verify.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/gausskernel/optimizer/commands/verify.cpp b/src/gausskernel/optimizer/commands/verify.cpp index be98fb6126..4307a7d34d 100644 --- a/src/gausskernel/optimizer/commands/verify.cpp +++ b/src/gausskernel/optimizer/commands/verify.cpp @@ -1886,6 +1886,9 @@ static void VerifyUstorePage(Relation rel, Page page, BlockNumber blkno, VerifyL if (!RelationIsUstoreIndex(rel) && !RelationIsUstoreFormat(rel)) { return; } + if (PageIsNew(page) || 
(RelationIsUstoreIndex(rel) && blkno == 0)) { + return; + } int prevLevel = u_sess->attr.attr_storage.ustore_verify_level; u_sess->attr.attr_storage.ustore_verify_level = level; PG_TRY(); -- Gitee From 44a910597c1c9fa23d5bdfb7559a63663c889cce Mon Sep 17 00:00:00 2001 From: cchen676 Date: Tue, 10 Sep 2024 11:08:15 +0800 Subject: [PATCH 286/347] =?UTF-8?q?=E6=8B=A6=E6=88=AA=E6=8C=89=E9=9C=80?= =?UTF-8?q?=E5=9B=9E=E6=94=BEredo=E9=98=B6=E6=AE=B5=E5=8F=AF=E8=83=BD?= =?UTF-8?q?=E5=87=BA=E7=8E=B0=E7=9A=84lsn=E6=A0=A1=E9=AA=8C=E5=A4=B1?= =?UTF-8?q?=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp | 4 ++-- .../storage/access/common/heaptuple.cpp | 2 +- src/gausskernel/storage/access/transam/clog.cpp | 4 ++++ .../transam/ondemand_extreme_rto/redo_utils.cpp | 5 +++++ src/gausskernel/storage/access/transam/xlog.cpp | 16 +++++++++++----- .../storage/access/transam/xlogutils.cpp | 3 +++ 6 files changed, 26 insertions(+), 8 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp index 24a4d775da..2f4bcc528a 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp @@ -1138,7 +1138,7 @@ static bool LineOffChanged(Buffer buf, int lp_offset, uint16 saved_off) int output_backup = t_thrd.postgres_cxt.whereToSendOutput; t_thrd.postgres_cxt.whereToSendOutput = DestNone; BufferDesc *buf_desc = GetBufferDescriptor(buf); - ereport(WARNING, (errmsg("[%d/%d/%d/%d/%d %d-%d] lineoffchanged new %d/%d/%d old %d!", + ereport(DEBUG1, (errmsg("[%d/%d/%d/%d/%d %d-%d] lineoffchanged new %d/%d/%d old %d!", buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, (int)buf_desc->tag.rnode.bucketNode, (int)buf_desc->tag.rnode.opt, buf_desc->tag.forkNum, buf_desc->tag.blockNum, lpp->lp_off, lpp->lp_flags, lpp->lp_len, saved_off))); @@ -1169,7 +1169,7 @@ 
void ForgetBufferNeedCheckPin(Buffer buf_id) BufferDesc *buf_desc = GetBufferDescriptor(buf_id - 1); int output_backup = t_thrd.postgres_cxt.whereToSendOutput; t_thrd.postgres_cxt.whereToSendOutput = DestNone; - ereport(WARNING, (errmsg("[%d/%d/%d/%d/%d %d-%d] ForgetBufferNeedCheckPin %d!", + ereport(DEBUG1, (errmsg("[%d/%d/%d/%d/%d %d-%d] ForgetBufferNeedCheckPin %d!", buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, (int)buf_desc->tag.rnode.bucketNode, (int)buf_desc->tag.rnode.opt, buf_desc->tag.forkNum, buf_desc->tag.blockNum, count))); diff --git a/src/gausskernel/storage/access/common/heaptuple.cpp b/src/gausskernel/storage/access/common/heaptuple.cpp index 2f4c610248..7ecf8bcbe4 100644 --- a/src/gausskernel/storage/access/common/heaptuple.cpp +++ b/src/gausskernel/storage/access/common/heaptuple.cpp @@ -3565,7 +3565,7 @@ void heap_slot_store_heap_tuple(Tuple tup, TupleTableSlot* slot, Buffer buffer, int output_backup = t_thrd.postgres_cxt.whereToSendOutput; t_thrd.postgres_cxt.whereToSendOutput = DestNone; BufferDesc *buf_desc = GetBufferDescriptor(slot->tts_buffer - 1); - ereport(WARNING, (errmsg("[%d/%d/%d/%d/%d %d-%d] set check pin count, heap_slot_store_heap!", + ereport(DEBUG1, (errmsg("[%d/%d/%d/%d/%d %d-%d] set check pin count, heap_slot_store_heap!", buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, (int)buf_desc->tag.rnode.bucketNode, (int)buf_desc->tag.rnode.opt, buf_desc->tag.forkNum, buf_desc->tag.blockNum))); diff --git a/src/gausskernel/storage/access/transam/clog.cpp b/src/gausskernel/storage/access/transam/clog.cpp index dcadf9fcd6..fdb7b13d9a 100644 --- a/src/gausskernel/storage/access/transam/clog.cpp +++ b/src/gausskernel/storage/access/transam/clog.cpp @@ -1083,6 +1083,10 @@ void TruncateCLOG(TransactionId oldestXact) */ static void WriteZeroPageXlogRec(int64 pageno) { + if (SS_STANDBY_MODE) { + return; + } + XLogBeginInsert(); XLogRegisterData((char *)(&pageno), 
sizeof(int64)); (void)XLogInsert(RM_CLOG_ID, CLOG_ZEROPAGE); diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp index b066c3bec6..37e696c08b 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp @@ -619,6 +619,11 @@ XLogRecParseState *OndemandRedoReloadXLogRecord(XLogRecParseState *hashmapBlockS recordBlockState->blockparse.blockhead.bucketNode, recordBlockState->blockparse.blockhead.forknum, recordBlockState->blockparse.blockhead.blkno))); } + + if (SS_IN_ONDEMAND_RECOVERY && t_thrd.role == WORKER) { + t_thrd.xlog_cxt.current_redo_xlog_lsn = hashmapBlockState->blockparse.blockhead.start_ptr; + } + return targetState; } diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 22af099b58..fc94850d5c 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -5164,7 +5164,7 @@ static void PreallocXlogFiles(XLogRecPtr endptr) * 1. In ss repplication dorado cluster, standby cluster sync primary xlog * 2. In ondemand recovery, we do not preallocate xlog files for better rto */ - if (SS_DORADO_STANDBY_CLUSTER || SS_IN_ONDEMAND_RECOVERY) { + if (SS_DORADO_STANDBY_CLUSTER || SS_IN_ONDEMAND_RECOVERY || SS_STANDBY_MODE) { return; } @@ -11110,9 +11110,11 @@ void StartupXLOG(void) */ /* for primary node, calculate the first multi version snapshot. 
*/ - LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); - CalculateLocalLatestSnapshot(true); - LWLockRelease(ProcArrayLock); + if (!SS_STANDBY_MODE) { + LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); + CalculateLocalLatestSnapshot(true); + LWLockRelease(ProcArrayLock); + } if (SS_PERFORMING_SWITCHOVER && g_instance.dms_cxt.SSClusterState == NODESTATE_STANDBY_PROMOTING) { ereport(LOG, (errmsg("[SS switchover] Standby promote: StartupXLOG finished, promote success"))); @@ -11609,6 +11611,10 @@ bool HotStandbyActiveInReplay(void) */ bool XLogInsertAllowed(void) { + if (SS_CLUSTER_ONDEMAND_RECOVERY && SS_STANDBY_MODE) { + ereport(ERROR, (errmsg("[SS] SS Standby can't write xlog during recovery!"))); + } + // If value is "unconditionally true" or "unconditionally false", just // return it. This provides the normal fast path once recovery is known done. if (t_thrd.xlog_cxt.LocalXLogInsertAllowed >= 0) { @@ -14009,7 +14015,7 @@ void UpdateFullPageWrites(void) * Write an XLOG_FPW_CHANGE record. This allows us to keep track of * full_page_writes during archive recovery, if required. 
*/ - if (XLogStandbyInfoActive() && !RecoveryInProgress()) { + if (XLogStandbyInfoActive() && !RecoveryInProgress() && !SS_STANDBY_MODE) { XLogBeginInsert(); XLogRegisterData((char *)(&u_sess->attr.attr_storage.fullPageWrites), sizeof(bool)); diff --git a/src/gausskernel/storage/access/transam/xlogutils.cpp b/src/gausskernel/storage/access/transam/xlogutils.cpp index 79f4ab3d87..aac7634ebf 100644 --- a/src/gausskernel/storage/access/transam/xlogutils.cpp +++ b/src/gausskernel/storage/access/transam/xlogutils.cpp @@ -78,6 +78,9 @@ typedef struct xl_invalid_page { static void report_invalid_page(int elevel, xl_invalid_page *invalid_page) { char *path = relpathperm(invalid_page->key.node, invalid_page->key.forkno); + if (SS_IN_ONDEMAND_RECOVERY && t_thrd.role == WORKER) { + elevel = PANIC; + } if (invalid_page->type == NOT_INITIALIZED) ereport(elevel, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), -- Gitee From 4ac0249a022bab17cb23258110934649fff1d754 Mon Sep 17 00:00:00 2001 From: hwhbj Date: Tue, 10 Sep 2024 10:40:51 +0800 Subject: [PATCH 287/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=83=A8=E5=88=86?= =?UTF-8?q?=E5=AF=B9=E8=B1=A1=E5=8D=87=E7=BA=A7=E5=9B=9E=E6=BB=9A=E5=90=8E?= =?UTF-8?q?=E5=85=83=E6=95=B0=E6=8D=AE=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98=20=EF=BC=88cherry=20picked=20commit=20from?= =?UTF-8?q?=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../rollback-post_catalog_maindb_92_945.sql | 156 +++++++++--------- .../rollback-post_catalog_otherdb_92_945.sql | 152 +++++++++-------- .../upgrade-post_catalog_maindb_92_945.sql | 6 +- .../upgrade-post_catalog_otherdb_92_945.sql | 6 +- 4 files changed, 160 insertions(+), 160 deletions(-) diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql index 1c5c292d3e..9038fab3a0 100644 --- 
a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_945.sql @@ -93,7 +93,7 @@ CREATE FUNCTION pg_catalog.get_instr_unique_sql OUT srt14_after_query bigint, OUT rtt_unknown bigint ) -RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; +RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'get_instr_unique_sql'; CREATE VIEW DBE_PERF.statement AS SELECT * FROM get_instr_unique_sql(); @@ -194,7 +194,7 @@ CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( srt14_after_query bigint, rtt_unknown bigint, parent_query_id bigint -); +) WITH (orientation=row, compression=no); REVOKE ALL on table pg_catalog.statement_history FROM public; create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); @@ -330,6 +330,8 @@ CREATE FUNCTION dbe_perf.standby_statement_history( OUT lwlock_wait_time bigint, OUT details bytea, OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, OUT net_send_time bigint, OUT srt1_q bigint, OUT srt2_simple_query bigint, @@ -448,7 +450,6 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp FOR row_name IN EXECUTE(query_str_nodes) LOOP query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''''; FOR row_data IN EXECUTE(query_str) LOOP - IF row_data.parent_query_id = 0 then node_name := row_name.node_name; db_name := row_data.db_name; schema_name := row_data.schema_name; @@ -520,10 +521,9 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp srt14_after_query := row_data.srt14_after_query; rtt_unknown := row_data.rtt_unknown; return next; - END IF; + END LOOP; END LOOP; - END LOOP; - return; + return; END; $$ LANGUAGE 'plpgsql' 
NOT FENCED; @@ -615,79 +615,77 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp FOR row_name IN EXECUTE(query_str_nodes) LOOP query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true'; FOR row_data IN EXECUTE(query_str) LOOP - IF row_data.parent_query_id = 0 THEN - node_name := row_name.node_name; - db_name := row_data.db_name; - schema_name := row_data.schema_name; - origin_node := row_data.origin_node; - user_name := row_data.user_name; - application_name := row_data.application_name; - client_addr := row_data.client_addr; - client_port := row_data.client_port; - unique_query_id := row_data.unique_query_id; - debug_query_id := row_data.debug_query_id; - query := row_data.query; - start_time := row_data.start_time; - finish_time := row_data.finish_time; - slow_sql_threshold := row_data.slow_sql_threshold; - transaction_id := row_data.transaction_id; - thread_id := row_data.thread_id; - session_id := row_data.session_id; - n_soft_parse := row_data.n_soft_parse; - n_hard_parse := row_data.n_hard_parse; - query_plan := row_data.query_plan; - n_returned_rows := row_data.n_returned_rows; - n_tuples_fetched := row_data.n_tuples_fetched; - n_tuples_returned := row_data.n_tuples_returned; - n_tuples_inserted := row_data.n_tuples_inserted; - n_tuples_updated := row_data.n_tuples_updated; - n_tuples_deleted := row_data.n_tuples_deleted; - n_blocks_fetched := row_data.n_blocks_fetched; - n_blocks_hit := row_data.n_blocks_hit; - db_time := row_data.db_time; - cpu_time := row_data.cpu_time; - execution_time := row_data.execution_time; - parse_time := row_data.parse_time; - plan_time := row_data.plan_time; - rewrite_time := row_data.rewrite_time; - pl_execution_time := row_data.pl_execution_time; - pl_compilation_time := row_data.pl_compilation_time; - data_io_time := row_data.data_io_time; - net_send_info := row_data.net_send_info; - net_recv_info := 
row_data.net_recv_info; - net_stream_send_info := row_data.net_stream_send_info; - net_stream_recv_info := row_data.net_stream_recv_info; - lock_count := row_data.lock_count; - lock_time := row_data.lock_time; - lock_wait_count := row_data.lock_wait_count; - lock_wait_time := row_data.lock_wait_time; - lock_max_count := row_data.lock_max_count; - lwlock_count := row_data.lwlock_count; - lwlock_wait_count := row_data.lwlock_wait_count; - lwlock_time := row_data.lwlock_time; - lwlock_wait_time := row_data.lwlock_wait_time; - details := row_data.details; - is_slow_sql := row_data.is_slow_sql; - trace_id := row_data.trace_id; - advise := row_data.advise; - net_send_time =row_data.net_send_time; - srt1_q := row_data.srt1_q; - srt2_simple_query := row_data.srt2_simple_query; - srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; - srt4_plan_query := row_data.srt4_plan_query; - srt5_light_query := row_data.srt5_light_query; - srt6_p := row_data.srt6_p; - srt7_b := row_data.srt7_b; - srt8_e := row_data.srt8_e; - srt9_d := row_data.srt9_d; - srt10_s := row_data.srt10_s; - srt11_c := row_data.srt11_c; - srt12_u := row_data.srt12_u; - srt13_before_query := row_data.srt13_before_query; - srt14_after_query := row_data.srt14_after_query; - rtt_unknown := row_data.rtt_unknown; - return next; - END IF; + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := 
row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + 
srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + return next; END LOOP; END LOOP; return; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql index 1c5c292d3e..ab53647d3e 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_945.sql @@ -93,7 +93,7 @@ CREATE FUNCTION pg_catalog.get_instr_unique_sql OUT srt14_after_query bigint, OUT rtt_unknown bigint ) -RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; +RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'get_instr_unique_sql'; CREATE VIEW DBE_PERF.statement AS SELECT * FROM get_instr_unique_sql(); @@ -194,7 +194,7 @@ CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( srt14_after_query bigint, rtt_unknown bigint, parent_query_id bigint -); +) WITH (orientation=row, compression=no); REVOKE ALL on table pg_catalog.statement_history FROM public; create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); @@ -330,6 +330,8 @@ CREATE FUNCTION dbe_perf.standby_statement_history( OUT lwlock_wait_time bigint, OUT details bytea, OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, OUT net_send_time bigint, OUT srt1_q bigint, OUT srt2_simple_query bigint, @@ -448,7 +450,6 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp FOR row_name IN EXECUTE(query_str_nodes) LOOP query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' 
||$1|| ''' and start_time <= ''' || $2 || ''''; FOR row_data IN EXECUTE(query_str) LOOP - IF row_data.parent_query_id = 0 then node_name := row_name.node_name; db_name := row_data.db_name; schema_name := row_data.schema_name; @@ -520,7 +521,6 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp srt14_after_query := row_data.srt14_after_query; rtt_unknown := row_data.rtt_unknown; return next; - END IF; END LOOP; END LOOP; return; @@ -615,79 +615,77 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp FOR row_name IN EXECUTE(query_str_nodes) LOOP query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true'; FOR row_data IN EXECUTE(query_str) LOOP - IF row_data.parent_query_id = 0 THEN - node_name := row_name.node_name; - db_name := row_data.db_name; - schema_name := row_data.schema_name; - origin_node := row_data.origin_node; - user_name := row_data.user_name; - application_name := row_data.application_name; - client_addr := row_data.client_addr; - client_port := row_data.client_port; - unique_query_id := row_data.unique_query_id; - debug_query_id := row_data.debug_query_id; - query := row_data.query; - start_time := row_data.start_time; - finish_time := row_data.finish_time; - slow_sql_threshold := row_data.slow_sql_threshold; - transaction_id := row_data.transaction_id; - thread_id := row_data.thread_id; - session_id := row_data.session_id; - n_soft_parse := row_data.n_soft_parse; - n_hard_parse := row_data.n_hard_parse; - query_plan := row_data.query_plan; - n_returned_rows := row_data.n_returned_rows; - n_tuples_fetched := row_data.n_tuples_fetched; - n_tuples_returned := row_data.n_tuples_returned; - n_tuples_inserted := row_data.n_tuples_inserted; - n_tuples_updated := row_data.n_tuples_updated; - n_tuples_deleted := row_data.n_tuples_deleted; - n_blocks_fetched := row_data.n_blocks_fetched; - n_blocks_hit := row_data.n_blocks_hit; - 
db_time := row_data.db_time; - cpu_time := row_data.cpu_time; - execution_time := row_data.execution_time; - parse_time := row_data.parse_time; - plan_time := row_data.plan_time; - rewrite_time := row_data.rewrite_time; - pl_execution_time := row_data.pl_execution_time; - pl_compilation_time := row_data.pl_compilation_time; - data_io_time := row_data.data_io_time; - net_send_info := row_data.net_send_info; - net_recv_info := row_data.net_recv_info; - net_stream_send_info := row_data.net_stream_send_info; - net_stream_recv_info := row_data.net_stream_recv_info; - lock_count := row_data.lock_count; - lock_time := row_data.lock_time; - lock_wait_count := row_data.lock_wait_count; - lock_wait_time := row_data.lock_wait_time; - lock_max_count := row_data.lock_max_count; - lwlock_count := row_data.lwlock_count; - lwlock_wait_count := row_data.lwlock_wait_count; - lwlock_time := row_data.lwlock_time; - lwlock_wait_time := row_data.lwlock_wait_time; - details := row_data.details; - is_slow_sql := row_data.is_slow_sql; - trace_id := row_data.trace_id; - advise := row_data.advise; - net_send_time =row_data.net_send_time; - srt1_q := row_data.srt1_q; - srt2_simple_query := row_data.srt2_simple_query; - srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; - srt4_plan_query := row_data.srt4_plan_query; - srt5_light_query := row_data.srt5_light_query; - srt6_p := row_data.srt6_p; - srt7_b := row_data.srt7_b; - srt8_e := row_data.srt8_e; - srt9_d := row_data.srt9_d; - srt10_s := row_data.srt10_s; - srt11_c := row_data.srt11_c; - srt12_u := row_data.srt12_u; - srt13_before_query := row_data.srt13_before_query; - srt14_after_query := row_data.srt14_after_query; - rtt_unknown := row_data.rtt_unknown; - return next; - END IF; + node_name := row_name.node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := 
row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + 
advise := row_data.advise; + net_send_time =row_data.net_send_time; + srt1_q := row_data.srt1_q; + srt2_simple_query := row_data.srt2_simple_query; + srt3_analyze_rewrite := row_data.srt3_analyze_rewrite; + srt4_plan_query := row_data.srt4_plan_query; + srt5_light_query := row_data.srt5_light_query; + srt6_p := row_data.srt6_p; + srt7_b := row_data.srt7_b; + srt8_e := row_data.srt8_e; + srt9_d := row_data.srt9_d; + srt10_s := row_data.srt10_s; + srt11_c := row_data.srt11_c; + srt12_u := row_data.srt12_u; + srt13_before_query := row_data.srt13_before_query; + srt14_after_query := row_data.srt14_after_query; + rtt_unknown := row_data.rtt_unknown; + return next; END LOOP; END LOOP; return; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql index 7f7aa55807..7a3f2fa7f0 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_945.sql @@ -94,7 +94,7 @@ CREATE FUNCTION pg_catalog.get_instr_unique_sql OUT rtt_unknown bigint, OUT net_trans_time bigint ) -RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; +RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'get_instr_unique_sql'; CREATE VIEW DBE_PERF.statement AS SELECT * FROM get_instr_unique_sql(); @@ -196,7 +196,7 @@ CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( rtt_unknown bigint, parent_query_id bigint, net_trans_time bigint -); +) WITH (orientation=row, compression=no); REVOKE ALL on table pg_catalog.statement_history FROM public; create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); @@ -333,6 +333,8 @@ CREATE FUNCTION 
dbe_perf.standby_statement_history( OUT lwlock_wait_time bigint, OUT details bytea, OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, OUT net_send_time bigint, OUT srt1_q bigint, OUT srt2_simple_query bigint, diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql index 7f7aa55807..7a3f2fa7f0 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_945.sql @@ -94,7 +94,7 @@ CREATE FUNCTION pg_catalog.get_instr_unique_sql OUT rtt_unknown bigint, OUT net_trans_time bigint ) -RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'get_instr_unique_sql'; +RETURNS setof record LANGUAGE INTERNAL STABLE NOT FENCED as 'get_instr_unique_sql'; CREATE VIEW DBE_PERF.statement AS SELECT * FROM get_instr_unique_sql(); @@ -196,7 +196,7 @@ CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( rtt_unknown bigint, parent_query_id bigint, net_trans_time bigint -); +) WITH (orientation=row, compression=no); REVOKE ALL on table pg_catalog.statement_history FROM public; create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean); @@ -333,6 +333,8 @@ CREATE FUNCTION dbe_perf.standby_statement_history( OUT lwlock_wait_time bigint, OUT details bytea, OUT is_slow_sql boolean, + OUT trace_id text, + OUT advise text, OUT net_send_time bigint, OUT srt1_q bigint, OUT srt2_simple_query bigint, -- Gitee From a1b6de02211b238ebc8a76f56d1119445d56c145 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Mon, 9 Sep 2024 21:27:49 +0800 Subject: [PATCH 288/347] =?UTF-8?q?=E8=B0=83=E6=95=B4=E9=83=A8=E5=88=86?= 
=?UTF-8?q?=E6=A0=A1=E9=AA=8C=E4=B8=8E=E5=88=9D=E5=A7=8B=E5=8C=96=E9=80=BB?= =?UTF-8?q?=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/initdb/initdb.cpp | 10 ++---- .../process/postmaster/pagewriter.cpp | 36 +++++++++---------- .../storage/smgr/segment/segbuffer.cpp | 4 +-- 3 files changed, 23 insertions(+), 27 deletions(-) diff --git a/src/bin/initdb/initdb.cpp b/src/bin/initdb/initdb.cpp index 737e7b7c99..42b217b3e6 100644 --- a/src/bin/initdb/initdb.cpp +++ b/src/bin/initdb/initdb.cpp @@ -3931,14 +3931,14 @@ static void parse_vgname_args(char* args) vgname = xstrdup(args); enable_dss = true; if (strstr(vgname, "/") != NULL) { - fprintf(stderr, "invalid token \"/\" in vgname"); + fprintf(stderr, "invalid token \"/\" in vgname\n"); exit(1); } char *comma = strstr(vgname, ","); if (comma == NULL) { vgdata = vgname; - vglog = (char *)""; + vglog = vgname; return; } @@ -3946,11 +3946,7 @@ static void parse_vgname_args(char* args) comma = strstr(vgdata, ","); comma[0] = '\0'; vglog = comma + 1; - if (strstr(vgdata, ",") != NULL) { - fprintf(stderr, "invalid vgname args, should be two volume group names, example: \"+data,+log\""); - exit(1); - } - if (strstr(vglog, ",") != NULL) { + if (strstr(vgdata, ",") != NULL || strstr(vglog, ",") != NULL) { fprintf(stderr, "invalid vgname args, should be two volume group names, example: \"+data,+log\""); exit(1); } diff --git a/src/gausskernel/process/postmaster/pagewriter.cpp b/src/gausskernel/process/postmaster/pagewriter.cpp index 4c4f0dfc22..6c9c35ae11 100755 --- a/src/gausskernel/process/postmaster/pagewriter.cpp +++ b/src/gausskernel/process/postmaster/pagewriter.cpp @@ -1657,6 +1657,23 @@ static void incre_ckpt_aio_callback(struct io_event *event) _exit(0); } + off_t roffset = 0; + if (IsSegmentBufferID(buf_desc->buf_id)) { + roffset = ((buf_desc->tag.blockNum) % RELSEG_SIZE) * BLCKSZ; + } else { + roffset = ((buf_desc->extra->seg_blockno) % RELSEG_SIZE) * 
BLCKSZ; + } + + int aioRet = dss_aio_post_pwrite(event->obj->data, tempAioExtra->aio_fd, event->obj->u.c.nbytes, roffset); + if (aioRet != 0) { + ereport(PANIC, (errmsg("failed to post write by asnyc io (errno = %d), buffer: %d/%d/%d/%d/%d %d-%d", errno, + buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, + (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum))); + } + + buf_desc->extra->aio_in_progress = false; + #ifdef USE_ASSERT_CHECKING char *write_buf = (char *)(event->obj->u.c.buf); char *origin_buf = (char *)palloc(BLCKSZ + ALIGNOF_BUFFER); @@ -1669,10 +1686,9 @@ static void incre_ckpt_aio_callback(struct io_event *event) buf_desc->extra->seg_fileno, buf_desc->tag.forkNum, buf_desc->extra->seg_blockno, (char *)read_buf); } - if (XLByteEQ(PageGetLSN(read_buf), PageGetLSN(write_buf))) { Assert(memcmp(write_buf, read_buf, BLCKSZ) == 0); - } else if (XLByteLT(PageGetLSN(read_buf), PageGetLSN(write_buf))) { + } else if (!PageIsNew(read_buf) && XLByteLT(PageGetLSN(read_buf), PageGetLSN(write_buf))) { ereport(PANIC, (errmsg("[SS][%d/%d/%d/%d/%d %d-%d]aio write error", buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, @@ -1691,22 +1707,6 @@ static void incre_ckpt_aio_callback(struct io_event *event) pfree(origin_buf); #endif - off_t roffset = 0; - if (IsSegmentBufferID(buf_desc->buf_id)) { - roffset = ((buf_desc->tag.blockNum) % RELSEG_SIZE) * BLCKSZ; - } else { - roffset = ((buf_desc->extra->seg_blockno) % RELSEG_SIZE) * BLCKSZ; - } - - int aioRet = dss_aio_post_pwrite(event->obj->data, tempAioExtra->aio_fd, event->obj->u.c.nbytes, roffset); - if (aioRet != 0) { - ereport(PANIC, (errmsg("failed to post write by asnyc io (errno = %d), buffer: %d/%d/%d/%d/%d %d-%d", errno, - buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, - 
(int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, - buf_desc->tag.forkNum, buf_desc->tag.blockNum))); - } - - buf_desc->extra->aio_in_progress = false; UnpinBuffer(buf_desc, true); } diff --git a/src/gausskernel/storage/smgr/segment/segbuffer.cpp b/src/gausskernel/storage/smgr/segment/segbuffer.cpp index 2b9def81bc..b1a7d6880b 100644 --- a/src/gausskernel/storage/smgr/segment/segbuffer.cpp +++ b/src/gausskernel/storage/smgr/segment/segbuffer.cpp @@ -323,7 +323,7 @@ void SegFlushCheckDiskLSN(SegSpace *spc, RelFileNode rNode, ForkNumber forknum, BufferDesc *buf_desc, char *buf) { #ifndef USE_ASSERT_CHECKING - if (!IsInitdb && !RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && + if (!RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DMS && !SS_DISASTER_STANDBY_CLUSTER && !g_instance.dms_cxt.SSRecoveryInfo.disaster_cluster_promoting) { dms_buf_ctrl_t *buf_ctrl = GetDmsBufCtrl(buf_desc->buf_id); XLogRecPtr lsn_on_mem = PageGetLSN(buf); @@ -336,7 +336,7 @@ void SegFlushCheckDiskLSN(SegSpace *spc, RelFileNode rNode, ForkNumber forknum, } } #else - if (!RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DSS && ENABLE_VERIFY_PAGE_VERSION && + if (!RecoveryInProgress() && !SS_IN_ONDEMAND_RECOVERY && ENABLE_DMS && ENABLE_VERIFY_PAGE_VERSION && !SS_DISASTER_STANDBY_CLUSTER && !g_instance.dms_cxt.SSRecoveryInfo.disaster_cluster_promoting) { char *origin_buf = (char *)palloc(BLCKSZ + ALIGNOF_BUFFER); char *temp_buf = (char *)BUFFERALIGN(origin_buf); -- Gitee From 3f5ec8615f753ca8d5ad2fb73e0e76cbd05ae63e Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Tue, 10 Sep 2024 14:14:23 +0800 Subject: [PATCH 289/347] =?UTF-8?q?=E8=B5=84=E6=BA=90=E6=B1=A0=E5=8C=96?= =?UTF-8?q?=E5=8D=95=E9=9B=86=E7=BE=A4=E7=A6=81=E6=AD=A2build?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_ctl/pg_ctl.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/src/bin/pg_ctl/pg_ctl.cpp b/src/bin/pg_ctl/pg_ctl.cpp index d04e4f41a5..dfc784d0f1 100755 --- a/src/bin/pg_ctl/pg_ctl.cpp +++ b/src/bin/pg_ctl/pg_ctl.cpp @@ -7129,6 +7129,10 @@ int main(int argc, char** argv) break; #endif case BUILD_COMMAND: + if (enable_dss && !ss_instance_config.dss.enable_stream && !ss_instance_config.dss.enable_dorado) { + pg_log(PG_PROGRESS, _("build command is not supported in share storage single cluster\n")); + goto Error; + } if (build_mode == COPY_SECURE_FILES_BUILD && (conn_str == NULL || register_username == NULL || register_password == NULL)) { pg_log(PG_PROGRESS, _("When copy secure files from remote, need remote host and authentication!\n")); -- Gitee From 0a66e218ab3267bcd4df389cc6079321331af7d6 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Wed, 11 Sep 2024 16:09:03 +0800 Subject: [PATCH 290/347] =?UTF-8?q?=E5=8F=8C=E9=9B=86=E7=BE=A4=E8=B7=B3?= =?UTF-8?q?=E8=BF=87=E9=83=A8=E5=88=86=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ddes/adapter/ss_dms_callback.cpp | 32 ++++++++++++++----- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 164b104d9b..6a6c40bdcf 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -962,10 +962,18 @@ static void CBVerifyPage(dms_buf_ctrl_t *buf_ctrl, char *new_page) /* latest page must satisfy condition: page lsn_on_disk bigger than transfered page which is latest page */ if ((lsn_now != InvalidXLogRecPtr) && XLByteLT(lsn_now, buf_ctrl->lsn_on_disk)) { - ereport(PANIC, (errmsg("[%d/%d/%d/%d/%d %d-%d] now lsn(0x%llx) is less than lsn_on_disk(0x%llx)", - rnode.spcNode, rnode.dbNode, rnode.relNode, rnode.bucketNode, rnode.opt, - buf_desc->tag.forkNum, buf_desc->tag.blockNum, - (unsigned long long)lsn_now, (unsigned long 
long)buf_ctrl->lsn_on_disk))); + if (SS_DISASTER_STANDBY_CLUSTER) { + ereport(WARNING, (errmsg("[%d/%d/%d/%d/%d %d-%d] now lsn(0x%llx) is less than lsn_on_disk(0x%llx)", + rnode.spcNode, rnode.dbNode, rnode.relNode, rnode.bucketNode, rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum, + (unsigned long long)lsn_now, (unsigned long long)buf_ctrl->lsn_on_disk))); + return; + } else { + ereport(PANIC, (errmsg("[%d/%d/%d/%d/%d %d-%d] now lsn(0x%llx) is less than lsn_on_disk(0x%llx)", + rnode.spcNode, rnode.dbNode, rnode.relNode, rnode.bucketNode, rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum, + (unsigned long long)lsn_now, (unsigned long long)buf_ctrl->lsn_on_disk))); + } } /* we only verify segment-page version */ @@ -975,10 +983,18 @@ static void CBVerifyPage(dms_buf_ctrl_t *buf_ctrl, char *new_page) if ((lsn_now != InvalidXLogRecPtr) && XLByteLT(lsn_now, lsn_past)) { RelFileNode rnode = buf_desc->tag.rnode; - ereport(PANIC, (errmodule(MOD_DMS), errmsg("[SS page][%d/%d/%d/%d/%d %d-%d] now lsn(0x%llx) is less than past lsn(0x%llx)", - rnode.spcNode, rnode.dbNode, rnode.relNode, rnode.bucketNode, rnode.opt, - buf_desc->tag.forkNum, buf_desc->tag.blockNum, - (unsigned long long)lsn_now, (unsigned long long)lsn_past))); + if (SS_DISASTER_STANDBY_CLUSTER) { + ereport(WARNING, (errmodule(MOD_DMS), errmsg("[SS page][%d/%d/%d/%d/%d %d-%d] now lsn(0x%llx) is less than past lsn(0x%llx)", + rnode.spcNode, rnode.dbNode, rnode.relNode, rnode.bucketNode, rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum, + (unsigned long long)lsn_now, (unsigned long long)lsn_past))); + return; + } else { + ereport(PANIC, (errmodule(MOD_DMS), errmsg("[SS page][%d/%d/%d/%d/%d %d-%d] now lsn(0x%llx) is less than past lsn(0x%llx)", + rnode.spcNode, rnode.dbNode, rnode.relNode, rnode.bucketNode, rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum, + (unsigned long long)lsn_now, (unsigned long long)lsn_past))); + } } return; } -- Gitee From 
97606c5f748c6f234ebc7bd26269a0d8dd611e35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E6=A0=8B?= <1209756284@qq.com> Date: Wed, 11 Sep 2024 14:39:38 +0000 Subject: [PATCH 291/347] =?UTF-8?q?=E5=9B=9E=E9=80=80=20'Pull=20Request=20?= =?UTF-8?q?!6287=20:=20=E8=B0=83=E6=95=B4=E6=8C=89=E9=9C=80=E5=9B=9E?= =?UTF-8?q?=E6=94=BE=E6=9C=80=E5=A4=A7=E7=9A=84maxredoitem=E6=95=B0?= =?UTF-8?q?=E9=87=8F'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/access/transam/ondemand_extreme_rto/dispatcher.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp index de1db0ab24..3c96cc7b10 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp @@ -101,7 +101,7 @@ static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */ static const int32 MAX_PENDING = 1; static const int32 MAX_PENDING_STANDBY = 1; -static const int32 ITEM_QUQUE_SIZE_RATIO = 2; +static const int32 ITEM_QUQUE_SIZE_RATIO = 16; static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */ uint32 g_readManagerTriggerFlag = TRIGGER_NORMAL; -- Gitee From 4567180c9b8478b53a7e48aba561054328b5014e Mon Sep 17 00:00:00 2001 From: xushukun Date: Tue, 10 Sep 2024 09:21:27 +0000 Subject: [PATCH 292/347] =?UTF-8?q?=E3=80=90CMIT=E3=80=91FIX=20COPY=20PARA?= =?UTF-8?q?LLEL=20WITH=20HEADER=20MISS=20DATA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/psql/copy.cpp | 88 +++++++++++++++++++++++++++++ src/test/regress/expected/copy2.out | 31 ++++++++++ src/test/regress/sql/copy2.sql | 15 +++++ 3 files changed, 134 insertions(+) diff --git a/src/bin/psql/copy.cpp b/src/bin/psql/copy.cpp index 2c52063fd3..dc97060b20 100644 --- 
a/src/bin/psql/copy.cpp +++ b/src/bin/psql/copy.cpp @@ -173,6 +173,93 @@ static bool IsCopyOptionKeyWordExists(const char* source, const char* key) return false; } +static void RemoveOptionInParallelCopy(struct copy_options* options, char *src, const char *key) +{ + if (src == nullptr) { + return; + } + size_t srcLen = strlen(src); + size_t keyLen = strlen(key); + if (srcLen < keyLen) { + return; + } + char* upperSrc = (char*)pg_malloc(srcLen + 1); + char* upperKey = (char*)pg_malloc(keyLen + 1); + toUpper(upperSrc, src); + toUpper(upperKey, key); + + char *firstPos = strstr(upperSrc, upperKey); + char *curPos = firstPos; + size_t keyCnt = 0; + /* + * Check exist there are multiple keys or single keys. + * When there are multiple headers, we will not process it, + * the server will report an redundant options error. + * because if there are two keys now, after remove one key, + * there are only one key, an invalid SQL will become a valid SQL. + */ + while (curPos != nullptr) { + ++keyCnt; + if (keyCnt > 1) { + break; + } + curPos = curPos + keyLen; + curPos = strstr(curPos, upperKey); + } + + bool isKey = (keyCnt == 1); + + /* header option available only in CSV mode. */ + if (pg_strcasecmp(upperKey, "HEADER") == 0) { + char *csvPos = strstr(upperSrc, "CSV"); + isKey = isKey && (csvPos != nullptr); + } + + if (isKey) { + const char* whitespace = " \t\n\r"; + char *firstPosStart = firstPos; + char *firstPosEnd = firstPos + keyLen - 1; + while (--firstPosStart >= upperSrc) { + if (strchr(whitespace, *firstPosStart) == nullptr) { + break; + } + } + + while (++firstPosEnd <= upperSrc + srcLen) { + if (strchr(whitespace, *firstPosEnd) == nullptr) { + break; + } + } + + /* + * We will not handle situations where there are "," + * before and after the key. 
eg: "with (csv, header)" + */ + + if ((firstPosStart >= upperSrc && *firstPosStart == ',') || + (firstPosEnd <= upperSrc + srcLen && *firstPosEnd == ',')) { + isKey = false; + } + + /* + * We will handle situations like this: + * eg: "with csv header;" or "with csv header ..." + */ + isKey = isKey && (firstPos == upperSrc || strchr(whitespace, *(firstPos - 1)) != nullptr) && + (strchr(whitespace, *firstPosEnd) != nullptr || *firstPosEnd == ';' || *firstPosEnd == '\0'); + } + + if (keyCnt == 1 && isKey) { + options->hasHeader = true; + char *cpyDestPos = src + (firstPos - upperSrc); + char *cpySrcPos = cpyDestPos + keyLen; + errno_t rc = memmove_s(cpyDestPos, strlen(cpySrcPos) + 1, cpySrcPos, strlen(cpySrcPos) + 1); + securec_check_c(rc, "\0", "\0"); + } + free(upperKey); + free(upperSrc); +} + /* parse parallel settings */ static bool ParseParallelOption(struct copy_options* result, char** errToken) { @@ -204,6 +291,7 @@ static bool ParseParallelOption(struct copy_options* result, char** errToken) } token = strtokx(nullptr, "", NULL, NULL, 0, false, false, pset.encoding); + RemoveOptionInParallelCopy(result, token, "header"); if (token != nullptr) { xstrcat(&result->after_tofrom, " "); xstrcat(&result->after_tofrom, token); diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out index cb87764270..488e77358a 100644 --- a/src/test/regress/expected/copy2.out +++ b/src/test/regress/expected/copy2.out @@ -373,3 +373,34 @@ LINE 1: COPY copy_header_dest FROM STDIN csv header'on'; ^ drop table copy_header_src; drop table copy_header_dest; +create table copy_parallel_header_src (c1 int); +create table copy_parallel_header_dest (c1 int); +insert into copy_parallel_header_src select generate_series(1,100); +\copy copy_parallel_header_src to '~/copy_parallel_header_src.csv' with csv header; +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header; +select count(*) from copy_parallel_header_dest; + count 
+------- + 100 +(1 row) + +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header header; +ERROR: conflicting or redundant options +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header'on'; +ERROR: syntax error at or near "'on'" +LINE 1: ... copy_parallel_header_dest FROM STDIN with csv header'on'; + ^ +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header,; +ERROR: syntax error at or near "," +LINE 1: ...OPY copy_parallel_header_dest FROM STDIN with csv header,; + ^ +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv, header; +ERROR: syntax error at or near "," +LINE 1: ...OPY copy_parallel_header_dest FROM STDIN with csv, header; + ^ +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with (csv, header); +ERROR: option "csv" not recognized +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with header; +ERROR: COPY HEADER available only in CSV mode +drop table copy_parallel_header_src; +drop table copy_parallel_header_dest; diff --git a/src/test/regress/sql/copy2.sql b/src/test/regress/sql/copy2.sql index b77eba045f..4c9e886cec 100644 --- a/src/test/regress/sql/copy2.sql +++ b/src/test/regress/sql/copy2.sql @@ -280,3 +280,18 @@ insert into copy_header_src select generate_series(1,10); drop table copy_header_src; drop table copy_header_dest; + +create table copy_parallel_header_src (c1 int); +create table copy_parallel_header_dest (c1 int); +insert into copy_parallel_header_src select generate_series(1,100); +\copy copy_parallel_header_src to '~/copy_parallel_header_src.csv' with csv header; +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header; +select count(*) from copy_parallel_header_dest; +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header 
header; +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header'on'; +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv header,; +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with csv, header; +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with (csv, header); +\copy copy_parallel_header_dest from '~/copy_parallel_header_src.csv' parallel 8 with header; +drop table copy_parallel_header_src; +drop table copy_parallel_header_dest; -- Gitee From 9d526dc226ea2cd796eb6b02fb8fb12b85b17bb9 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Thu, 12 Sep 2024 10:33:26 +0800 Subject: [PATCH 293/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=8F=8C=E9=9B=86?= =?UTF-8?q?=E7=BE=A4=E5=B0=9D=E8=AF=95=E5=88=A0=E9=99=A4xlogtemp=E5=A4=B1?= =?UTF-8?q?=E8=B4=A5=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/transam/xlog.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index fc94850d5c..1b8a05dcb8 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -5240,6 +5240,11 @@ XLogSegNo XLogGetLastRemovedSegno(void) static void remove_xlogtemp_files(void) { + if (SS_DORADO_STANDBY_CLUSTER) { + /* in ss dorado standby cluster, we dont init or remove xlog files */ + ereport(LOG, (errmsg("ss dorado standby cluster skip remove xlogtemp"))); + return; + } DIR *dir = NULL; char fullpath[MAXPGPATH] = {0}; struct dirent *de = NULL; -- Gitee From 661b839d65c36e9d5c24b6d5c8b773bafcb935f1 Mon Sep 17 00:00:00 2001 From: yuchao Date: Tue, 27 Aug 2024 17:12:17 +0800 Subject: [PATCH 294/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddate=E7=B1=BB?= 
=?UTF-8?q?=E5=9E=8B=E7=9B=B4=E6=8E=A5=E6=8F=92=E5=85=A5=E4=B8=8E=E4=BD=BF?= =?UTF-8?q?=E7=94=A8TO=5FDATE=E5=87=BD=E6=95=B0=E8=A1=8C=E4=B8=BA=E4=B8=8D?= =?UTF-8?q?=E4=B8=80=E8=87=B4=EF=BC=8C=E6=98=AF=E5=90=A6=E8=A6=81=E5=85=81?= =?UTF-8?q?=E8=AE=B8>9999=E5=80=BC=E7=9B=B4=E6=8E=A5=E6=8F=92=E5=85=A5?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=20=EF=BC=88cherry=20picked=20commit?= =?UTF-8?q?=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/adt/timestamp.cpp | 8 +++++- .../regress/expected/insert_date_to_date.out | 10 +++++++ .../regress/expected/mot/single_node_date.out | 27 +++++++++---------- .../regress/expected/single_node_date.out | 27 +++++++++---------- .../regress/output/cstore_cmpr_date.source | 24 ++++------------- src/test/regress/parallel_schedule0 | 2 +- src/test/regress/sql/insert_date_to_date.sql | 4 +++ 7 files changed, 51 insertions(+), 51 deletions(-) create mode 100644 src/test/regress/expected/insert_date_to_date.out create mode 100644 src/test/regress/sql/insert_date_to_date.sql diff --git a/src/common/backend/utils/adt/timestamp.cpp b/src/common/backend/utils/adt/timestamp.cpp index 0039c3a44a..993072e7ba 100644 --- a/src/common/backend/utils/adt/timestamp.cpp +++ b/src/common/backend/utils/adt/timestamp.cpp @@ -234,8 +234,14 @@ bool TimestampTypeCheck(char* str, bool can_ignore, struct pg_tm* tm, Timestamp if (tm2timestamp(tm, fsec, NULL, &result) != 0) ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range: \"%s\"", str))); - break; + if (tm->tm_year < MIN_VALUE_YEAR || tm->tm_year > MAX_VALUE_YEAR || tm->tm_year == 0) { + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("invalid data for \"year = %d\", value must be between -4712 and 9999," \ + " and not be 0", tm->tm_year))); + } + break; case DTK_EPOCH: result = SetEpochTimestamp(); break; diff --git a/src/test/regress/expected/insert_date_to_date.out 
b/src/test/regress/expected/insert_date_to_date.out new file mode 100644 index 0000000000..0c29066c75 --- /dev/null +++ b/src/test/regress/expected/insert_date_to_date.out @@ -0,0 +1,10 @@ +create table tt(col1 date); +insert into tt values('10000-01-28'); +ERROR: invalid data for "year = 10000", value must be between -4712 and 9999, and not be 0 +LINE 1: insert into tt values('10000-01-28'); + ^ +CONTEXT: referenced column: col1 +insert into tt values(TO_DATE('10000-01-28','yyyy-mm-dd')); +ERROR: invalid data for "year = 10000" ,value must be between 1 and 9999, and not be 0 +CONTEXT: referenced column: col1 +drop table tt; diff --git a/src/test/regress/expected/mot/single_node_date.out b/src/test/regress/expected/mot/single_node_date.out index 807196b57e..186bea3968 100644 --- a/src/test/regress/expected/mot/single_node_date.out +++ b/src/test/regress/expected/mot/single_node_date.out @@ -955,11 +955,10 @@ SELECT EXTRACT(CENTURY FROM DATE '0100-12-31 BC'); -- -1 (1 row) SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); -- -1 - date_part ------------ - -1 -(1 row) - +ERROR: invalid data for "year = 0", value must be between -4712 and 9999, and not be 0 +LINE 1: SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); + ^ +CONTEXT: referenced column: date_part SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); -- 1 date_part ----------- @@ -1006,11 +1005,10 @@ SELECT EXTRACT(CENTURY FROM CURRENT_DATE)>=21 AS True; -- true -- millennium -- SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); -- -1 - date_part ------------ - -1 -(1 row) - +ERROR: invalid data for "year = 0", value must be between -4712 and 9999, and not be 0 +LINE 1: SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); + ^ +CONTEXT: referenced column: date_part SELECT EXTRACT(MILLENNIUM FROM DATE '0001-01-01 AD'); -- 1 date_part ----------- @@ -1070,11 +1068,10 @@ SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); -- 0 (1 row) SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); -- 0 - date_part ------------ - 0 
-(1 row) - +ERROR: invalid data for "year = 0", value must be between -4712 and 9999, and not be 0 +LINE 1: SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); + ^ +CONTEXT: referenced column: date_part SELECT EXTRACT(DECADE FROM DATE '0002-12-31 BC'); -- -1 date_part ----------- diff --git a/src/test/regress/expected/single_node_date.out b/src/test/regress/expected/single_node_date.out index 38ff202252..03b7a9efe3 100644 --- a/src/test/regress/expected/single_node_date.out +++ b/src/test/regress/expected/single_node_date.out @@ -954,11 +954,10 @@ SELECT EXTRACT(CENTURY FROM DATE '0100-12-31 BC'); -- -1 (1 row) SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); -- -1 - date_part ------------ - -1 -(1 row) - +ERROR: invalid data for "year = 0", value must be between -4712 and 9999, and not be 0 +LINE 1: SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); + ^ +CONTEXT: referenced column: date_part SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); -- 1 date_part ----------- @@ -1005,11 +1004,10 @@ SELECT EXTRACT(CENTURY FROM CURRENT_DATE)>=21 AS True; -- true -- millennium -- SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); -- -1 - date_part ------------ - -1 -(1 row) - +ERROR: invalid data for "year = 0", value must be between -4712 and 9999, and not be 0 +LINE 1: SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); + ^ +CONTEXT: referenced column: date_part SELECT EXTRACT(MILLENNIUM FROM DATE '0001-01-01 AD'); -- 1 date_part ----------- @@ -1069,11 +1067,10 @@ SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); -- 0 (1 row) SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); -- 0 - date_part ------------ - 0 -(1 row) - +ERROR: invalid data for "year = 0", value must be between -4712 and 9999, and not be 0 +LINE 1: SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); + ^ +CONTEXT: referenced column: date_part SELECT EXTRACT(DECADE FROM DATE '0002-12-31 BC'); -- -1 date_part ----------- diff --git a/src/test/regress/output/cstore_cmpr_date.source 
b/src/test/regress/output/cstore_cmpr_date.source index 4bc4f1237b..5a3843656f 100644 --- a/src/test/regress/output/cstore_cmpr_date.source +++ b/src/test/regress/output/cstore_cmpr_date.source @@ -43,25 +43,11 @@ CREATE TABLE colcmpr_date_01 c date ) with (orientation = column) ; COPY colcmpr_date_01 FROM '@abs_srcdir@/data/colcmpr_date_01.data'; +ERROR: invalid data for "year = 294276", value must be between -4712 and 9999, and not be 0 +CONTEXT: COPY colcmpr_date_01, line 5, column c: "294276-01-01 AD" SELECT * FROM colcmpr_date_01; - distkey | a | b | c ----------+------------------------+------------------------+----------------------- - 1 | 4713-01-01 00:00:00 BC | 4713-01-01 00:00:00 BC | 0001-01-01 00:00:00 - 1 | 4713-01-01 00:00:00 BC | 4713-01-02 00:00:00 BC | 0001-01-02 00:00:00 - 1 | 4713-01-01 00:00:00 BC | 4713-01-03 00:00:00 BC | 0001-01-03 00:00:00 - 1 | 4713-01-01 00:00:00 BC | 4713-01-04 00:00:00 BC | 0001-01-04 00:00:00 - 1 | 4713-01-01 00:00:00 BC | 4713-01-05 00:00:00 BC | 294276-01-01 00:00:00 - 1 | 4713-01-01 00:00:00 BC | 4712-01-01 00:00:00 BC | 294276-02-01 00:00:00 - 1 | 4713-01-01 00:00:00 BC | 4711-01-01 00:00:00 BC | 294276-03-01 00:00:00 - 1 | 4713-12-31 00:00:00 BC | 4710-01-01 00:00:00 BC | 294276-04-01 00:00:00 - 1 | 294276-01-01 00:00:00 | 0001-01-01 00:00:00 | 294276-05-01 00:00:00 - 1 | 294276-01-01 00:00:00 | 0001-01-01 00:00:00 BC | 294276-07-01 00:00:00 - 1 | 294276-01-01 00:00:00 | 0001-01-02 00:00:00 BC | 294276-08-01 00:00:00 - 1 | 294276-01-01 00:00:00 | 0001-01-03 00:00:00 BC | 294276-09-01 00:00:00 - 1 | 294276-01-01 00:00:00 | 0001-01-04 00:00:00 BC | 294276-10-01 00:00:00 - 1 | 294276-01-01 00:00:00 | 0001-01-02 00:00:00 | 294276-11-01 00:00:00 - 1 | 294276-01-01 00:00:00 | 0001-01-03 00:00:00 | 294276-12-01 00:00:00 - 1 | 294276-12-31 00:00:00 | 0001-01-04 00:00:00 | 294276-12-31 00:00:00 -(16 rows) + distkey | a | b | c +---------+---+---+--- +(0 rows) DROP TABLE colcmpr_date_01; diff --git 
a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 921478c21b..d77a3cf583 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -45,7 +45,7 @@ test: spm_adaptive_gplan test: smp smp_cursor parallel_enable_function test: alter_hw_package test: hw_grant_package gsc_func gsc_db -test: uppercase_attribute_name decode_compatible_with_o outerjoin_bugfix chr_gbk +test: uppercase_attribute_name decode_compatible_with_o outerjoin_bugfix chr_gbk insert_date_to_date test: replace_func_with_two_args trunc_func_for_date nlssort_pinyin updatable_views test_cursor_arg_defexpr # test multiple statistics diff --git a/src/test/regress/sql/insert_date_to_date.sql b/src/test/regress/sql/insert_date_to_date.sql new file mode 100644 index 0000000000..60fae22db5 --- /dev/null +++ b/src/test/regress/sql/insert_date_to_date.sql @@ -0,0 +1,4 @@ +create table tt(col1 date); +insert into tt values('10000-01-28'); +insert into tt values(TO_DATE('10000-01-28','yyyy-mm-dd')); +drop table tt; -- Gitee From eac925faf33ae57dee8a1dcc61b9581e4f5d20bc Mon Sep 17 00:00:00 2001 From: douxin Date: Sat, 31 Aug 2024 19:49:54 +0800 Subject: [PATCH 295/347] =?UTF-8?q?[bugfix]=20repair=20access=20a=20table?= =?UTF-8?q?=20through=20a=20synonym=20when=20no=20permissions=20=EF=BC=88c?= =?UTF-8?q?herry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/namespace.cpp | 4 + .../regress/expected/synonym_permission.out | 86 +++++++++++++++++++ src/test/regress/parallel_schedule0A | 1 + src/test/regress/sql/synonym_permission.sql | 48 +++++++++++ 4 files changed, 139 insertions(+) create mode 100644 src/test/regress/expected/synonym_permission.out create mode 100644 src/test/regress/sql/synonym_permission.sql diff --git a/src/common/backend/catalog/namespace.cpp b/src/common/backend/catalog/namespace.cpp index 5283da0631..6c05470fa2 100644 --- 
a/src/common/backend/catalog/namespace.cpp +++ b/src/common/backend/catalog/namespace.cpp @@ -305,6 +305,10 @@ Oid RangeVarGetRelidExtended(const RangeVar* relation, LOCKMODE lockmode, bool m if (isSupportSynonym) { pfree_ext(errDetail); errDetail = RelnameGetRelidExtended(relation->relname, &relId, refSynOid, detailInfo); + if (relId != NULL && OidIsValid(relId)) { + Oid namespaceId = get_rel_namespace(relId); + LookupExplicitNamespace(get_namespace_name(namespaceId)); + } } else { relId = RelnameGetRelid(relation->relname, detailInfo); } diff --git a/src/test/regress/expected/synonym_permission.out b/src/test/regress/expected/synonym_permission.out new file mode 100644 index 0000000000..1571f88491 --- /dev/null +++ b/src/test/regress/expected/synonym_permission.out @@ -0,0 +1,86 @@ +drop database db_1138120; +ERROR: database "db_1138120" does not exist +drop user user1_1138120; +ERROR: role "user1_1138120" does not exist +drop user user2_1138120; +ERROR: role "user2_1138120" does not exist +-- create database and user +CREATE DATABASE db_1138120; +\c db_1138120 +CREATE USER user1_1138120 PASSWORD 'Abc@1138120'; +grant all on database db_1138120 to user1_1138120; +CREATE USER user2_1138120 PASSWORD 'Abc@1138120'; +grant all on database db_1138120 to user2_1138120; +-- create synonym +create or replace synonym user2_1138120.syn1_1138120 for user1_1138120.tab_1138120; +-- \c - user1_1138120 +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +create table tab_1138120 (id int,name text); +insert into tab_1138120 values (1,'abc'); +select * from tab_1138120; + id | name +----+------ + 1 | abc +(1 row) + +-- \c - user2_1138120 +-- no permission for table and schema +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; +select * from user1_1138120.tab_1138120; +ERROR: permission denied for schema user1_1138120 +LINE 1: select * from user1_1138120.tab_1138120; + ^ +DETAIL: N/A +select * from syn1_1138120; +ERROR: permission denied for schema 
user1_1138120 +LINE 1: select * from syn1_1138120; + ^ +DETAIL: N/A +-- add table permission +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +grant all privileges on table tab_1138120 to user2_1138120; +-- no permission for schema +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; +select * from user1_1138120.tab_1138120; +ERROR: permission denied for schema user1_1138120 +LINE 1: select * from user1_1138120.tab_1138120; + ^ +DETAIL: N/A +select * from syn1_1138120; +ERROR: permission denied for schema user1_1138120 +LINE 1: select * from syn1_1138120; + ^ +DETAIL: N/A +-- add schema permission +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +grant usage on schema user1_1138120 to user2_1138120; +-- have permission for schema and table, query success +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; +select * from user1_1138120.tab_1138120; + id | name +----+------ + 1 | abc +(1 row) + +select * from syn1_1138120; + id | name +----+------ + 1 | abc +(1 row) + +-- revoke table permission +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +revoke all privileges on table tab_1138120 from user2_1138120; +-- no table permission +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; +select * from user1_1138120.tab_1138120; +ERROR: permission denied for relation tab_1138120 +DETAIL: N/A +select * from syn1_1138120; +ERROR: permission denied for relation tab_1138120 +DETAIL: N/A +--clear +\c postgres +drop database db_1138120; +drop user user1_1138120; +drop user user2_1138120; diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 213015b877..36484c9189 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -294,6 +294,7 @@ test: single_node_triggers # Synonym tests #test: single_node_synonym test: synonym_conflict_test +test: synonym_permission # unsupported view tests test: single_node_unsupported_view 
diff --git a/src/test/regress/sql/synonym_permission.sql b/src/test/regress/sql/synonym_permission.sql new file mode 100644 index 0000000000..5ef867a969 --- /dev/null +++ b/src/test/regress/sql/synonym_permission.sql @@ -0,0 +1,48 @@ +drop database db_1138120; +drop user user1_1138120; +drop user user2_1138120; +-- create database and user +CREATE DATABASE db_1138120; +\c db_1138120 +CREATE USER user1_1138120 PASSWORD 'Abc@1138120'; +grant all on database db_1138120 to user1_1138120; +CREATE USER user2_1138120 PASSWORD 'Abc@1138120'; +grant all on database db_1138120 to user2_1138120; +-- create synonym +create or replace synonym user2_1138120.syn1_1138120 for user1_1138120.tab_1138120; +-- \c - user1_1138120 +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +create table tab_1138120 (id int,name text); +insert into tab_1138120 values (1,'abc'); +select * from tab_1138120; +-- \c - user2_1138120 +-- no permission for table and schema +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; +select * from user1_1138120.tab_1138120; +select * from syn1_1138120; +-- add table permission +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +grant all privileges on table tab_1138120 to user2_1138120; +-- no permission for schema +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; +select * from user1_1138120.tab_1138120; +select * from syn1_1138120; +-- add schema permission +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +grant usage on schema user1_1138120 to user2_1138120; +-- have permission for schema and table, query success +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; +select * from user1_1138120.tab_1138120; +select * from syn1_1138120; +-- revoke table permission +SET SESSION AUTHORIZATION user1_1138120 password 'Abc@1138120'; +revoke all privileges on table tab_1138120 from user2_1138120; +-- no table permission +SET SESSION AUTHORIZATION user2_1138120 password 'Abc@1138120'; 
+select * from user1_1138120.tab_1138120; +select * from syn1_1138120; +--clear +\c postgres +drop database db_1138120; +drop user user1_1138120; +drop user user2_1138120; -- Gitee From 03b6b1133d6b30bfea265eab5edd64c5c7d1074c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=82=AE=E5=82=A8-=E7=8E=8B=E5=BB=BA=E8=BE=BE?= Date: Thu, 22 Aug 2024 23:39:40 +0800 Subject: [PATCH 296/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=A4=87=E4=BB=BD?= =?UTF-8?q?=E6=81=A2=E5=A4=8D=E5=B7=A5=E5=85=B7=E5=AE=A1=E8=AE=A1=E6=97=A5?= =?UTF-8?q?=E5=BF=97issue?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_basebackup/pg_basebackup.cpp | 4 +- src/bin/pg_dump/pg_dump.cpp | 20 +++++++-- src/bin/pg_dump/pg_dumpall.cpp | 4 +- src/bin/pg_dump/pg_restore.cpp | 2 +- src/bin/pg_probackup/pg_probackup.cpp | 2 +- src/lib/elog/elog.cpp | 24 ++++++++++- .../regress/input/backup_tool_audit.source | 40 ++++++++++++++++++ .../regress/output/backup_tool_audit.source | 42 +++++++++++++++++++ src/test/regress/parallel_schedule0 | 1 + src/test/regress/parallel_schedule0A | 3 ++ 10 files changed, 131 insertions(+), 11 deletions(-) create mode 100644 src/test/regress/input/backup_tool_audit.source create mode 100644 src/test/regress/output/backup_tool_audit.source diff --git a/src/bin/pg_basebackup/pg_basebackup.cpp b/src/bin/pg_basebackup/pg_basebackup.cpp index 4dbb88b57e..88c47e729b 100644 --- a/src/bin/pg_basebackup/pg_basebackup.cpp +++ b/src/bin/pg_basebackup/pg_basebackup.cpp @@ -47,6 +47,8 @@ #include "fetchmot.h" #endif +#define PROG_NAME_BACKUP "gs_basebackup" + typedef struct TablespaceListCell { struct TablespaceListCell* next; char old_dir[MAXPGPATH]; @@ -1649,7 +1651,7 @@ int main(int argc, char **argv) { progname = get_progname(argv[0]); if (!strcmp(progname, "gs_basebackup")) { - init_audit(progname, argc, argv); + init_audit(PROG_NAME_BACKUP, argc, argv); return GsBaseBackup(argc, argv); } else if (!strcmp(progname, "gs_tar")) { return GsTar(argc, 
argv); diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index a71b4a0735..bca527a8d8 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -744,6 +744,7 @@ int main(int argc, char** argv) } } + init_audit(PROG_NAME, argc, argv); /* parse the dumpall options */ getopt_dump(argc, argv, long_options, &optindex); @@ -754,7 +755,6 @@ int main(int argc, char** argv) // log output redirect init_log((char*)PROG_NAME); - init_audit(PROG_NAME, argc, argv); validatedumpoptions(); /* Identify archive format to emit */ @@ -1463,6 +1463,7 @@ void getopt_dump(int argc, char** argv, struct option options[], int* result) char* listFilePath = NULL; char* listFileName = NULL; bool if_compress = false; + errno_t rc = EOK; /* check if a required_argument option has a void argument */ char optstring[] = "abcCE:f:F:g:h:n:N:oOp:q:RsS:t:T:U:vwW:xZ:"; @@ -1763,9 +1764,20 @@ void getopt_dump(int argc, char** argv, struct option options[], int* result) } /* Get database name from command line */ - if (optind < argc) - dbname = argv[optind++]; - + if (optind < argc) { + dbname = gs_strdup(argv[optind]); + if (strncmp(argv[optind], "postgresql://", strlen("postgresql://")) == 0) { + char *off_argv = argv[optind] + strlen("postgresql://"); + rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv)); + check_memset_s(rc); + } else if (strncmp(argv[optind], "postgres://", strlen("postgres://")) == 0) { + char *off_argv = argv[optind] + strlen("postgres://"); + rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv)); + check_memset_s(rc); + } + optind++; + } + /* Complain if any arguments remain */ if (optind < argc) { write_stderr(_("%s: too many command-line arguments (first is \"%s\")\n"), progname, argv[optind]); diff --git a/src/bin/pg_dump/pg_dumpall.cpp b/src/bin/pg_dump/pg_dumpall.cpp index d4eea2d3a4..da2d724436 100644 --- a/src/bin/pg_dump/pg_dumpall.cpp +++ b/src/bin/pg_dump/pg_dumpall.cpp @@ -302,7 +302,8 @@ int main(int 
argc, char* argv[]) progname); exit_nicely(1); } - + + init_audit(PROG_NAME, argc, argv); /* parse the dumpall options */ getopt_dumpall(argc, argv, long_options, &optindex); @@ -310,7 +311,6 @@ int main(int argc, char* argv[]) get_password_pipeline(); } - init_audit(PROG_NAME, argc, argv); /* validate the optons values */ validate_dumpall_options(argv); diff --git a/src/bin/pg_dump/pg_restore.cpp b/src/bin/pg_dump/pg_restore.cpp index ebf910fddf..2e4216884b 100644 --- a/src/bin/pg_dump/pg_restore.cpp +++ b/src/bin/pg_dump/pg_restore.cpp @@ -206,6 +206,7 @@ int main(int argc, char** argv) } } + init_audit(PROG_NAME, argc, argv); /* parse the restore options for gs_restore*/ restore_getopts(argc, argv, cmdopts, opts, &inputFileSpec); @@ -267,7 +268,6 @@ int main(int argc, char** argv) (void)fclose(fp); fp = NULL; - init_audit(PROG_NAME, argc, argv); /* validate the restore options before start the actual operation */ validate_restore_options(argv, opts); if (is_encrypt && module_params == NULL) { diff --git a/src/bin/pg_probackup/pg_probackup.cpp b/src/bin/pg_probackup/pg_probackup.cpp index c78fec59d7..9a266e575c 100644 --- a/src/bin/pg_probackup/pg_probackup.cpp +++ b/src/bin/pg_probackup/pg_probackup.cpp @@ -834,6 +834,7 @@ int main(int argc, char *argv[]) */ main_tid = pthread_self(); + init_audit(PROG_NAME, argc, argv); /* Parse subcommands and non-subcommand options */ if (argc > 1) { @@ -847,7 +848,6 @@ int main(int argc, char *argv[]) * Make command string before getopt_long() will call. It permutes the * content of argv. */ - init_audit(PROG_NAME, argc, argv); /* TODO why do we do that only for some commands? 
*/ command_name = gs_pstrdup(argv[1]); command = make_command_string(argc, argv); diff --git a/src/lib/elog/elog.cpp b/src/lib/elog/elog.cpp index eb9a1513a3..fc33eea616 100644 --- a/src/lib/elog/elog.cpp +++ b/src/lib/elog/elog.cpp @@ -732,11 +732,31 @@ static void report_command(FILE *fp, auditConfig *audit_cfg) { securec_check_c(rc, "\0", "\0"); rc = strcat_s(command, MAXPGPATH, " "); securec_check_c(rc, "\0", "\0"); + + bool is_pass = false; for (int i = 1; i/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_basebackup -" $GAUSSLOG/bin/gs_basebackup/audit* +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG + +-- gs_dump failure +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump - >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump -" $GAUSSLOG/bin/gs_dump/audit* +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG + +-- gs_dump success +create database gs_dump_audit; +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump -p @portstring@ gs_dump_audit > @abs_bindir@/gs_dump_audit.log 2>&1 ; echo $? +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[SUCCESS\].*gs_dump -p @portstring@ gs_dump_audit" $GAUSSLOG/bin/gs_dump/audit* +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG + +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump -W'asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG + +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump -W 'asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG + +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump --password 'asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +\! 
GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG + +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump --password='asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG + +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump "postgresql://asdasd" >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump postgresql:\/\/\*\*\*\*\*\*" $GAUSSLOG/bin/gs_dump/audit* +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG \ No newline at end of file diff --git a/src/test/regress/output/backup_tool_audit.source b/src/test/regress/output/backup_tool_audit.source new file mode 100644 index 0000000000..dd2d77803e --- /dev/null +++ b/src/test/regress/output/backup_tool_audit.source @@ -0,0 +1,42 @@ +-- gs_basebackup failure +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_basebackup - >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_basebackup -" $GAUSSLOG/bin/gs_basebackup/audit* +1 +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG +-- gs_dump failure +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump - >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump -" $GAUSSLOG/bin/gs_dump/audit* +1 +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG +-- gs_dump success +create database gs_dump_audit; +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump -p @portstring@ gs_dump_audit > @abs_bindir@/gs_dump_audit.log 2>&1 ; echo $? +0 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[SUCCESS\].*gs_dump -p @portstring@ gs_dump_audit" $GAUSSLOG/bin/gs_dump/audit* +1 +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump -W'asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +1 +\! 
GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump -W 'asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +1 +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump --password 'asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +1 +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump --password='asd' >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump" $GAUSSLOG/bin/gs_dump/audit* +1 +\! GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG +-- gs_dump with password +\! export GAUSSLOG=@abs_bindir@/audit; @abs_bindir@/gs_dump "postgresql://asdasd" >/dev/null 2>&1 +\! GAUSSLOG=@abs_bindir@/audit; grep -rc "\[FAILURE\].*gs_dump postgresql:\/\/\*\*\*\*\*\*" $GAUSSLOG/bin/gs_dump/audit* +1 +\! 
GAUSSLOG=@abs_bindir@/audit; rm -rf $GAUSSLOG diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 921478c21b..ee7451077c 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -1129,3 +1129,4 @@ test: enable_expr_fusion_flatten test: on_update_session1 on_update_session2 test: ts_gb18030_utf8 +test: backup_tool_audit \ No newline at end of file diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 213015b877..34f0a1a60f 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -491,3 +491,6 @@ test: test_binary_suffix # test for rotate and unrotate function test: gb_ora_rotate_unrotate + +# test backup tool audit log +test: backup_tool_audit -- Gitee From dff0950d15fb70a86aa8943692409dec0b35fe37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Wed, 11 Sep 2024 19:59:27 +0800 Subject: [PATCH 297/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddecode=E6=8A=A5?= =?UTF-8?q?=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/nodeFuncs.cpp | 9 ++ src/common/backend/nodes/readfuncs.cpp | 16 ++ src/common/backend/utils/adt/ruleutils.cpp | 10 ++ .../expected/decode_compatible_with_o.out | 148 ++++++++++++++++++ .../regress/sql/decode_compatible_with_o.sql | 142 +++++++++++++++++ 5 files changed, 325 insertions(+) diff --git a/src/common/backend/nodes/nodeFuncs.cpp b/src/common/backend/nodes/nodeFuncs.cpp index 0ecffd31b3..2175674ead 100644 --- a/src/common/backend/nodes/nodeFuncs.cpp +++ b/src/common/backend/nodes/nodeFuncs.cpp @@ -974,6 +974,7 @@ Oid exprCollation(const Node* expr) case T_NanTest: case T_InfiniteTest: case T_HashFilter: + case T_TypeCast: coll = InvalidOid; /* result is always boolean */ break; case T_BooleanTest: @@ -2922,6 +2923,14 @@ Node* expression_tree_mutator(Node* node, Node* 
(*mutator)(Node*, void*), void* return (Node*)newnode; } break; + case T_TypeCast: { + TypeCast *tc = (TypeCast*)node; + TypeCast *newnode = NULL; + FLATCOPY(newnode, tc, TypeCast, isCopy); + MUTATE(newnode->arg, tc->arg, Node*); + return (Node*)newnode; + } break; + default: ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized node type: %d", (int)nodeTag(node)))); diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 7df5fdb85d..76a176de3f 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -5978,6 +5978,20 @@ static TypeName* _readTypeName() READ_DONE(); } +/** + * @Description: deserialize the TypeCast struct. + * @in str, deserialized string. + * @return TypeCast struct. + */ +static TypeCast* _readTypeCast(void) +{ + READ_LOCALS(TypeCast); + READ_NODE_FIELD(arg); + READ_NODE_FIELD(typname); + READ_INT_FIELD(location); + READ_DONE(); +} + /** * @Description: deserialize the IndexElem struct. * @in str, deserialized string. 
@@ -6719,6 +6733,8 @@ Node* parseNodeString(void) return_value = _readDeclareCursorStmt(); } else if (MATCH("CURSOREXPRESSION", 16)) { return_value = _readCursorExpression(); + } else if (MATCH("TYPECAST", 8)) { + return_value = _readTypeCast(); } else if (MATCH("NESTLOOP", 8)) { return_value = _readNestLoop(); } else if (MATCH("SEQSCAN", 7)) { diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index 0f8b670b9a..5dc2db4fe7 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -10798,6 +10798,16 @@ static void get_rule_expr(Node* node, deparse_context* context, bool showimplici appendStringInfo(buf, "CURSOR(%s)", stmt->raw_query_str); } break; + case T_TypeCast: { + TypeCast* tc = (TypeCast*) node; + if (showimplicit) { + get_coercion_expr(tc->arg, context, tc->typname->typeOid, -1, node); + } else { + /* don't show the implicit cast */ + get_rule_expr_paren(tc->arg, context, false, node, no_alias); + } + } break; + #ifdef USE_SPQ case T_DMLActionExpr: appendStringInfo(buf, "DMLAction"); diff --git a/src/test/regress/expected/decode_compatible_with_o.out b/src/test/regress/expected/decode_compatible_with_o.out index 2523f914ed..1f9acd33f7 100755 --- a/src/test/regress/expected/decode_compatible_with_o.out +++ b/src/test/regress/expected/decode_compatible_with_o.out @@ -8601,6 +8601,154 @@ CONTEXT: referenced column: case select case '1.0' when 1 then 'same' else 'different' end; ERROR: invalid input syntax for type bigint: "1.0" CONTEXT: referenced column: case +set sql_beta_feature = 'none'; +create table base_tab_000 ( +col_tinyint tinyint, +col_smallint smallint, +col_int integer, +col_bigint bigint, +col_numeric numeric, +col_real real, +col_double double precision, +col_decimal decimal, +col_varchar varchar, +col_char char(30), +col_nvarchar2 nvarchar2, +col_text text, +col_timestamptz timestamp with time zone, +col_timestamp timestamp without time zone, 
+col_date date, +col_time time without time zone, +col_timetz time with time zone, +col_interval interval, +col_smalldatetine smalldatetime) +partition by range (col_int) +( +partition vector_base_tab_000_1 values less than (10), +partition vector_base_tab_000_2 values less than (1357), +partition vector_base_tab_000_3 values less than (2687), +partition vector_base_tab_000_4 values less than (maxvalue) +); +create table base_type_tab_000 ( +col_tinyint tinyint, +col_smallint smallint, +col_int integer, +col_bigint bigint, +col_money money, +col_numeric numeric, +col_real real, +col_double double precision, +col_decimal decimal, +col_varchar varchar, +col_char char(30), +col_nvarchar2 nvarchar2, +col_text text, +col_timestamp timestamp with time zone, +col_timestamptz timestamp without time zone, +col_date date, +col_time time without time zone, +col_timetz time with time zone, +col_interval interval, +col_tinterval tinterval, +col_smalldatetine smalldatetime, +col_bytea bytea, +col_boolean boolean, +col_inet inet, +col_cidr cidr, +col_bit bit(10), +col_varbit varbit(10), +col_oid oid) ; +CREATE TABLE Customer ( +c_id int , +c_d_id int , +c_w_id int , +c_first varchar(16) , +c_middle char(2) , +c_last varchar(16) , +c_street_1 varchar(20) , +c_street_2 varchar(20) , +c_city varchar(20) , +c_state char(2) , +c_zip char(9) , +c_phone char(16) , +c_since timestamp , +c_credit char(2) , +c_credit_lim numeric(12,2) , +c_discount numeric(4,4) , +c_balance numeric(12,2) , +c_ytd_payment numeric(12,2) , +c_payment_cnt int , +c_delivery_cnt int , +c_data varchar(500)) +partition by range (c_id) +( +partition vector_engine_Customer_1 values less than (10), +partition vector_engine_Customer_2 values less than (77), +partition vector_engine_Customer_3 values less than (337), +partition vector_engine_Customer_4 values less than (573), +partition vector_engine_Customer_5 values less than (1357), +partition vector_engine_Customer_6 values less than (2033), +partition 
vector_engine_Customer_7 values less than (2087), +partition vector_engine_Customer_8 values less than (2387), +partition vector_engine_Customer_9 values less than (2687), +partition vector_engine_Customer_10 values less than (2987), +partition vector_engine_Customer_11 values less than (maxvalue) +); +select decode(bitand(a.col_int, 1), 1, 'warehouse', 'postoffice') as case1, +decode(bitand(a.col_tinyint, 2), 2, 'ground', 'air') as case2, +decode(bitand(b.col_smallint, 4), 4, 'insured', 'certified') as case3, +decode(a.col_real, b.col_real, a.col_char, 'postoffice'), +decode(b.col_nvarchar2, a.col_nvarchar2, 'ground', 'air'), +decode(a.col_double, b.col_double, 'insured', 'certified'), +decode(b.col_text, a.col_text, 'yes', a.col_varchar, 'no', 'default'), +decode(a.col_char, +b.col_nvarchar2, +'t', +cast('h' as char), +false, +true), +decode(cast('h' as char), a.col_nvarchar2, 't', b.col_char, 'f', 't'), +decode(to_date('2010-8-1', 'yyyy-mm-dd'), +' 2010-08-01 00:00:00', +1, +to_date('2010-8-1', 'yyyy-mm-dd'), +2, +3), +'print1:' || decode('myvar1', '', 1, 'myvar2', 2) +from base_tab_000 a +join base_type_tab_000 b +on a.col_smallint = b.col_smallint +and exists +(select 1 from Customer where a.col_int = c_id) +order by 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11; + case1 | case2 | case3 | case | case | case | case | case | case | case | ?column? 
+-------+-------+-------+------+------+------+------+------+------+------+---------- +(0 rows) + +create table t1 (id int, c_int int, c_varchar varchar(50)) with (storage_type=ustore); +create or replace procedure p1 (tnum int, tname varchar) as +x int; +begin +for i in 1..tnum loop + execute immediate 'insert into ' || tname || ' values (' || i || ',' || i || ',' || '''abc''' || ')'; +end loop; +end; +/ +call p1(100, 't1'); + p1 +---- + +(1 row) + +select count(*) from (select sin((select max(B.id) +from t1 B where B.id > 5))+ decode((select max(B.id) from t1 B where B.id > 5), 100000, 200, 1000000, 300) + decode ((select max (B.id) +from t1 B where B.id>5), 100000, 200, 1000000, 300) X +from t1 A) order by 1; + count +------- + 100 +(1 row) + reset sql_beta_feature; reset timezone; \c regression diff --git a/src/test/regress/sql/decode_compatible_with_o.sql b/src/test/regress/sql/decode_compatible_with_o.sql index c727383b91..7612e33180 100644 --- a/src/test/regress/sql/decode_compatible_with_o.sql +++ b/src/test/regress/sql/decode_compatible_with_o.sql @@ -1773,6 +1773,148 @@ select decode('1.0'::text, 1, 'same', 'different'); select case 1 when '1.0' then 'same' else 'different' end; select case '1.0' when 1 then 'same' else 'different' end; +set sql_beta_feature = 'none'; +create table base_tab_000 ( +col_tinyint tinyint, +col_smallint smallint, +col_int integer, +col_bigint bigint, +col_numeric numeric, +col_real real, +col_double double precision, +col_decimal decimal, +col_varchar varchar, +col_char char(30), +col_nvarchar2 nvarchar2, +col_text text, +col_timestamptz timestamp with time zone, +col_timestamp timestamp without time zone, +col_date date, +col_time time without time zone, +col_timetz time with time zone, +col_interval interval, +col_smalldatetine smalldatetime) +partition by range (col_int) +( +partition vector_base_tab_000_1 values less than (10), +partition vector_base_tab_000_2 values less than (1357), +partition vector_base_tab_000_3 values 
less than (2687), +partition vector_base_tab_000_4 values less than (maxvalue) +); +create table base_type_tab_000 ( +col_tinyint tinyint, +col_smallint smallint, +col_int integer, +col_bigint bigint, +col_money money, +col_numeric numeric, +col_real real, +col_double double precision, +col_decimal decimal, +col_varchar varchar, +col_char char(30), +col_nvarchar2 nvarchar2, +col_text text, +col_timestamp timestamp with time zone, +col_timestamptz timestamp without time zone, +col_date date, +col_time time without time zone, +col_timetz time with time zone, +col_interval interval, +col_tinterval tinterval, +col_smalldatetine smalldatetime, +col_bytea bytea, +col_boolean boolean, +col_inet inet, +col_cidr cidr, +col_bit bit(10), +col_varbit varbit(10), +col_oid oid) ; + +CREATE TABLE Customer ( +c_id int , +c_d_id int , +c_w_id int , +c_first varchar(16) , +c_middle char(2) , +c_last varchar(16) , +c_street_1 varchar(20) , +c_street_2 varchar(20) , +c_city varchar(20) , +c_state char(2) , +c_zip char(9) , +c_phone char(16) , +c_since timestamp , +c_credit char(2) , +c_credit_lim numeric(12,2) , +c_discount numeric(4,4) , +c_balance numeric(12,2) , +c_ytd_payment numeric(12,2) , +c_payment_cnt int , +c_delivery_cnt int , +c_data varchar(500)) + +partition by range (c_id) +( +partition vector_engine_Customer_1 values less than (10), +partition vector_engine_Customer_2 values less than (77), +partition vector_engine_Customer_3 values less than (337), +partition vector_engine_Customer_4 values less than (573), +partition vector_engine_Customer_5 values less than (1357), +partition vector_engine_Customer_6 values less than (2033), +partition vector_engine_Customer_7 values less than (2087), +partition vector_engine_Customer_8 values less than (2387), +partition vector_engine_Customer_9 values less than (2687), +partition vector_engine_Customer_10 values less than (2987), +partition vector_engine_Customer_11 values less than (maxvalue) +); + +select 
decode(bitand(a.col_int, 1), 1, 'warehouse', 'postoffice') as case1, +decode(bitand(a.col_tinyint, 2), 2, 'ground', 'air') as case2, +decode(bitand(b.col_smallint, 4), 4, 'insured', 'certified') as case3, +decode(a.col_real, b.col_real, a.col_char, 'postoffice'), +decode(b.col_nvarchar2, a.col_nvarchar2, 'ground', 'air'), +decode(a.col_double, b.col_double, 'insured', 'certified'), +decode(b.col_text, a.col_text, 'yes', a.col_varchar, 'no', 'default'), +decode(a.col_char, +b.col_nvarchar2, +'t', +cast('h' as char), +false, +true), +decode(cast('h' as char), a.col_nvarchar2, 't', b.col_char, 'f', 't'), +decode(to_date('2010-8-1', 'yyyy-mm-dd'), +' 2010-08-01 00:00:00', +1, +to_date('2010-8-1', 'yyyy-mm-dd'), +2, +3), +'print1:' || decode('myvar1', '', 1, 'myvar2', 2) +from base_tab_000 a +join base_type_tab_000 b +on a.col_smallint = b.col_smallint +and exists +(select 1 from Customer where a.col_int = c_id) +order by 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11; + +create table t1 (id int, c_int int, c_varchar varchar(50)) with (storage_type=ustore); + +create or replace procedure p1 (tnum int, tname varchar) as +x int; +begin +for i in 1..tnum loop + execute immediate 'insert into ' || tname || ' values (' || i || ',' || i || ',' || '''abc''' || ')'; +end loop; +end; +/ + +call p1(100, 't1'); + +select count(*) from (select sin((select max(B.id) +from t1 B where B.id > 5))+ decode((select max(B.id) from t1 B where B.id > 5), 100000, 200, 1000000, 300) + decode ((select max (B.id) +from t1 B where B.id>5), 100000, 200, 1000000, 300) X +from t1 A) order by 1; + reset sql_beta_feature; reset timezone; -- Gitee From 5e1cef64e43a85c6c5ca7a6bb869fbb7b5bba094 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Thu, 12 Sep 2024 15:13:38 +0800 Subject: [PATCH 298/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=B5=84=E6=BA=90?= =?UTF-8?q?=E6=B1=A0=E5=8C=96=E5=A4=87=E9=9B=86=E7=BE=A4=E8=AF=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit --- .../process/threadpool/knl_thread.cpp | 1 + .../transam/parallel_recovery/dispatcher.cpp | 7 ++++++- src/gausskernel/storage/buffer/bufmgr.cpp | 5 +++-- .../storage/smgr/segment/segxlog.cpp | 17 ++++++++++------- src/include/knl/knl_thread.h | 1 + 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/gausskernel/process/threadpool/knl_thread.cpp b/src/gausskernel/process/threadpool/knl_thread.cpp index a20b0eb613..1c5c27551b 100755 --- a/src/gausskernel/process/threadpool/knl_thread.cpp +++ b/src/gausskernel/process/threadpool/knl_thread.cpp @@ -402,6 +402,7 @@ static void knl_t_xlog_init(knl_t_xlog_context* xlog_cxt) xlog_cxt->LocalXLogInsertAllowed = -1; xlog_cxt->ArchiveRecoveryRequested = false; xlog_cxt->InArchiveRecovery = false; + xlog_cxt->inRedoExtendSegment = false; xlog_cxt->ArchiveRestoreRequested = false; xlog_cxt->restoredFromArchive = false; xlog_cxt->recoveryRestoreCommand = NULL; diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp index 5763320478..1e56ff2285 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp @@ -1166,7 +1166,12 @@ static bool DispatchRepOriginRecord(XLogReaderState *record, List *expectedTLIs, /* Run from the dispatcher thread. 
*/ static bool DispatchCLogRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + uint8 info = XLogRecGetInfo(record) & (~XLR_INFO_MASK); + if (info == CLOG_ZEROPAGE) { + DispatchRecordWithoutPage(record, expectedTLIs); + } else { + DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + } return false; } diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index c31b7b4103..2483cb96f9 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -2569,7 +2569,7 @@ Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber fork * head may be re-used, i.e., the relfilenode may be reused. Thus the * smgrnblocks interface can not be used on standby. Just skip this check. */ - } else if (RecoveryInProgress() && !IsSegmentFileNode(smgr->smgr_rnode.node)) { + } else if (RecoveryInProgress()) { BlockNumber totalBlkNum = smgrnblocks_cached(smgr, forkNum); /* Update cached blocks */ @@ -2577,7 +2577,8 @@ Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber fork totalBlkNum = smgrnblocks(smgr, forkNum); } - if (blockNum >= totalBlkNum) { + if ((blockNum >= totalBlkNum || totalBlkNum == InvalidBlockNumber) && + !t_thrd.xlog_cxt.inRedoExtendSegment && mode != RBM_ZERO_ON_ERROR) { return InvalidBuffer; } } diff --git a/src/gausskernel/storage/smgr/segment/segxlog.cpp b/src/gausskernel/storage/smgr/segment/segxlog.cpp index 64671b3d4b..c44bcaed59 100644 --- a/src/gausskernel/storage/smgr/segment/segxlog.cpp +++ b/src/gausskernel/storage/smgr/segment/segxlog.cpp @@ -643,6 +643,16 @@ static void redo_atomic_xlog(XLogReaderState *record) static void redo_seghead_extend(XLogReaderState *record) { RedoBufferInfo redo_buf; + t_thrd.xlog_cxt.inRedoExtendSegment = true; + XLogInitBufferForRedo(record, 1, &redo_buf); + t_thrd.xlog_cxt.inRedoExtendSegment = false; + 
if (BufferIsValid(redo_buf.buf)) { + memset_s(redo_buf.pageinfo.page, BLCKSZ, 0, BLCKSZ); + PageSetLSN(redo_buf.pageinfo.page, redo_buf.lsn); + MarkBufferDirty(redo_buf.buf); + UnlockReleaseBuffer(redo_buf.buf); + } + XLogRedoAction redo_action = XLogReadBufferForRedo(record, 0, &redo_buf); if (redo_action == BLK_NEEDS_REDO) { char *data = XLogRecGetBlockData(record, 0, NULL); @@ -667,13 +677,6 @@ static void redo_seghead_extend(XLogReaderState *record) if (SSCheckInitPageXLogSimple(record, 1, &redo_buf) == BLK_DONE) { return; } - XLogInitBufferForRedo(record, 1, &redo_buf); - if (BufferIsValid(redo_buf.buf)) { - memset_s(redo_buf.pageinfo.page, BLCKSZ, 0, BLCKSZ); - PageSetLSN(redo_buf.pageinfo.page, redo_buf.lsn); - MarkBufferDirty(redo_buf.buf); - UnlockReleaseBuffer(redo_buf.buf); - } } /* diff --git a/src/include/knl/knl_thread.h b/src/include/knl/knl_thread.h index 068b3e6822..014ba05c02 100755 --- a/src/include/knl/knl_thread.h +++ b/src/include/knl/knl_thread.h @@ -516,6 +516,7 @@ typedef struct knl_t_xlog_context { */ bool ArchiveRecoveryRequested; bool InArchiveRecovery; + bool inRedoExtendSegment; bool ArchiveRestoreRequested; /* Was the last xlog file restored from archive, or local? 
*/ -- Gitee From b436e223dcf80e12a7e205f707f23785165f6bd7 Mon Sep 17 00:00:00 2001 From: KeKe Date: Thu, 12 Sep 2024 11:57:37 +0800 Subject: [PATCH 299/347] =?UTF-8?q?cherry=20pick=20ec06508=20from=20https:?= =?UTF-8?q?//gitee.com/wangxingmiao/openGauss-server/pulls/6305=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=9C=A8=E9=9D=9EsimpleInstall=E7=9B=AE?= =?UTF-8?q?=E5=BD=95=E4=B8=8B=E6=9E=81=E7=AE=80=E5=AE=89=E8=A3=85=EF=BC=8C?= =?UTF-8?q?=E5=88=9B=E5=BB=BAdemo=20database=E6=8A=A5=E9=94=99=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- simpleInstall/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simpleInstall/install.sh b/simpleInstall/install.sh index ef6bffe9e4..617aed6295 100644 --- a/simpleInstall/install.sh +++ b/simpleInstall/install.sh @@ -365,7 +365,7 @@ function fn_install_demoDB() fi if [ "$input"X == "yes"X ] then - fn_load_demoDB 1>load.log 2>&1 + fn_load_demoDB 1>$shell_path/load.log 2>&1 fn_check_demoDB elif [ "$input"X == "no"X ] then -- Gitee From 1d8d5bc1813396a8e1d76b7c36cb3eb15d818390 Mon Sep 17 00:00:00 2001 From: leiziwei Date: Mon, 9 Sep 2024 17:29:00 +0800 Subject: [PATCH 300/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=B5=8C=E5=A5=97?= =?UTF-8?q?=E6=95=B0=E7=BB=84=E8=B5=8B=E5=80=BCbug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/pl/plpgsql/src/gram.y | 40 +++++- src/gausskernel/runtime/executor/execQual.cpp | 2 +- .../plpgsql_nested_array_and_record.out | 124 ++++++++++++++++++ .../sql/plpgsql_nested_array_and_record.sql | 108 +++++++++++++++ 4 files changed, 271 insertions(+), 3 deletions(-) diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 1ee23aff6a..1b9dd59bc8 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -2213,6 +2213,15 @@ record_attr : attr_name decl_datatype decl_notnull decl_rec_defval attr->attrname = 
$1; PLpgSQL_type *var_type = ((PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$2])->datatype; + PLpgSQL_var *varray_type = (PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$2]; + + if (varray_type->nest_table != NULL) { + ereport(errstate, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("nested table of type is not supported as record type attribute"), + parser_errposition(@3))); + u_sess->plsql_cxt.have_error = true; + } PLpgSQL_type *new_var_type = build_array_type_from_elemtype(var_type); new_var_type->collectionType = var_type->collectionType; new_var_type->tableOfIndexType = var_type->tableOfIndexType; @@ -2241,7 +2250,7 @@ record_attr : attr_name decl_datatype decl_notnull decl_rec_defval $$ = attr; } - | attr_name table_var decl_notnull decl_rec_defval + | attr_name table_var decl_notnull decl_rec_defval { PLpgSQL_rec_attr *attr = NULL; @@ -8674,6 +8683,11 @@ read_sql_construct6(int until, int loc = 0; int curloc = 0; int brack_cnt = 0; + int nest_layers = 0; + int left_brace_count = 0; + int right_brace_count = 0; + bool stop_count = false; + int stop_tok; /* mark if there are 2 table of index by var call functions in an expr */ int tableof_func_dno = -1; int tableof_var_dno = -1; @@ -8702,6 +8716,14 @@ read_sql_construct6(int until, { prev_tok = tok; tok = yylex(); + if (tok == '\"' || tok == '\'') { + if (stop_count && stop_tok == tok) { + stop_count = false; + } else { + stop_count = true; + stop_tok = tok; + } + } tokenstack = push_token_stack(tok, tokenstack); loc = yylloc; if (startlocation < 0) /* remember loc of first token */ @@ -8998,6 +9020,8 @@ read_sql_construct6(int until, brack_cnt--; /* fall through */ case ')': + if (!stop_count) + right_brace_count++; if (context.list_right_bracket && context.list_right_bracket->length && linitial_int(context.list_right_bracket) == parenlevel) { /* append bracket instead of parentheses */ @@ -9055,6 +9079,8 @@ read_sql_construct6(int until, brack_cnt++; /* 
fall through */ case '(': + if (!stop_count) + left_brace_count++; if (context.list_left_bracket && context.list_left_bracket->length && linitial_int(context.list_left_bracket) == parenlevel - 1) { appendStringInfoString(&ds, left_bracket); @@ -9266,6 +9292,10 @@ read_sql_construct6(int until, yylval = temptokendata->lval; u_sess->plsql_cxt.curr_compile_context->plpgsql_yyleng = temptokendata->leng; } + if (left_brace_count == 0) + { + nest_layers = var->nest_layers; + } ds_changed = construct_array_start(&ds, &context, var->datatype, &tok, parenlevel, loc); break; } @@ -9287,6 +9317,10 @@ read_sql_construct6(int until, } int dno = yylval.wdatum.datum->dno; PLpgSQL_var *var = (PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[dno]; + if (left_brace_count == 0) + { + nest_layers = var->nest_layers; + } ds_changed = construct_array_start(&ds, &context, var->datatype, &tok, parenlevel, loc); break; } @@ -9364,7 +9398,9 @@ read_sql_construct6(int until, if (IS_ARRAY_STATE(context.list_array_state, ARRAY_COERCE)) { /* always append right parentheses at end of each element */ appendStringInfoString(&ds, right_parentheses); - plpgsql_append_object_typename(&ds, (PLpgSQL_type *)linitial(context.list_datatype)); + if ((left_brace_count - right_brace_count) > nest_layers) { + plpgsql_append_object_typename(&ds, (PLpgSQL_type *)linitial(context.list_datatype)); + } SET_ARRAY_STATE(context.list_array_state, ARRAY_SEPERATOR); } } diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index 3b3594d28f..e0d0f3b86c 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -1434,7 +1434,7 @@ static Datum ExecEvalParamExtern(ExprState* exprstate, ExprContext* econtext, bo *isNull = prm->isnull; if (prm->tabInfo && prm->tabInfo->isnestedtable && plpgsql_estate) { - plpgsql_estate->curr_nested_table_type = prm->ptype; + plpgsql_estate->curr_nested_table_type = 
prm->tabInfo->tableOfIndexType; plpgsql_estate->curr_nested_table_layers = prm->tabInfo->tableOfLayers; } /* copy cursor option from param to econtext */ diff --git a/src/test/regress/expected/plpgsql_nested_array_and_record.out b/src/test/regress/expected/plpgsql_nested_array_and_record.out index 8be9a7a36d..132c94fc23 100644 --- a/src/test/regress/expected/plpgsql_nested_array_and_record.out +++ b/src/test/regress/expected/plpgsql_nested_array_and_record.out @@ -10,6 +10,52 @@ NOTICE: schema "plpgsql_nested_array_and_record" does not exist, skipping CREATE SCHEMA plpgsql_nested_array_and_record; SET current_schema = plpgsql_nested_array_and_record; -- array of arrays +CREATE OR REPLACE PROCEDURE test_nested AS +DECLARE + TYPE arr2 IS VARRAY(5) OF INTEGER; + TYPE arr1 IS VARRAY(5) OF INTEGER; + TYPE nt1 IS VARRAY(10) OF arr1; + TYPE rec1 IS RECORD(id int, arrarg nt1); + arr_rec rec1:=rec1(7, nt1(arr1(1,2,4,5),arr1(1,3))); +BEGIN + RAISE NOTICE 'ID: %', arr_rec.id; +END; +/ +ERROR: nested table of type is not supported as record type attribute +LINE 5: TYPE rec1 IS RECORD(id int, arrarg nt1); + ^ +QUERY: DECLARE + TYPE arr2 IS VARRAY(5) OF INTEGER; + TYPE arr1 IS VARRAY(5) OF INTEGER; + TYPE nt1 IS VARRAY(10) OF arr1; + TYPE rec1 IS RECORD(id int, arrarg nt1); + arr_rec rec1:=rec1(7, nt1(arr1(1,2,4,5),arr1(1,3))); +BEGIN + RAISE NOTICE 'ID: %', arr_rec.id; +END +CREATE OR REPLACE PROCEDURE test_nested AS +DECLARE + TYPE arr2 IS TABLE OF INTEGER; + TYPE arr1 IS TABLE OF INTEGER; + TYPE nt1 IS TABLE OF arr1; + TYPE rec1 IS RECORD(id int, arrarg nt1); + arr_rec rec1:=rec1(7, nt1(arr1(1,2,4,5),arr1(1,3))); +BEGIN + RAISE NOTICE 'ID: %', arr_rec.id; +END; +/ +ERROR: nested table of type is not supported as record type attribute +LINE 5: TYPE rec1 IS RECORD(id int, arrarg nt1); + ^ +QUERY: DECLARE + TYPE arr2 IS TABLE OF INTEGER; + TYPE arr1 IS TABLE OF INTEGER; + TYPE nt1 IS TABLE OF arr1; + TYPE rec1 IS RECORD(id int, arrarg nt1); + arr_rec rec1:=rec1(7, 
nt1(arr1(1,2,4,5),arr1(1,3))); +BEGIN + RAISE NOTICE 'ID: %', arr_rec.id; +END DECLARE TYPE arr1 IS VARRAY(5) OF INTEGER; TYPE arr2 IS VARRAY(5) OF arr1; @@ -26,6 +72,84 @@ NOTICE: RESULT: 2 NOTICE: RESULT: 3 NOTICE: RESULT: 4 NOTICE: RESULT: 5 +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +nva nt1 := nt1(t1(2,3,5), t1(55,6), t1(2,3,8)); +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(3); +END; +/ +ERROR: multidimensional arrays must have array expressions with matching dimensions +CONTEXT: referenced column: array +SQL statement "SELECT ARRAY[(ARRAY[(2),(3),(5)]), (ARRAY[(55),(6)]), (ARRAY[(2),(3),(8)])]" +PL/pgSQL function inline_code_block line 7 during statement block local variable initialization +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +nva nt1 := nt1(t1(2,3,5), t1(55,8,6), t1(2,3,8)); +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(1); +END; +/ +NOTICE: 55 +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +nva nt1 := nt1(va, t1(55,8,6), t1(2,3,8)); +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(1)(3); +END; +/ +NOTICE: 9 +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +TYPE nnt1 IS VARRAY(10) OF nt1; +nva nnt1 := nt1(nt1(t1(2,3,9), t1(55,8,6), t1(2,3,8)),nt1(t1(95,80,65), t1(2,3,9), t1(2,3,8))); +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(1)(3); +END; +/ +NOTICE: 65 +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +TYPE nnt1 IS VARRAY(10) OF nt1; +nva nnt1 := nt1(nt1(t1(2,3,9), va, t1(2,3,8)),nt1(t1(95,80,65), t1(2,3,9), t1(2,3,8))); +i 
INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(1)(2)(2); +END; +/ +NOTICE: 3 +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +TYPE nnt1 IS VARRAY(10) OF nt1; +nva nnt1 := nt1(nt1(t1(2,3,9), va, t1(2,3,8)),nt1(va, t1(2,3,9), t1(2,3,8))); +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(1)(3); +END; +/ +NOTICE: 9 CREATE OR REPLACE PROCEDURE test_nested_array as TYPE typ_PLArray_case0001 IS varray(3) OF integer; TYPE typ_PLArray_case0002 IS varray(3) OF typ_PLArray_case0001; diff --git a/src/test/regress/sql/plpgsql_nested_array_and_record.sql b/src/test/regress/sql/plpgsql_nested_array_and_record.sql index 39ef82f936..7f4a5ad49a 100644 --- a/src/test/regress/sql/plpgsql_nested_array_and_record.sql +++ b/src/test/regress/sql/plpgsql_nested_array_and_record.sql @@ -6,6 +6,30 @@ CREATE SCHEMA plpgsql_nested_array_and_record; SET current_schema = plpgsql_nested_array_and_record; -- array of arrays +CREATE OR REPLACE PROCEDURE test_nested AS +DECLARE + TYPE arr2 IS VARRAY(5) OF INTEGER; + TYPE arr1 IS VARRAY(5) OF INTEGER; + TYPE nt1 IS VARRAY(10) OF arr1; + TYPE rec1 IS RECORD(id int, arrarg nt1); + arr_rec rec1:=rec1(7, nt1(arr1(1,2,4,5),arr1(1,3))); +BEGIN + RAISE NOTICE 'ID: %', arr_rec.id; +END; +/ + +CREATE OR REPLACE PROCEDURE test_nested AS +DECLARE + TYPE arr2 IS TABLE OF INTEGER; + TYPE arr1 IS TABLE OF INTEGER; + TYPE nt1 IS TABLE OF arr1; + TYPE rec1 IS RECORD(id int, arrarg nt1); + arr_rec rec1:=rec1(7, nt1(arr1(1,2,4,5),arr1(1,3))); +BEGIN + RAISE NOTICE 'ID: %', arr_rec.id; +END; +/ + DECLARE TYPE arr1 IS VARRAY(5) OF INTEGER; TYPE arr2 IS VARRAY(5) OF arr1; @@ -18,6 +42,90 @@ BEGIN END; / +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3); + +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +nva nt1 := nt1(t1(2,3,5), t1(55,6), t1(2,3,8)); + +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(3); +END; +/ 
+ +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3); + +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +nva nt1 := nt1(t1(2,3,5), t1(55,8,6), t1(2,3,8)); + +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(1); +END; +/ + +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); + +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +nva nt1 := nt1(va, t1(55,8,6), t1(2,3,8)); + +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(1)(3); +END; +/ + +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +TYPE nnt1 IS VARRAY(10) OF nt1; +nva nnt1 := nt1(nt1(t1(2,3,9), t1(55,8,6), t1(2,3,8)),nt1(t1(95,80,65), t1(2,3,9), t1(2,3,8))); + +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(1)(3); +END; +/ + +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +TYPE nnt1 IS VARRAY(10) OF nt1; +nva nnt1 := nt1(nt1(t1(2,3,9), va, t1(2,3,8)),nt1(t1(95,80,65), t1(2,3,9), t1(2,3,8))); + +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(1)(2)(2); +END; +/ + +DECLARE +TYPE t1 IS VARRAY(10) OF INTEGER; -- varray of integer +va t1 := t1(2,3,9); +TYPE nt1 IS VARRAY(10) OF t1; -- varray of varray of integer +TYPE nnt1 IS VARRAY(10) OF nt1; +nva nnt1 := nt1(nt1(t1(2,3,9), va, t1(2,3,8)),nt1(va, t1(2,3,9), t1(2,3,8))); + +i INTEGER; +va1 t1; +BEGIN + raise notice '%', nva(2)(1)(3); +END; +/ + CREATE OR REPLACE PROCEDURE test_nested_array as TYPE typ_PLArray_case0001 IS varray(3) OF integer; TYPE typ_PLArray_case0002 IS varray(3) OF typ_PLArray_case0001; -- Gitee From 990fe218926334b9e5ae42ecfe2cb4c88c926ba7 Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Thu, 5 Sep 2024 20:56:33 +0800 Subject: [PATCH 301/347] =?UTF-8?q?=E5=8D=95=E6=AC=A1=E6=89=A7=E8=A1=8C?= 
=?UTF-8?q?=E7=9A=84event=E5=9C=A8=E6=89=A7=E8=A1=8C=E5=A4=B1=E8=B4=A5?= =?UTF-8?q?=E5=90=8E=E8=BF=9B=E8=A1=8C=E5=88=A0=E9=99=A4=20=EF=BC=88cherry?= =?UTF-8?q?=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/pg_job.cpp | 15 ++++++++++++ src/include/catalog/pg_job.h | 1 + src/test/regress/expected/event.out | 34 +++++++++++++++++++++++---- src/test/regress/sql/event.sql | 8 +++++++ 4 files changed, 54 insertions(+), 4 deletions(-) diff --git a/src/common/backend/catalog/pg_job.cpp b/src/common/backend/catalog/pg_job.cpp index aea0ff6396..632c52618c 100644 --- a/src/common/backend/catalog/pg_job.cpp +++ b/src/common/backend/catalog/pg_job.cpp @@ -1063,18 +1063,28 @@ void execute_job(int4 job_id) /* Save error info */ MemoryContext ecxt = MemoryContextSwitchTo(current_context); ErrorData* edata = CopyErrorData(); + char* autoDrop; FlushErrorState(); + if (t_thrd.postgres_cxt.xact_started) { + t_thrd.postgres_cxt.xact_started = false; + } t_thrd.utils_cxt.CurrentResourceOwner = save; ResourceOwnerRelease(save, RESOURCE_RELEASE_BEFORE_LOCKS, false, true); /* Update last_end_date and job_status='f' and failure_count++ */ update_pg_job_info(job_id, Pgjob_Fail, start_date, new_next_date, edata->message, is_scheduler_job); elog_job_detail(job_id, what, Pgjob_Fail, edata->message); + autoDrop = get_attribute_value_str(job_name, "auto_drop", AccessShareLock); + if (0 == pg_strcasecmp(job_interval, "null") && 0 == pg_strcasecmp(autoDrop, "true")) { + expire_backend_job(job_name, true); + } + (void)MemoryContextSwitchTo(ecxt); pfree_ext(job_interval); pfree_ext(what); + pfree_ext(autoDrop); ereport(ERROR, (errcode(ERRCODE_OPERATE_FAILED), @@ -2121,6 +2131,8 @@ static void elog_job_detail(int4 job_id, char* what, Update_Pgjob_Status status, Relation relation = NULL; Datum values[Natts_pg_job]; bool nulls[Natts_pg_job]; + MemoryContext current_context = CurrentMemoryContext; + 
ResourceOwner save = t_thrd.utils_cxt.CurrentResourceOwner; StartTransactionCommand(); tup = get_job_tup(job_id); @@ -2158,6 +2170,9 @@ static void elog_job_detail(int4 job_id, char* what, Update_Pgjob_Status status, heap_close(relation, AccessShareLock); CommitTransactionCommand(); + + (void)MemoryContextSwitchTo(current_context); + t_thrd.utils_cxt.CurrentResourceOwner = save; } /* diff --git a/src/include/catalog/pg_job.h b/src/include/catalog/pg_job.h index 968ea68126..7c84d36afc 100644 --- a/src/include/catalog/pg_job.h +++ b/src/include/catalog/pg_job.h @@ -182,6 +182,7 @@ extern char *get_inline_schedule_name(Datum job_name); extern void drop_single_job_internal(PG_FUNCTION_ARGS); extern void ShowEventCommand(ShowEventStmt *stmt, DestReceiver* dest); extern TupleDesc GetEventResultDesc(); +extern void expire_backend_job(Datum job_name, bool auto_drop); #define JOBID_ALLOC_OK 0 /* alloc jobid ok */ #define JOBID_ALLOC_ERROR 1 /* alloc jobid error */ diff --git a/src/test/regress/expected/event.out b/src/test/regress/expected/event.out index dab25c0c63..3a5a1bd7b5 100644 --- a/src/test/regress/expected/event.out +++ b/src/test/regress/expected/event.out @@ -973,6 +973,33 @@ drop event if exists priv_e_b; NOTICE: event "priv_e_b" is not exists, skipping create event priv_a.priv_e_b on schedule at sysdate disable do select 1; drop event if exists priv_e_b; +-- event execute failed and auto_drop is true, shuold drop event +create event priv_a.failed_and_drop on schedule at now() do insert into t-t-t select 666; -- execute failed, and should drop event +select pg_sleep(1); + pg_sleep +---------- + +(1 row) + +select count(*) from pg_job where job_name = 'failed_and_drop'; + count +------- + 0 +(1 row) + +create event priv_a.failed_and_drop on schedule at now() on completion preserve do insert into t-t-t select 1689; -- execute failed, don't drop +select pg_sleep(1); + pg_sleep +---------- + +(1 row) + +select count(*) from pg_job where job_name = 
'failed_and_drop'; + count +------- + 1 +(1 row) + \c event_b revoke all on schema priv_b from priv_a; --test ALTER @@ -1023,10 +1050,9 @@ select pg_sleep(2); (1 row) select job_name, enable, failure_msg from pg_job where job_name = 'ee11'; - job_name | enable | failure_msg -----------+--------+------------------------------------------- - ee11 | t | relation "t1" does not exist on datanode1 -(1 row) + job_name | enable | failure_msg +----------+--------+------------- +(0 rows) \c event_b grant usage on schema priv_a to priv_c; diff --git a/src/test/regress/sql/event.sql b/src/test/regress/sql/event.sql index 93a28fabe2..248a401355 100644 --- a/src/test/regress/sql/event.sql +++ b/src/test/regress/sql/event.sql @@ -398,6 +398,14 @@ drop event if exists priv_e_b; create event priv_a.priv_e_b on schedule at sysdate disable do select 1; drop event if exists priv_e_b; +-- event execute failed and auto_drop is true, shuold drop event +create event priv_a.failed_and_drop on schedule at now() do insert into t-t-t select 666; -- execute failed, and should drop event +select pg_sleep(1); +select count(*) from pg_job where job_name = 'failed_and_drop'; +create event priv_a.failed_and_drop on schedule at now() on completion preserve do insert into t-t-t select 1689; -- execute failed, don't drop +select pg_sleep(1); +select count(*) from pg_job where job_name = 'failed_and_drop'; + \c event_b revoke all on schema priv_b from priv_a; -- Gitee From 9b3761886b0493a02b92eb00d20d552e7f41bbf4 Mon Sep 17 00:00:00 2001 From: zhang_xubo <2578876417@qq.com> Date: Fri, 13 Sep 2024 15:27:56 +0800 Subject: [PATCH 302/347] =?UTF-8?q?=E5=AE=B9=E5=99=A8=E5=8D=87=E7=BA=A7?= =?UTF-8?q?=E6=97=B6=E5=80=99=E7=BC=93=E5=AD=98=E6=9F=A5=E8=AF=A2=E7=9A=84?= =?UTF-8?q?=E5=AE=9E=E4=BE=8B=E8=A7=92=E8=89=B2=E5=92=8C=E7=8A=B6=E6=80=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docker/upgrade/upgrade_common.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 
2 deletions(-) diff --git a/docker/upgrade/upgrade_common.sh b/docker/upgrade/upgrade_common.sh index 3d601921cd..87dff17d60 100644 --- a/docker/upgrade/upgrade_common.sh +++ b/docker/upgrade/upgrade_common.sh @@ -731,8 +731,12 @@ function start_dbnode() { } function query_dn_role() { - gs_ctl query -D ${GAUSSDATA} > ${GAUSS_TMP_PATH}/temp_dn_role - dn_role=$(grep local_role "${GAUSS_TMP_PATH}/temp_dn_role" | head -1 | awk '{print $3}') + if [ -f "$GAUSS_TMP_PATH/temp_dn_role" ]; then + dn_role=$(grep local_role "${GAUSS_TMP_PATH}/temp_dn_role" | head -1 | awk '{print $3}') + else + gs_ctl query -D ${GAUSSDATA} > ${GAUSS_TMP_PATH}/temp_dn_role + dn_role=$(grep local_role "${GAUSS_TMP_PATH}/temp_dn_role" | head -1 | awk '{print $3}') + fi if [[ "$dn_role" = "Normal" ]]; then dn_role="normal" -- Gitee From 81a21961a6cb5c59341f8c75ef122ec0745e4e9e Mon Sep 17 00:00:00 2001 From: zhubin79 <18784715772@163.com> Date: Tue, 10 Sep 2024 16:57:51 +0800 Subject: [PATCH 303/347] =?UTF-8?q?=E6=B5=AE=E7=82=B9=E6=95=B0=E5=AD=97?= =?UTF-8?q?=E9=9D=A2=E9=87=8F=E6=BA=A2=E5=87=BA=E4=B8=BA0=E4=BF=AE?= =?UTF-8?q?=E6=94=B9=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/scan.l | 13 +-- src/test/regress/expected/float_literals.out | 95 ++++++++++++++++++-- src/test/regress/expected/hw_datatype.out | 16 ++-- src/test/regress/expected/hw_datatype_2.out | 16 ++-- src/test/regress/sql/float_literals.sql | 41 +++++++++ 5 files changed, 151 insertions(+), 30 deletions(-) diff --git a/src/common/backend/parser/scan.l b/src/common/backend/parser/scan.l index f258dd91a3..8165b09f0a 100755 --- a/src/common/backend/parser/scan.l +++ b/src/common/backend/parser/scan.l @@ -1636,15 +1636,10 @@ handle_float_overflow(char *token, core_yyscan_t yyscanner) } errno = 0; - val = strtod(result, &endptr); - - // if val < 1E-130, we assume it is 0 - if (val != 0 && log10(val) < -130) { - 
result = "0"; - } else if (errno == ERANGE) { - if (val == 0.0) { - result = "0"; - } else { + val = strtod(result, &endptr); + + if (errno == ERANGE) { + if (val == 0.0 || val >= HUGE_VAL || val <= -HUGE_VAL) { yyerror("number overflow"); } } diff --git a/src/test/regress/expected/float_literals.out b/src/test/regress/expected/float_literals.out index 3864c7a739..e8b90d1495 100644 --- a/src/test/regress/expected/float_literals.out +++ b/src/test/regress/expected/float_literals.out @@ -30,15 +30,25 @@ ERROR: number overflow at or near "1.79E+400" LINE 1: SELECT 1.79E+400; ^ SELECT 1.79E-400; - ?column? ----------- - 0 -(1 row) - +ERROR: number overflow at or near "1.79E-400" +LINE 1: SELECT 1.79E-400; + ^ SELECT -1.79E+400; ERROR: number overflow at or near "1.79E+400" LINE 1: SELECT -1.79E+400; ^ +SELECT 1E-307; + ?column? +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + .0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 +(1 row) + +SELECT 1E-308; + ?column? 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + .00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 +(1 row) + SELECT '0.0'; ?column? ---------- @@ -81,6 +91,18 @@ SELECT '-1.79E+400'; -1.79E+400 (1 row) +SELECT '1E-307'; + ?column? +---------- + 1E-307 +(1 row) + +SELECT '1E-308'; + ?column? +---------- + 1E-308 +(1 row) + SELECT '0.0'::float8; float8 -------- @@ -120,6 +142,18 @@ ERROR: "-1.79E+400" is out of range for type double precision LINE 1: SELECT '-1.79E+400'::float8; ^ CONTEXT: referenced column: float8 +SELECT '1E-307'::float8; + float8 +-------- + 1e-307 +(1 row) + +SELECT '1E-308'::float8; + float8 +-------- + 1e-308 +(1 row) + SELECT TO_BINARY_FLOAT(3.14 DEFAULT y ON CONVERSION ERROR); ERROR: column "y" does not exist LINE 1: SELECT TO_BINARY_FLOAT(3.14 DEFAULT y ON CONVERSION ERROR); @@ -215,6 +249,9 @@ ERROR: number overflow at or near "3.14E+400" LINE 1: INSERT INTO t_float_literals VALUES (7, 3.14E+400); ^ INSERT INTO t_float_literals VALUES (8, 3.14E-400); +ERROR: number overflow at or near "3.14E-400" +LINE 1: INSERT INTO t_float_literals VALUES (8, 3.14E-400); + ^ INSERT INTO t_float_literals VALUES (9, -3.14E+400); ERROR: number overflow at or near "3.14E+400" LINE 1: INSERT INTO t_float_literals VALUES (9, -3.14E+400); @@ -240,8 +277,7 @@ SELECT * FROM t_float_literals ORDER bY id; 4 | -3.14e+40 5 | 3.14e+40 6 | -3.14e+40 - 8 | 0 -(7 rows) +(6 rows) UPDATE t_float_iterals SET c1 = 1.79E+400 WHERE id = 1; ERROR: number 
overflow at or near "1.79E+400" @@ -263,6 +299,51 @@ SELECT * FROM t_float_iterals ORDER BY c1; ERROR: relation "t_float_iterals" does not exist on datanode1 LINE 1: SELECT * FROM t_float_iterals ORDER BY c1; ^ +create table llvm_enh +( + l_bool boolean default false, + l_tint tinyint default 255, + l_sint smallint default 32767, + l_int integer default 2147483647, + l_bint bigint default 9223372036854775807, + l_num1 numeric(18,0) default 999999999999999999, + l_num2 numeric(19,19) default 0.9223372036854775807, + l_num3 numeric(38,0) default 99999999999999999999999999999999999999, + l_flo1 float4 default 999999, + l_flo2 float8 default 1E-307, + l_char char(39) default '170141183460469231731687303715884105728', + l_vchar varchar(40) default '-170141183460469231731687303715884105728', + l_text text default '-170141183460469231731687303715884105729', + l_date date default '2016-10-18', + l_time time default '21:21:21', + l_times timestamp default '2003-04-12 04:05:06', + l_timez timestamp with time zone default '2003-04-12 04:05:06 pst', + l_oid oid default 12345 +) ; +insert into llvm_enh(l_bool) values(true); +insert into llvm_enh(l_tint) values(0); +insert into llvm_enh(l_sint) values(-32768); +insert into llvm_enh(l_int) values(-2147483648); +insert into llvm_enh(l_bint) values(-9223372036854775808); +insert into llvm_enh(l_num1) values(-999999999999999999); +insert into llvm_enh(l_num2) values(-0.9223372036854775808); +insert into llvm_enh(l_num3) values(-99999999999999999999999999999999999999); +insert into llvm_enh(l_flo1) values(-999999); +insert into llvm_enh(l_flo2) values(1E+308); +select l_flo2,l_flo1 from llvm_enh where l_flo2 < l_flo1-999998 order by 1,2; + l_flo2 | l_flo1 +--------+-------- + 1e-307 | 999999 + 1e-307 | 999999 + 1e-307 | 999999 + 1e-307 | 999999 + 1e-307 | 999999 + 1e-307 | 999999 + 1e-307 | 999999 + 1e-307 | 999999 +(8 rows) + drop table t_float_literals; +drop table llvm_enh; \c regression drop database float_literals; diff --git 
a/src/test/regress/expected/hw_datatype.out b/src/test/regress/expected/hw_datatype.out index 2ad8c0da06..502000a346 100644 --- a/src/test/regress/expected/hw_datatype.out +++ b/src/test/regress/expected/hw_datatype.out @@ -222,19 +222,21 @@ CREATE TABLE test_type( INSERT INTO test_type VALUES(15.23448); INSERT INTO test_type VALUES(1E-323); INSERT INTO test_type VALUES(1E-324); +ERROR: number overflow at or near "1E-324" +LINE 1: INSERT INTO test_type VALUES(1E-324); + ^ INSERT INTO test_type VALUES(1E+308); INSERT INTO test_type VALUES(1E+309); ERROR: number overflow at or near "1E+309" LINE 1: INSERT INTO test_type VALUES(1E+309); ^ SELECT * FROM test_type order by 1; - my_double ------------ - 0 - 0 - 15.23448 - 1e+308 -(4 rows) + my_double +----------------------- + 9.88131291682493e-324 + 15.23448 + 1e+308 +(3 rows) DROP TABLE test_type; /* g.Type BINARY_INTEGER */ diff --git a/src/test/regress/expected/hw_datatype_2.out b/src/test/regress/expected/hw_datatype_2.out index b922236c0a..e29e13e349 100644 --- a/src/test/regress/expected/hw_datatype_2.out +++ b/src/test/regress/expected/hw_datatype_2.out @@ -663,19 +663,21 @@ CREATE TABLE test_type( INSERT INTO test_type VALUES(15.23448); INSERT INTO test_type VALUES(1E-323); INSERT INTO test_type VALUES(1E-324); +ERROR: number overflow at or near "1E-324" +LINE 1: INSERT INTO test_type VALUES(1E-324); + ^ INSERT INTO test_type VALUES(1E+308); INSERT INTO test_type VALUES(1E+309); ERROR: number overflow at or near "1E+309" LINE 1: INSERT INTO test_type VALUES(1E+309); ^ SELECT * FROM test_type order by 1; - my_double ------------ - 0 - 0 - 15.23448 - 1e+308 -(4 rows) + my_double +----------------------- + 9.88131291682493e-324 + 15.23448 + 1e+308 +(3 rows) DROP TABLE test_type; /* g.Type BINARY_INTEGER */ diff --git a/src/test/regress/sql/float_literals.sql b/src/test/regress/sql/float_literals.sql index 26d5feffb6..f111710cc0 100644 --- a/src/test/regress/sql/float_literals.sql +++ 
b/src/test/regress/sql/float_literals.sql @@ -9,6 +9,8 @@ SELECT -3.142596; SELECT 1.79E+400; SELECT 1.79E-400; SELECT -1.79E+400; +SELECT 1E-307; +SELECT 1E-308; SELECT '0.0'; SELECT '-0.0'; @@ -17,6 +19,8 @@ SELECT '-3.142596'; SELECT '1.79E+400'; SELECT '1.79E-400'; SELECT '-1.79E+400'; +SELECT '1E-307'; +SELECT '1E-308'; SELECT '0.0'::float8; SELECT '-0.0'::float8; @@ -25,6 +29,8 @@ SELECT '-3.142596'::float8; SELECT '1.79E+400'::float8; SELECT '1.79E-400'::float8; SELECT '-1.79E+400'::float8; +SELECT '1E-307'::float8; +SELECT '1E-308'::float8; SELECT TO_BINARY_FLOAT(3.14 DEFAULT y ON CONVERSION ERROR); SELECT TO_BINARY_FLOAT(-3.14 DEFAULT + ON CONVERSION ERROR); @@ -71,6 +77,41 @@ UPDATE t_float_iterals SET c1 = 1.79E+40 WHERE id = 3; UPDATE t_float_iterals SET c1 = '1.79E+40'::float8 WHERE id = 4; SELECT * FROM t_float_iterals ORDER BY c1; +create table llvm_enh +( + l_bool boolean default false, + l_tint tinyint default 255, + l_sint smallint default 32767, + l_int integer default 2147483647, + l_bint bigint default 9223372036854775807, + l_num1 numeric(18,0) default 999999999999999999, + l_num2 numeric(19,19) default 0.9223372036854775807, + l_num3 numeric(38,0) default 99999999999999999999999999999999999999, + l_flo1 float4 default 999999, + l_flo2 float8 default 1E-307, + l_char char(39) default '170141183460469231731687303715884105728', + l_vchar varchar(40) default '-170141183460469231731687303715884105728', + l_text text default '-170141183460469231731687303715884105729', + l_date date default '2016-10-18', + l_time time default '21:21:21', + l_times timestamp default '2003-04-12 04:05:06', + l_timez timestamp with time zone default '2003-04-12 04:05:06 pst', + l_oid oid default 12345 +) ; +insert into llvm_enh(l_bool) values(true); +insert into llvm_enh(l_tint) values(0); +insert into llvm_enh(l_sint) values(-32768); +insert into llvm_enh(l_int) values(-2147483648); +insert into llvm_enh(l_bint) values(-9223372036854775808); +insert into 
llvm_enh(l_num1) values(-999999999999999999); +insert into llvm_enh(l_num2) values(-0.9223372036854775808); +insert into llvm_enh(l_num3) values(-99999999999999999999999999999999999999); +insert into llvm_enh(l_flo1) values(-999999); +insert into llvm_enh(l_flo2) values(1E+308); + +select l_flo2,l_flo1 from llvm_enh where l_flo2 < l_flo1-999998 order by 1,2; + drop table t_float_literals; +drop table llvm_enh; \c regression drop database float_literals; \ No newline at end of file -- Gitee From 16e864c8a87dc7e7482d8e04d5283cdc7afa2f37 Mon Sep 17 00:00:00 2001 From: cchen676 Date: Thu, 12 Sep 2024 17:24:45 +0800 Subject: [PATCH 304/347] =?UTF-8?q?=E5=90=88=E5=B9=B6dss=20aio=20post=20wr?= =?UTF-8?q?ite=E8=AF=B7=E6=B1=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/builtin_funcs.ini | 2 +- src/common/backend/utils/adt/pgstatfuncs.cpp | 20 +++ src/gausskernel/ddes/adapter/ss_aio.cpp | 5 +- .../process/postmaster/pagewriter.cpp | 164 +++++++++++------- src/gausskernel/storage/buffer/bufmgr.cpp | 79 +++++---- .../storage/smgr/segment/data_file.cpp | 7 +- src/include/ddes/dms/ss_aio.h | 2 +- src/include/pgstat.h | 5 +- src/include/storage/buf/buf_internals.h | 2 + 9 files changed, 181 insertions(+), 105 deletions(-) diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 3b795da7f1..4de7bb084b 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -8037,7 +8037,7 @@ ), AddFuncGroup( "pg_buffercache_pages", 1, - AddBuiltinFunc(_0(4130), _1("pg_buffercache_pages"), _2(0), _3(false), _4(true), _5(pg_buffercache_pages), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(12, 23, 26, 23, 20, 26, 26, 23, 26, 16, 16, 21, 23), _22(12, 'o', 'o', 'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(12, "bufferid", "relfilenode", "bucketid", "storage_type", "reltablespace", "reldatabase", "relforknumber", "relblocknumber", "isdirty", "isvalid", "usage_count", "pinning_backends"), _24(NULL), _25("pg_buffercache_pages"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(4130), _1("pg_buffercache_pages"), _2(0), _3(false), _4(true), _5(pg_buffercache_pages), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(15, 23, 26, 23, 20, 26, 26, 23, 26, 16, 16, 21, 23, 23, 26, 16), _22(15, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(15, "bufferid", "relfilenode", "bucketid", "storage_type", "reltablespace", "reldatabase", "relforknumber", "relblocknumber", "isdirty", "isvalid", "usage_count", "pinning_backends", "segfileno", "segblockno", "aio_in_process"), _24(NULL), _25("pg_buffercache_pages"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "pg_cancel_backend", 1, diff --git a/src/common/backend/utils/adt/pgstatfuncs.cpp b/src/common/backend/utils/adt/pgstatfuncs.cpp index 0c1809fc7e..0f127f4c0b 100644 --- a/src/common/backend/utils/adt/pgstatfuncs.cpp +++ b/src/common/backend/utils/adt/pgstatfuncs.cpp @@ -8707,6 +8707,9 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) TupleDescInitEntry(tupledesc, (AttrNumber)10, "isvalid", BOOLOID, -1, 0); TupleDescInitEntry(tupledesc, (AttrNumber)11, "usage_count", INT2OID, -1, 0); TupleDescInitEntry(tupledesc, (AttrNumber)12, "pinning_backends", INT4OID, -1, 0); + TupleDescInitEntry(tupledesc, (AttrNumber)13, "segfileno", INT4OID, -1, 0); + 
TupleDescInitEntry(tupledesc, (AttrNumber)14, "segblockno", OIDOID, -1, 0); + TupleDescInitEntry(tupledesc, (AttrNumber)15, "aio_in_process", BOOLOID, -1, 0); fctx->tupdesc = BlessTupleDesc(tupledesc); @@ -8753,6 +8756,8 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) fctx->record[i].blocknum = bufHdr->tag.blockNum; fctx->record[i].usagecount = BUF_STATE_GET_USAGECOUNT(buf_state); fctx->record[i].pinning_backends = BUF_STATE_GET_REFCOUNT(buf_state); + fctx->record[i].segfileno = bufHdr->extra->seg_fileno; + fctx->record[i].segblockno = bufHdr->extra->seg_blockno; if (buf_state & BM_DIRTY) fctx->record[i].isdirty = true; @@ -8765,6 +8770,12 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) else fctx->record[i].isvalid = false; + if (bufHdr->extra->aio_in_progress) { + fctx->record[i].aio_in_process = true; + } else { + fctx->record[i].aio_in_process = false; + } + UnlockBufHdr(bufHdr, buf_state); } @@ -8809,6 +8820,9 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) nulls[9] = false; nulls[10] = true; nulls[11] = true; + nulls[12] = true; + nulls[13] = true; + nulls[14] = true; } else { values[1] = ObjectIdGetDatum(fctx->record[i].relfilenode); nulls[1] = false; @@ -8832,6 +8846,12 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) nulls[10] = false; values[11] = Int32GetDatum(fctx->record[i].pinning_backends); nulls[11] = false; + values[12] = Int32GetDatum(fctx->record[i].segfileno); + nulls[12] = false; + values[13] = ObjectIdGetDatum((int64)fctx->record[i].segblockno); + nulls[13] = false; + values[14] = BoolGetDatum(fctx->record[i].aio_in_process); + nulls[14] = false; } /* Build and return the tuple. 
*/ diff --git a/src/gausskernel/ddes/adapter/ss_aio.cpp b/src/gausskernel/ddes/adapter/ss_aio.cpp index e903c83aa5..49bdd1f5f6 100644 --- a/src/gausskernel/ddes/adapter/ss_aio.cpp +++ b/src/gausskernel/ddes/adapter/ss_aio.cpp @@ -46,10 +46,7 @@ static void WaitDSSAioComplete(DSSAioCxt *aio_cxt, int index) _exit(0); } - for (int i = 0; i < num; i++) { - aio_cxt->aiocb(&aio->events[i]); - } - + aio_cxt->aiocb(aio->events, num); event_num -= num; } diff --git a/src/gausskernel/process/postmaster/pagewriter.cpp b/src/gausskernel/process/postmaster/pagewriter.cpp index 6c9c35ae11..b468763690 100755 --- a/src/gausskernel/process/postmaster/pagewriter.cpp +++ b/src/gausskernel/process/postmaster/pagewriter.cpp @@ -642,6 +642,13 @@ static uint32 ckpt_qsort_dirty_page_for_flush(bool *is_new_relfilenode, uint32 f item->bucketNode = buf_desc->tag.rnode.bucketNode; item->forkNum = buf_desc->tag.forkNum; item->blockNum = buf_desc->tag.blockNum; + if (IsSegmentBufferID(buffer - 1)) { + item->seg_fileno = 1; + item->seg_blockno = buf_desc->tag.blockNum; + } else { + item->seg_fileno = buf_desc->extra->seg_fileno; + item->seg_blockno = buf_desc->extra->seg_blockno; + } if(IsSegmentFileNode(buf_desc->tag.rnode) || buf_desc->tag.rnode.opt != 0) { *is_new_relfilenode = true; } @@ -1644,70 +1651,102 @@ void crps_destory_ctxs() } } -static void incre_ckpt_aio_callback(struct io_event *event) +static int AioAsyncCompare(const void *a1, const void *a2) { - PgwrAioExtraData *tempAioExtra = (PgwrAioExtraData *)(event->data); - BufferDesc *buf_desc = (BufferDesc *)(tempAioExtra->aio_bufdesc); - uint32 written_size = event->obj->u.c.nbytes; - if (written_size != event->res) { - ereport(WARNING, (errmsg("aio write failed (errno = %d), buffer: %d/%d/%d/%d/%d %d-%d", -(int32)(event->res), - buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, - (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, - buf_desc->tag.forkNum, 
buf_desc->tag.blockNum))); - _exit(0); + const PgwrAioExtraData *arg1 = (PgwrAioExtraData *)(((const io_event *)a1)->data); + const PgwrAioExtraData *arg2 = (PgwrAioExtraData *)(((const io_event *)a2)->data); + + off_t roffset1 = 0; + off_t roffset2 = 0; + if (arg1->aio_fd == arg2->aio_fd) { + BufferDesc *buf_desc1 = (BufferDesc *)(arg1->aio_bufdesc); + BufferDesc *buf_desc2 = (BufferDesc *)(arg2->aio_bufdesc); + + if (IsSegmentBufferID(buf_desc1->buf_id)) { + roffset1 = ((buf_desc1->tag.blockNum) % RELSEG_SIZE) * BLCKSZ; + } else { + roffset1 = ((buf_desc1->extra->seg_blockno) % RELSEG_SIZE) * BLCKSZ; + } + + if (IsSegmentBufferID(buf_desc2->buf_id)) { + roffset2 = ((buf_desc2->tag.blockNum) % RELSEG_SIZE) * BLCKSZ; + } else { + roffset2 = ((buf_desc2->extra->seg_blockno) % RELSEG_SIZE) * BLCKSZ; + } + + return roffset1 - roffset2; } - off_t roffset = 0; - if (IsSegmentBufferID(buf_desc->buf_id)) { - roffset = ((buf_desc->tag.blockNum) % RELSEG_SIZE) * BLCKSZ; - } else { - roffset = ((buf_desc->extra->seg_blockno) % RELSEG_SIZE) * BLCKSZ; - } - - int aioRet = dss_aio_post_pwrite(event->obj->data, tempAioExtra->aio_fd, event->obj->u.c.nbytes, roffset); - if (aioRet != 0) { - ereport(PANIC, (errmsg("failed to post write by asnyc io (errno = %d), buffer: %d/%d/%d/%d/%d %d-%d", errno, - buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, - (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, - buf_desc->tag.forkNum, buf_desc->tag.blockNum))); - } - - buf_desc->extra->aio_in_progress = false; - -#ifdef USE_ASSERT_CHECKING - char *write_buf = (char *)(event->obj->u.c.buf); - char *origin_buf = (char *)palloc(BLCKSZ + ALIGNOF_BUFFER); - char *read_buf = (char *)BUFFERALIGN(origin_buf); - if (IsSegmentBufferID(buf_desc->buf_id)) { - SegSpace *spc = spc_open(buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, false); - seg_physical_read(spc, buf_desc->tag.rnode, buf_desc->tag.forkNum, buf_desc->tag.blockNum, 
read_buf); - } else if (buf_desc->extra->seg_fileno != EXTENT_INVALID) { - (void)SmgrNetPageCheckRead(buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, - buf_desc->extra->seg_fileno, buf_desc->tag.forkNum, - buf_desc->extra->seg_blockno, (char *)read_buf); - } - if (XLByteEQ(PageGetLSN(read_buf), PageGetLSN(write_buf))) { - Assert(memcmp(write_buf, read_buf, BLCKSZ) == 0); - } else if (!PageIsNew(read_buf) && XLByteLT(PageGetLSN(read_buf), PageGetLSN(write_buf))) { - ereport(PANIC, (errmsg("[SS][%d/%d/%d/%d/%d %d-%d]aio write error", - buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, - (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, - buf_desc->tag.forkNum, buf_desc->tag.blockNum))); - } else { - /* PageGetLSN(read_buf) > PageGetLSN(write_buf). Here main work is to check what write_buf has wrote by aio, - * therefore, the lsn of read_buf read from disk must be more than or equal to the lsn of write_buf wrote by - * aio. So when PageGetLSN(read_buf) > PageGetLSN(write_buf), what should happend is aio write concurrence. 
- */ - ereport(LOG, (errmsg("[SS][%d/%d/%d/%d/%d %d-%d]aio write concurrence", - buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, - (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, - buf_desc->tag.forkNum, buf_desc->tag.blockNum))); + return arg1->aio_fd - arg2->aio_fd; +} + +static void reset_buffer_aio_inprocess(struct io_event event[], int left, int right) +{ + for (int i = left; i < right; i++) { + PgwrAioExtraData *tempAioExtra = (PgwrAioExtraData *)(event[i].data); + BufferDesc *buf_desc = (BufferDesc *)(tempAioExtra->aio_bufdesc); + buf_desc->extra->aio_in_progress = false; } - - pfree(origin_buf); -#endif +} - UnpinBuffer(buf_desc, true); +static void incre_ckpt_aio_callback(struct io_event event[], int num) +{ + qsort(event, num, sizeof(io_event), AioAsyncCompare); + int cur_fd = -1; + off_t cur_off = 0; + int cur_writesize = 0; + int left = 0; + + for (int i = 0; i < num; i++) { + PgwrAioExtraData *tempAioExtra = (PgwrAioExtraData *)(event[i].data); + BufferDesc *buf_desc = (BufferDesc *)(tempAioExtra->aio_bufdesc); + if (event->obj->u.c.nbytes != event->res) { + ereport(WARNING, (errmsg("aio write failed errno = %d, buffer:%d/%d/%d/%d/%d %d-%d", -(int32)(event->res), + buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, + (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum))); + _exit(0); + } + + off_t temp_off = 0; + if (IsSegmentBufferID(buf_desc->buf_id)) { + temp_off = ((buf_desc->tag.blockNum) % RELSEG_SIZE) * BLCKSZ; + } else { + temp_off = ((buf_desc->extra->seg_blockno) % RELSEG_SIZE) * BLCKSZ; + } + + if (cur_fd == -1) { + cur_fd = tempAioExtra->aio_fd; + cur_off = temp_off; + cur_writesize = event[i].obj->u.c.nbytes; + left = i; + continue; + } else if (cur_fd == tempAioExtra->aio_fd && temp_off == (cur_off + event[i].obj->u.c.nbytes)) { + cur_writesize += event[i].obj->u.c.nbytes; + 
continue; + } + + int aioRet = dss_aio_post_pwrite(event->obj->data, cur_fd, cur_writesize, cur_off); + if (aioRet != 0) { + ereport(PANIC, (errmsg("failed to post write by asnyc io (errno = %d), buffer: %d/%d/%d/%d/%d %d-%d", errno, + buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, + (int32)buf_desc->tag.rnode.bucketNode, (int32)buf_desc->tag.rnode.opt, + buf_desc->tag.forkNum, buf_desc->tag.blockNum))); + } + cur_fd = tempAioExtra->aio_fd; + reset_buffer_aio_inprocess(event, left, i); + cur_off = temp_off; + cur_writesize = event[i].obj->u.c.nbytes; + left = i; + } + + if (cur_fd != -1) { + int aioRet = dss_aio_post_pwrite(event->obj->data, cur_fd, cur_writesize, cur_off); + if (aioRet != 0) { + ereport(PANIC, (errmsg("failed to post write by asnyc io (errno = %d)", errno))); + } + reset_buffer_aio_inprocess(event, left, num); + } } void ckpt_pagewriter_main(void) @@ -2469,6 +2508,13 @@ PUSH_DIRTY: item->bucketNode = buf_desc->tag.rnode.bucketNode; item->forkNum = buf_desc->tag.forkNum; item->blockNum = buf_desc->tag.blockNum; + if (IsSegmentBufferID(buf_id)) { + item->seg_fileno = 1; + item->seg_blockno = buf_desc->tag.blockNum; + } else { + item->seg_fileno = buf_desc->extra->seg_fileno; + item->seg_blockno = buf_desc->extra->seg_blockno; + } if (IsSegmentFileNode(buf_desc->tag.rnode) || IS_COMPRESSED_RNODE(buf_desc->tag.rnode, buf_desc->tag.forkNum)) { *contain_hashbucket = true; } diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index c31b7b4103..8ac5fc3f30 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -4640,10 +4640,7 @@ uint32 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext* wb_c tag.blockNum = buf_desc->extra->seg_blockno; } - if (!t_thrd.dms_cxt.buf_in_aio) { - /* when enable DSS AIO, UnpinBuffer in AIO complete callback */ - UnpinBuffer(buf_desc, true); - } + UnpinBuffer(buf_desc, true); 
ScheduleBufferTagForWriteback(wb_context, &tag); @@ -7451,35 +7448,51 @@ int ckpt_buforder_comparator(const void *pa, const void *pb) const CkptSortItem *a = (CkptSortItem *)pa; const CkptSortItem *b = (CkptSortItem *)pb; - /* compare tablespace */ - if (a->tsId < b->tsId) { - return -1; - } else if (a->tsId > b->tsId) { - return 1; - } - /* compare relation */ - if (a->relNode < b->relNode) { - return -1; - } else if (a->relNode > b->relNode) { - return 1; - } - - /* compare bucket */ - if (a->bucketNode < b->bucketNode) { - return -1; - } else if (a->bucketNode > b->bucketNode) { - return 1; - } else if (a->forkNum < b->forkNum) { /* compare fork */ - return -1; - } else if (a->forkNum > b->forkNum) { - return 1; - /* compare block number */ - } else if (a->blockNum < b->blockNum) { - return -1; - } else { /* should not be the same block ... */ - return 1; + if (ENABLE_DMS) { + if (a->forkNum < b->forkNum) { /* compare fork */ + return -1; + } else if (a->forkNum > b->forkNum) { + return 1; + } else if (a->seg_fileno < b->seg_fileno) { + return -1; + } else if (a->seg_fileno > b->seg_fileno) { + return 1; + } else if (a->seg_blockno < b->seg_blockno) { + return -1; + } else { + return 1; + } + } else { + /* compare tablespace */ + if (a->tsId < b->tsId) { + return -1; + } else if (a->tsId > b->tsId) { + return 1; + } + /* compare relation */ + if (a->relNode < b->relNode) { + return -1; + } else if (a->relNode > b->relNode) { + return 1; + } + + /* compare bucket */ + if (a->bucketNode < b->bucketNode) { + return -1; + } else if (a->bucketNode > b->bucketNode) { + return 1; + } else if (a->forkNum < b->forkNum) { /* compare fork */ + return -1; + } else if (a->forkNum > b->forkNum) { + return 1; + /* compare block number */ + } else if (a->blockNum < b->blockNum) { + return -1; + } else { /* should not be the same block ... 
*/ + return 1; + } + /* do not need to compare opt */ } - /* do not need to compare opt */ } /* @@ -7869,7 +7882,7 @@ void SSTryEliminateBuf(uint64 times) } if (flags & BM_TAG_VALID) { - if (!DmsReleaseOwner(tag, buf->buf_id)) { + if (buf->extra->aio_in_progress || !DmsReleaseOwner(tag, buf->buf_id)) { UnlockBufHdr(buf, buf_state); LWLockRelease(partition_lock); return; diff --git a/src/gausskernel/storage/smgr/segment/data_file.cpp b/src/gausskernel/storage/smgr/segment/data_file.cpp index e345a98c42..b69607cc69 100644 --- a/src/gausskernel/storage/smgr/segment/data_file.cpp +++ b/src/gausskernel/storage/smgr/segment/data_file.cpp @@ -511,12 +511,7 @@ void df_extend_internal(SegLogicFile *sf) if (new_sliceno >= sf->vector_capacity) { df_extend_file_vector(sf); } - int new_fd; - if (ENABLE_DSS) { - new_fd = dv_open_file(filename, O_RDWR | O_CREAT | DSS_FT_NODE_FLAG_INNER_INITED, SEGMENT_FILE_MODE); - } else { - new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); - } + int new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); if (new_fd < 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("[segpage] could not create file \"%s\": %m", filename))); } diff --git a/src/include/ddes/dms/ss_aio.h b/src/include/ddes/dms/ss_aio.h index 58d3c2b970..a8a38a1581 100644 --- a/src/include/ddes/dms/ss_aio.h +++ b/src/include/ddes/dms/ss_aio.h @@ -31,7 +31,7 @@ extern "C" { #endif -typedef void (*aio_callback)(struct io_event *event); +typedef void (*aio_callback)(struct io_event event[], int num); #define DSS_AIO_BATCH_SIZE 128 #define DSS_AIO_UTIL_NUM 2 typedef struct AioUtil { diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 3287c61562..c23238b0a3 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -2756,7 +2756,7 @@ extern void getSessionStatistics(Tuplestorestate* tupStore, TupleDesc tupDesc, extern Size sessionStatShmemSize(void); extern void sessionStatShmemInit(void); -#define NUM_BUFFERCACHE_PAGES_ELEM 12 
+#define NUM_BUFFERCACHE_PAGES_ELEM 15 #define CONNECTIONINFO_LEN 8192 /* Maximum length of GUC parameter connection_info */ @@ -2776,6 +2776,9 @@ typedef struct { bool isdirty; uint16 usagecount; uint32 pinning_backends; + int segfileno; + uint32 segblockno; + bool aio_in_process; } BufferCachePagesRec; typedef struct { diff --git a/src/include/storage/buf/buf_internals.h b/src/include/storage/buf/buf_internals.h index 59a57df7d5..10063fa28a 100644 --- a/src/include/storage/buf/buf_internals.h +++ b/src/include/storage/buf/buf_internals.h @@ -344,6 +344,8 @@ typedef struct CkptSortItem { ForkNumber forkNum; BlockNumber blockNum; int buf_id; + uint8 seg_fileno; + BlockNumber seg_blockno; } CkptSortItem; /* -- Gitee From e6f2d78a714bc8f4b5d004221dc8a6eb1d4041e8 Mon Sep 17 00:00:00 2001 From: jiwenke Date: Sat, 14 Sep 2024 09:18:41 +0800 Subject: [PATCH 305/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3adps=E7=BA=BF?= =?UTF-8?q?=E7=A8=8B=E9=80=80=E5=87=BA=E5=BC=82=E5=B8=B8=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/pgxc_single/pool/execRemote.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/common/backend/pgxc_single/pool/execRemote.cpp b/src/common/backend/pgxc_single/pool/execRemote.cpp index f0bf199732..d97136f84d 100755 --- a/src/common/backend/pgxc_single/pool/execRemote.cpp +++ b/src/common/backend/pgxc_single/pool/execRemote.cpp @@ -1308,7 +1308,18 @@ void spq_adps_coordinator_thread_main() pthread_cond_wait(&t_thrd.spq_ctx.qc_ctx->pq_wait_cv, &t_thrd.spq_ctx.qc_ctx->spq_pq_mutex); pthread_mutex_unlock(&t_thrd.spq_ctx.qc_ctx->spq_pq_mutex); } else { - spq_adps_consumer(); + PG_TRY(); + { + spq_adps_consumer(); + } + PG_CATCH(); + { + pthread_mutex_unlock(&t_thrd.spq_ctx.qc_ctx->spq_pq_mutex); + spq_finishQcThread(); + t_thrd.spq_ctx.qc_ctx->is_exited = true; + PG_RE_THROW(); + } + PG_END_TRY(); 
pthread_mutex_unlock(&t_thrd.spq_ctx.qc_ctx->spq_pq_mutex); } } -- Gitee From fab02519e5544665ba260904b1869751503aa554 Mon Sep 17 00:00:00 2001 From: lukeman Date: Fri, 13 Sep 2024 18:39:44 +0800 Subject: [PATCH 306/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9Aselect?= =?UTF-8?q?=20=E2=80=A6=E2=80=A6=20connect=20by=E5=AD=90=E5=8F=A5=EF=BC=8C?= =?UTF-8?q?=E5=87=BA=E7=8E=B0rownum=E6=9D=A1=E4=BB=B6=EF=BC=8C=E6=9C=AA?= =?UTF-8?q?=E6=AD=A3=E5=B8=B8=E7=94=9F=E6=95=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../runtime/executor/nodeStartWithOp.cpp | 54 +++++++---- src/test/regress/expected/sw_bugfix-1.out | 25 +---- src/test/regress/expected/sw_bugfix-2.out | 95 +++++++++++-------- src/test/regress/sql/sw_bugfix-2.sql | 13 ++- 4 files changed, 104 insertions(+), 83 deletions(-) diff --git a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp index 4cdd7a09df..1d1bc1fc76 100644 --- a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp +++ b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp @@ -472,6 +472,23 @@ static List* peekNextLevel(TupleTableSlot* startSlot, PlanState* outerNode, int return queue; } +static TupleTableSlot* updateTuplePseudoColumnValue(TupleTableSlot* slot, StartWithOpState *node, + StartWithOpColumnType type, Datum value) +{ + AttrNumber attnum = node->sw_pseudoCols[type]->resno; + int attcount = slot->tts_tupleDescriptor->natts; + bool nulls[attcount] = {false}; + Datum values[attcount] = {0}; + bool replaces[attcount] = {false}; + HeapTuple oldtup = (HeapTuple)slot->tts_tuple; + replaces[attnum - 1] = true; + nulls[attnum - 1] = false; + values[attnum - 1] = value; + HeapTuple newtup = heap_modify_tuple(oldtup, slot->tts_tupleDescriptor, values, nulls, replaces); + slot->tts_tuple = newtup; + heap_freetuple_ext(oldtup); +} + /* * Construct CONNECT BY result set by depth-first order. 
* @@ -508,24 +525,27 @@ static bool depth_first_connect(int currentLevel, StartWithOpState *node, List* isCycle = true; continue; } + updateTuplePseudoColumnValue(dstSlot, node, SWCOL_ROWNUM, *dfsRowCount + 1); + RecursiveUnionState* runode = castNode(RecursiveUnionState, outerNode); + if (currentLevel == 1 || ExecStartWithRowLevelQual(runode, dstSlot)) { + tuplestore_puttupleslot(outputStore, dstSlot); + (*dfsRowCount)++; + int rowCountBefore = *dfsRowCount; + + /* Go into the depth NOW: sibling tuples won't get processed + * until all children are done */ + node->sw_rownum = rowCountBefore; + List* children = peekNextLevel(leader, outerNode, currentLevel); + bool expectCycle = depth_first_connect(currentLevel + 1, node, + children, + dfsRowCount); + if (expectCycle) { + node->sw_cycle_rowmarks = lappend_int(node->sw_cycle_rowmarks, rowCountBefore); + } - tuplestore_puttupleslot(outputStore, dstSlot); - (*dfsRowCount)++; - int rowCountBefore = *dfsRowCount; - - /* Go into the depth NOW: sibling tuples won't get processed - * until all children are done */ - node->sw_rownum = rowCountBefore; - List* children = peekNextLevel(leader, outerNode, currentLevel); - bool expectCycle = depth_first_connect(currentLevel + 1, node, - children, - dfsRowCount); - if (expectCycle) { - node->sw_cycle_rowmarks = lappend_int(node->sw_cycle_rowmarks, rowCountBefore); - } - - if (!children) { - node->sw_leaf_rowmarks = lappend_int(node->sw_leaf_rowmarks, rowCountBefore); + if (!children) { + node->sw_leaf_rowmarks = lappend_int(node->sw_leaf_rowmarks, rowCountBefore); + } } } return isCycle; diff --git a/src/test/regress/expected/sw_bugfix-1.out b/src/test/regress/expected/sw_bugfix-1.out index 805991a58f..216580cee5 100644 --- a/src/test/regress/expected/sw_bugfix-1.out +++ b/src/test/regress/expected/sw_bugfix-1.out @@ -1507,22 +1507,7 @@ SELECT start AS connect, prior AS start FROM CONNECT CONNECT CONNECT BY ROWNUM < 3 | 5 5 | 6 6 | 7 - 1 | 3 - 3 | 4 - 3 | 5 - 5 | 6 - 6 | 7 - 1 | 
3 - 3 | 4 - 3 | 5 - 5 | 6 - 6 | 7 - 1 | 3 - 3 | 4 - 3 | 5 - 5 | 6 - 6 | 7 -(24 rows) +(9 rows) DROP TABLE IF EXISTS start; DROP TABLE IF EXISTS connect; @@ -1589,13 +1574,7 @@ select c_int from t_test_array_base connect by c_int[1:2]=array[1,2] and rownum {1,2,NULL,10,11} {1,2,2,10} {1,2,2,10} - {1,2,2,10} - {1,2,2,10} - {1,2,2,10} - {1,2,2,10} - {1,2,2,10} - {1,2,2,10} -(12 rows) +(6 rows) drop table t_test_array_base; -- test invalid columnref diff --git a/src/test/regress/expected/sw_bugfix-2.out b/src/test/regress/expected/sw_bugfix-2.out index 76c218f8bc..77eff3d920 100755 --- a/src/test/regress/expected/sw_bugfix-2.out +++ b/src/test/regress/expected/sw_bugfix-2.out @@ -1440,47 +1440,7 @@ SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY NOC 121 | 111 | 江宁区 | 4 | 4 131 | 121 | 东山街 | 5 | 5 141 | 131 | 江南摩卡 | 6 | 6 - 142 | 131 | 四季云顶 | 7 | 6 - 143 | 131 | 盛世江南 | 8 | 6 - 144 | 131 | 七里香都 | 9 | 6 - 145 | 131 | 西山枫林 | 10 | 6 - 146 | 131 | 醉墨小镇 | 11 | 6 - 147 | 131 | 布拉格调 | 12 | 6 - 148 | 131 | 清幽别院 | 13 | 6 - 149 | 131 | 璀璨天城 | 14 | 6 - 132 | 121 | 秣陵街 | 15 | 5 - 133 | 121 | 汤山街 | 16 | 5 - 135 | 121 | 禄口街 | 17 | 5 - 134 | 121 | 淳化街 | 18 | 5 - 136 | 121 | 江宁街 | 19 | 5 - 137 | 121 | 谷里街 | 20 | 5 - 138 | 121 | 湖熟街 | 21 | 5 - 139 | 121 | 横溪街 | 22 | 5 - 122 | 111 | 雨花台 | 23 | 4 - 123 | 111 | 鼓楼区 | 24 | 4 - 124 | 111 | 玄武区 | 25 | 4 - 125 | 111 | 建邺区 | 26 | 4 - 126 | 111 | 秦淮区 | 27 | 4 - 127 | 111 | 浦口区 | 28 | 4 - 128 | 111 | 浦口区 | 29 | 4 - 129 | 111 | 六合区 | 30 | 4 - 112 | 11 | 宿迁市 | 31 | 3 - 113 | 11 | 徐州市 | 32 | 3 - 114 | 11 | 苏州市 | 33 | 3 - 115 | 11 | 盐城市 | 34 | 3 - 117 | 11 | 常州市 | 35 | 3 - 116 | 11 | 无锡市 | 36 | 3 - 118 | 11 | 连云港 | 37 | 3 - 119 | 11 | 泰州市 | 38 | 3 - 12 | 1 | 山东省 | 39 | 2 - 13 | 1 | 安徽省 | 40 | 2 - 14 | 1 | 河南省 | 41 | 2 - 15 | 1 | 河北省 | 42 | 2 - 16 | 1 | 湖南省 | 43 | 2 - 17 | 1 | 湖北省 | 44 | 2 - 18 | 1 | 贵州省 | 45 | 2 - 19 | 1 | 武汉省 | 46 | 2 -(46 rows) +(6 rows) --test subquery pushdown SELECT subq_0.c1 as c0 @@ -2358,4 +2318,55 
@@ select child, level, lpad(' ', level*3, ' ')||child c1 from hier start with pare ERROR: Not support refer startwith Pseudo column in order siblings by. select child, level, lpad(' ', level*3, ' ')||child c1, level c2 from hier start with parent is null connect by prior child = parent ORDER SIBLINGS BY c2; ERROR: Not support refer startwith Pseudo column in order siblings by. -drop table hier; \ No newline at end of file +drop table hier; +-- test connect by rownum clause +drop table if exists t_test_connect_by_rownum; +create table t_test_connect_by_rownum(id char(1)); +insert into t_test_connect_by_rownum values('a'),('b'),('c'); +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 1; + id | rownum | level +----+--------+------- + a | 1 | 1 + b | 2 | 1 + c | 3 | 1 +(3 rows) + +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 2; + id | rownum | level +----+--------+------- + a | 1 | 1 + b | 2 | 1 + c | 3 | 1 +(3 rows) + +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 3; + id | rownum | level +----+--------+------- + a | 1 | 1 + a | 2 | 2 + b | 3 | 1 + c | 4 | 1 +(4 rows) + +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 4; + id | rownum | level +----+--------+------- + a | 1 | 1 + a | 2 | 2 + a | 3 | 3 + b | 4 | 1 + c | 5 | 1 +(5 rows) + +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 5; + id | rownum | level +----+--------+------- + a | 1 | 1 + a | 2 | 2 + a | 3 | 3 + a | 4 | 4 + b | 5 | 1 + c | 6 | 1 +(6 rows) + +drop table t_test_connect_by_rownum; \ No newline at end of file diff --git a/src/test/regress/sql/sw_bugfix-2.sql b/src/test/regress/sql/sw_bugfix-2.sql index ec21278013..430b674630 100644 --- a/src/test/regress/sql/sw_bugfix-2.sql +++ b/src/test/regress/sql/sw_bugfix-2.sql @@ -881,4 +881,15 @@ insert into hier values('China','AK47'); insert into hier values('China','天津'); select child, level, lpad(' ', level*3, ' 
')||child c1 from hier start with parent is null connect by prior child = parent ORDER SIBLINGS BY c1; select child, level, lpad(' ', level*3, ' ')||child c1, level c2 from hier start with parent is null connect by prior child = parent ORDER SIBLINGS BY c2; -drop table hier; \ No newline at end of file +drop table hier; + +-- test connect by rownum clause +drop table if exists t_test_connect_by_rownum; +create table t_test_connect_by_rownum(id char(1)); +insert into t_test_connect_by_rownum values('a'),('b'),('c'); +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 1; +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 2; +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 3; +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 4; +select id, rownum, level from t_test_connect_by_rownum connect by rownum < 5; +drop table t_test_connect_by_rownum; \ No newline at end of file -- Gitee From fb1998108c37b580e6319c6ba7b64f605b909b26 Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Sat, 14 Sep 2024 09:18:33 +0800 Subject: [PATCH 307/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=8C=89=E9=9C=80=E5=9B=9E=E6=94=BEredo=E9=98=B6?= =?UTF-8?q?=E6=AE=B5=E5=A4=87=E6=9C=BA=E8=AF=BB=E4=B8=9A=E5=8A=A1=E5=90=8C?= =?UTF-8?q?=E6=97=B6=E8=A7=A6=E5=8F=91reform=EF=BC=8C=E5=AF=BC=E4=B8=BB?= =?UTF-8?q?=E8=8A=82=E7=82=B9=E6=AD=BB=E9=94=81=EF=BC=8C=E6=9C=80=E5=90=8E?= =?UTF-8?q?=E6=8A=A5=E9=94=99=E9=80=80=E5=87=BA=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp | 3 +++ .../ddes/adapter/ss_dms_callback.cpp | 10 ++++++++++ src/gausskernel/storage/buffer/bufmgr.cpp | 18 +++++++++--------- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp index 
2f4bcc528a..3b66eadbf8 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp @@ -553,6 +553,9 @@ bool SSOndemandRequestPrimaryRedo(BufferTag tag) return false; } else if (SS_STANDBY_ONDEMAND_NORMAL || SS_PRIMARY_MODE) { return true; + // retry after reform finish + } else if (SS_IN_REFORM) { + return false; } ereport(DEBUG1, diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 6a6c40bdcf..f15ae1caf6 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -2168,6 +2168,16 @@ int CBOndemandRedoPageForStandby(void *block_key, int32 *redo_status) return GS_SUCCESS;; } + if (SS_IN_REFORM) { + ereport(WARNING, (errmodule(MOD_DMS), + errmsg("[SS][On-demand][%u/%u/%u/%d %d-%u] Reform happend when primary redo page for standby," + "return ONDEMAND_REDO_FAIL.", + tag->rnode.spcNode, tag->rnode.dbNode, + tag->rnode.relNode, tag->rnode.bucketNode, tag->forkNum, tag->blockNum))); + *redo_status = ONDEMAND_REDO_FAIL; + return GS_SUCCESS; + } + Buffer buffer = InvalidBuffer; uint32 saveInterruptHoldoffCount = t_thrd.int_cxt.InterruptHoldoffCount; *redo_status = ONDEMAND_REDO_DONE; diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index c31b7b4103..ec58919993 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -6373,11 +6373,11 @@ retry: * hold the content shared lock all the time, give the MES from the primary a chance to get it, * and the timeout time of the primary and standby servers is modified to open the unlocking * time window. 
- */ + */ if (!dms_standby_retry_read && SS_STANDBY_MODE) { - dms_standby_retry_read = true; - mode = BUFFER_LOCK_EXCLUSIVE; - } + dms_standby_retry_read = true; + mode = BUFFER_LOCK_EXCLUSIVE; + } pg_usleep(5000L); goto retry; } else if (dms_standby_retry_read) { @@ -6387,11 +6387,11 @@ retry: * * A good idea would be to add the ability to lock downgrade for LWLock. */ - mode = origin_mode; - dms_standby_retry_read = false; - LWLockRelease(buf->content_lock); - goto retry; - } + mode = origin_mode; + dms_standby_retry_read = false; + LWLockRelease(buf->content_lock); + goto retry; + } } } -- Gitee From 6015a7317441a64602f6b8539352fa464b9a965c Mon Sep 17 00:00:00 2001 From: lukeman Date: Sat, 14 Sep 2024 11:28:45 +0800 Subject: [PATCH 308/347] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9Ags=5Fpr?= =?UTF-8?q?obackup=20delete=E5=88=A0=E9=99=A4=E5=A4=87=E4=BB=BD=E6=8A=A5?= =?UTF-8?q?=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_probackup/delete.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/bin/pg_probackup/delete.cpp b/src/bin/pg_probackup/delete.cpp index 05f55a75eb..3c6a9d755a 100644 --- a/src/bin/pg_probackup/delete.cpp +++ b/src/bin/pg_probackup/delete.cpp @@ -40,10 +40,12 @@ do_delete(time_t backup_id) pgBackup *target_backup = NULL; size_t size_to_delete = 0; char size_to_delete_pretty[20]; - Oss::Oss* oss = getOssClient(); + Oss::Oss* oss = nullptr; char* bucket_name = NULL; - + if (current.media_type == MEDIA_TYPE_OSS) { + oss = getOssClient(); + } /* Get complete list of backups */ backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID); @@ -1112,9 +1114,12 @@ do_delete_status(InstanceConfig *instance_config, const char *status) size_t size_to_delete = 0; char size_to_delete_pretty[20]; pgBackup *backup; - Oss::Oss* oss = getOssClient(); + Oss::Oss* oss = nullptr; char* bucket_name = NULL; + if (current.media_type == MEDIA_TYPE_OSS) { + 
oss = getOssClient(); + } BackupStatus status_for_delete = str2status(status); delete_list = parray_new(); -- Gitee From 72477084f6722ed084ff7957cbde4f3a217d53ff Mon Sep 17 00:00:00 2001 From: lyannaa <1016943941@qq.com> Date: Sun, 15 Sep 2024 11:15:41 +0800 Subject: [PATCH 309/347] strip useless code --- .../storage/access/ustore/knl_pruneuheap.cpp | 5 ----- .../storage/access/ustore/knl_uheap.cpp | 15 ++------------- src/include/access/ustore/knl_upage.h | 9 +-------- 3 files changed, 3 insertions(+), 26 deletions(-) diff --git a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp index f0008ed4c8..801ff6cd33 100644 --- a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp @@ -895,11 +895,6 @@ static int UHeapPruneItem(const RelationBuffer *relbuf, OffsetNumber offnum, Tra * that can be freed. */ Assert(!TransactionIdIsValid(xid) || !TransactionIdIsInProgress(xid)); - if (TransactionIdIsValid(xid) && TransactionIdIsInProgress(xid)) { - ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("Tuple will be pruned but xid is inprogress, xid=%lu, oldestxmin=%lu, globalRecycleXid=%lu.", - xid, oldestXmin, pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid)))); - } /* short aligned */ *spaceFreed += SHORTALIGN(tup.disk_tuple_size); } diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 553a2d041a..0179f98c5c 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -565,10 +565,6 @@ Oid UHeapInsert(RelationData *rel, UHeapTupleData *utuple, CommandId cid, BulkIn ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("The insert tuple is NULL"))); } Assert(utuple->tupTableType == UHEAP_TUPLE); - - if (t_thrd.ustore_cxt.urecvec) { - t_thrd.ustore_cxt.urecvec->Reset(false); - } 
TransactionId fxid = GetTopTransactionId(); @@ -2024,10 +2020,6 @@ TM_Result UHeapDelete(Relation relation, ItemPointer tid, CommandId cid, Snapsho int retryTimes = 0; Assert(ItemPointerIsValid(tid)); - - if (t_thrd.ustore_cxt.urecvec) { - t_thrd.ustore_cxt.urecvec->Reset(false); - } BlockNumber blkno = ItemPointerGetBlockNumber(tid); Page page = GetPageBuffer(relation, blkno, buffer); @@ -2464,10 +2456,6 @@ TM_Result UHeapUpdate(Relation relation, Relation parentRelation, ItemPointer ot Assert(newtup->tupTableType == UHEAP_TUPLE); Assert(ItemPointerIsValid(otid)); - if (t_thrd.ustore_cxt.urecvec) { - t_thrd.ustore_cxt.urecvec->Reset(false); - } - /* * Fetch the list of attributes to be checked for various operations. * @@ -4396,7 +4384,6 @@ CommandId UHeapTupleGetCid(UHeapTuple utuple, Buffer buffer) } Assert(IS_VALID_UNDO_REC_PTR(tdinfo.urec_add)); - VerifyMemoryContext(); UndoRecord *urec = New(CurrentMemoryContext)UndoRecord(); urec->Reset(tdinfo.urec_add); urec->SetMemoryContext(CurrentMemoryContext); @@ -4468,6 +4455,7 @@ void UHeapResetPreparedUndo() } else { for (int i = 0; i < t_thrd.ustore_cxt.undo_buffer_idx; i++) { if (BufferIsValid(t_thrd.ustore_cxt.undo_buffers[i].buf)) { +#ifdef USE_ASSERT_CHECKING BufferDesc *bufdesc = GetBufferDescriptor(t_thrd.ustore_cxt.undo_buffers[i].buf - 1); if (LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufdesc), LW_EXCLUSIVE)) { LWLock *lock = BufferDescriptorGetContentLock(bufdesc); @@ -4478,6 +4466,7 @@ void UHeapResetPreparedUndo() BufferGetBlockNumber(t_thrd.ustore_cxt.undo_buffers[i].buf), t_thrd.ustore_cxt.undo_buffers[i].buf, lock->state))); } +#endif t_thrd.ustore_cxt.undo_buffers[i].inUse = false; t_thrd.ustore_cxt.undo_buffers[i].zero = false; } diff --git a/src/include/access/ustore/knl_upage.h b/src/include/access/ustore/knl_upage.h index dbae60e162..a8e8f79fff 100644 --- a/src/include/access/ustore/knl_upage.h +++ b/src/include/access/ustore/knl_upage.h @@ -146,14 +146,7 @@ * ItemIdChangeLen * Change 
the length of itemid. */ -#define RowPtrChangeLen(_rowptr, _length) \ - do { \ - if (RowPtrGetOffset(_rowptr) + _length > BLCKSZ) { \ - elog(PANIC, "row pointer error, offset:%u, flags:%u, len:%u", RowPtrGetOffset(_rowptr), (_rowptr)->flags, \ - (_length)); \ - } \ - (_rowptr)->len = (_length); \ - } while (0) +#define RowPtrChangeLen(_rowptr, _length) ((_rowptr)->len = (_length)) /* * RowPtrIsDead -- Gitee From bd89025f62c25ba033160e2fc561c3ea95e13794 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Sun, 15 Sep 2024 14:39:49 +0800 Subject: [PATCH 310/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=BC=A0=E7=BB=9F?= =?UTF-8?q?=E9=9B=86=E7=BE=A4build?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/replication/basebackup.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/storage/replication/basebackup.cpp b/src/gausskernel/storage/replication/basebackup.cpp index 4296d957ac..2532c6a2a5 100755 --- a/src/gausskernel/storage/replication/basebackup.cpp +++ b/src/gausskernel/storage/replication/basebackup.cpp @@ -1724,10 +1724,10 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab /* * skip sending directories inside pg_tblspc, if not required. 
*/ - if (strcmp(pathbuf, "./pg_tblspc") == 0 || - (ENABLE_DSS && strcmp(pathbuf, dssdir) == 0 && - strstr(pathbuf + strlen(dssdir), "/pg_tblspc") != NULL && - !sendtblspclinks)) { + if (!sendtblspclinks && + ((ENABLE_DSS && strncmp(pathbuf, dssdir, strlen(dssdir)) == 0 && + strstr(pathbuf + strlen(dssdir), "/pg_tblspc") != NULL) || + strcmp(pathbuf, "./pg_tblspc") == 0)) { skip_this_dir = true; } if (!skip_this_dir) -- Gitee From 930a4a50b73e7ffd7427fab995d6c1b4728eebe7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E9=94=A6=E6=B3=A2?= <570157209@qq.com> Date: Fri, 13 Sep 2024 09:25:48 +0800 Subject: [PATCH 311/347] =?UTF-8?q?=E4=BC=98=E5=8C=96readme?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/README.md b/README.md index a719d48058..ccc2f735b5 100644 --- a/README.md +++ b/README.md @@ -501,14 +501,6 @@ https://opengauss.org/zh/ master - gcc7.3 - - openEuler_arm: https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/binarylibs/gcc7.3/openGauss-third_party_binarylibs_openEuler_arm.tar.gz
- openEuler_x86: https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/binarylibs/gcc7.3/openGauss-third_party_binarylibs_openEuler_x86_64.tar.gz
- Centos_x86: https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/binarylibs/gcc7.3/openGauss-third_party_binarylibs_Centos7.6_x86_64.tar.gz
- openEuler 22.03 arm: https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/binarylibs/gcc7.3/openGauss-third_party_binarylibs_openEuler_2203_arm.tar.gz
- openEuler 22.03 x86: https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/binarylibs/gcc7.3/openGauss-third_party_binarylibs_openEuler_2203_x86_64.tar.gz - gcc10.3 openEuler_arm: https://opengauss.obs.cn-south-1.myhuaweicloud.com/latest/binarylibs/gcc10.3/openGauss-third_party_binarylibs_openEuler_arm.tar.gz
@@ -520,6 +512,7 @@ https://opengauss.org/zh/ +注:6.0.0及以后的版本请使用gcc10.3的三方库进行编译 现在我们已经拥有完整的openGauss代码,把它存储在以下目录中(以sda为例)。 @@ -609,9 +602,6 @@ openGauss-server中的build.sh是编译过程中的重要脚本工具。该工 **debug**版本: ``` - # gcc7.3.0版本 - ./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib - # gcc10.3.1版本(一般用于openEuler + ARM架构) ./configure --gcc-version=10.3.1 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib @@ -623,9 +613,6 @@ openGauss-server中的build.sh是编译过程中的重要脚本工具。该工 **release**版本: ``` - # gcc7.3.0版本 - ./configure --gcc-version=7.3.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --with-readline --without-zlib - # gcc10.3.1版本(一般用于openEuler + ARM架构) ./configure --gcc-version=10.3.1 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --with-readline --without-zlib @@ -637,9 +624,6 @@ openGauss-server中的build.sh是编译过程中的重要脚本工具。该工 **memcheck**版本: ``` - # gcc7.3.0版本 - ./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib --enable-memory-check - # gcc10.3.1版本(一般用于openEuler + ARM架构) ./configure --gcc-version=10.3.1 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib --enable-memory-check -- Gitee From d42a002b454d15a9a0b5658e6fd416d8b0faeaf9 Mon Sep 17 00:00:00 2001 From: dongning12 Date: Wed, 18 Sep 2024 19:50:08 +0800 Subject: [PATCH 312/347] =?UTF-8?q?[=E8=B5=84=E6=BA=90=E6=B1=A0=E5=8C=96]?= =?UTF-8?q?=E9=80=82=E9=85=8Dbuf=5Fctrl=E4=B8=8A=E9=9D=A2in=5Frcy=E6=A0=87?= =?UTF-8?q?=E8=AE=B0=E7=9A=84=E6=B8=85=E7=90=86=E7=9A=84=E5=9B=9E=E8=B0=83?= =?UTF-8?q?=E5=87=BD=E6=95=B0?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ddes/adapter/ss_dms_callback.cpp | 36 +++++++++++++++---- src/gausskernel/ddes/adapter/ss_init.cpp | 2 +- src/gausskernel/ddes/ddes_commit_id | 2 +- 3 files changed, 32 insertions(+), 8 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 6a6c40bdcf..d04d5f51be 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -1451,21 +1451,31 @@ static int32 CBBufRebuildDrcInternal(int begin, int len, unsigned char thread_in */ const int dms_invalid_thread_index = 255; const int dms_invalid_thread_num = 255; -static int32 CBBufRebuildDrcParallel(void* db_handle, unsigned char thread_index, unsigned char thread_num) +static void CBAllocBufRangeForThread(unsigned char thread_index, unsigned char thread_num, + int* buf_begin, int* buf_num) { Assert((thread_index == dms_invalid_thread_index && thread_num == dms_invalid_thread_num) || (thread_index != dms_invalid_thread_index && thread_num != dms_invalid_thread_num && thread_index < thread_num)); - int buf_num = TOTAL_BUFFER_NUM / thread_num; - int buf_begin = thread_index * buf_num; + int num = TOTAL_BUFFER_NUM / thread_num; + int begin = thread_index * num; if (thread_index == thread_num - 1) { - buf_num = TOTAL_BUFFER_NUM - buf_begin; + num = TOTAL_BUFFER_NUM - begin; } if (thread_index == dms_invalid_thread_index && thread_num == dms_invalid_thread_num) { - buf_begin = 0; - buf_num = TOTAL_BUFFER_NUM; + begin = 0; + num = TOTAL_BUFFER_NUM; } + *buf_begin = begin; + *buf_num = num; +} + +static int32 CBBufRebuildDrcParallel(void* db_handle, unsigned char thread_index, unsigned char thread_num) +{ + int buf_begin = 0; + int buf_num = 0; + CBAllocBufRangeForThread(thread_index, thread_num, &buf_begin, &buf_num); return CBBufRebuildDrcInternal(buf_begin, buf_num, thread_index); } @@ -2230,6 +2240,19 @@ int 
CBDoCheckpointImmediately(unsigned long long *ckpt_lsn) return GS_SUCCESS; } +int CBBufCtrlRcyClean(void *db_handle, unsigned char thread_index, unsigned char thread_num) +{ + int buf_begin = 0; + int buf_num = 0; + CBAllocBufRangeForThread(thread_index, thread_num, &buf_begin, &buf_num); + int buf_end = buf_begin + buf_num - 1; + for (int i = buf_begin; i <= buf_end; i++) { + dms_buf_ctrl_t *buf_ctrl = GetDmsBufCtrl(i); + buf_ctrl->in_rcy = false; + } + return GS_SUCCESS; +} + void DmsInitCallback(dms_callback_t *callback) { // used in reform @@ -2298,4 +2321,5 @@ void DmsInitCallback(dms_callback_t *callback) callback->buf_ctrl_recycle = CBBufCtrlRecycle; callback->dms_thread_deinit = DmsThreadDeinit; callback->opengauss_do_ckpt_immediate = CBDoCheckpointImmediately; + callback->dms_ctl_rcy_clean_parallel = CBBufCtrlRcyClean; } diff --git a/src/gausskernel/ddes/adapter/ss_init.cpp b/src/gausskernel/ddes/adapter/ss_init.cpp index d982ec3744..4adb31ee27 100644 --- a/src/gausskernel/ddes/adapter/ss_init.cpp +++ b/src/gausskernel/ddes/adapter/ss_init.cpp @@ -407,7 +407,7 @@ static void setDMSProfile(dms_profile_t* profile) profile->max_session_cnt = DMS_MAX_SESSIONS; profile->time_stat_enabled = TRUE; profile->pipe_type = convertInterconnectType(); - profile->conn_created_during_init = TRUE; + profile->conn_created_during_init = false; setRdmaWorkConfig(profile); setScrlConfig(profile); SetOckLogPath(dms_attr, profile->ock_log_path); diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index da57c1e167..f4097434ff 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=6de342c050a9ff2ac5cb5b462a699e46c88bd156 +dms_commit_id=402eef8afae247c9592959c77fcf136aac9e2dd9 dss_commit_id=621eb9d6aac34726db404446511be2de9ae32a3f cbb_commit_id=2ea0e4ea6349f00ca85793480ee1ced952c3c8c7 -- Gitee From e14da8e932f338d3f8155898b0a2e396a8cd0457 Mon Sep 17 00:00:00 2001 From: Hemny Date: 
Thu, 19 Sep 2024 07:42:05 +0000 Subject: [PATCH 313/347] =?UTF-8?q?=E6=8E=A8=E7=82=B9CBB=EF=BC=8C=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E5=8D=95=E9=9B=86=E7=BE=A4=E6=9E=81=E7=AB=AF=E6=83=85?= =?UTF-8?q?=E5=86=B5=E4=B8=8B=E5=87=BA=E7=8E=B0=E5=8F=8C=E4=B8=BB=E7=9A=84?= =?UTF-8?q?=E6=83=85=E5=86=B5=20=E6=8E=A8=E7=82=B9CBB=EF=BC=8C=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E5=8D=95=E9=9B=86=E7=BE=A4=E6=9E=81=E7=AB=AF=E6=83=85?= =?UTF-8?q?=E5=86=B5=E4=B8=8B=E5=87=BA=E7=8E=B0=E5=8F=8C=E4=B8=BB=E7=9A=84?= =?UTF-8?q?=E6=83=85=E5=86=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Hemny --- src/gausskernel/ddes/ddes_commit_id | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index f4097434ff..2534f1b7ec 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ dms_commit_id=402eef8afae247c9592959c77fcf136aac9e2dd9 dss_commit_id=621eb9d6aac34726db404446511be2de9ae32a3f -cbb_commit_id=2ea0e4ea6349f00ca85793480ee1ced952c3c8c7 +cbb_commit_id=8ef2bfa90731cf48bad7c8d4d5f5b1509b8368b3 -- Gitee From 4abe729a34f193296254f6fd627689f6388e75ef Mon Sep 17 00:00:00 2001 From: leiziwei Date: Sat, 14 Sep 2024 17:18:10 +0800 Subject: [PATCH 314/347] =?UTF-8?q?record=E6=9F=A5=E8=AF=A2=E4=BC=AA?= =?UTF-8?q?=E5=88=97=E4=B8=8D=E8=83=BD=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/gram.y | 17 +++++- .../interfaces/ecpg/preproc/ecpg.trailer | 13 ++++ src/common/interfaces/ecpg/preproc/ecpg.type | 1 + src/common/interfaces/ecpg/preproc/parse.pl | 1 + .../expected/plpgsql_cursor_rowtype.out | 59 +++++++++++++++++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 43 ++++++++++++++ 6 files changed, 132 insertions(+), 2 deletions(-) diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 
e1202bfce6..3f673423af 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -701,7 +701,7 @@ static char* IdentResolveToChar(char *ident, core_yyscan_t yyscanner); %type Sconst comment_text notify_payload %type RoleId TypeOwner opt_granted_by opt_boolean_or_string ColId_or_Sconst definer_user definer_expression UserId %type var_list guc_value_extension_list -%type ColId ColLabel var_name type_function_name param_name charset_collate_name +%type ColId ColLabel ColLabel_with_rownum var_name type_function_name param_name charset_collate_name %type var_value zone_value %type unreserved_keyword type_func_name_keyword @@ -29594,7 +29594,7 @@ database_name: access_method: ColId { $$ = $1; }; -attr_name: ColLabel { $$ = $1; }; +attr_name: ColLabel_with_rownum { $$ = $1; }; index_name: ColId { $$ = $1; }; @@ -29857,6 +29857,19 @@ ColLabel: IDENT $$ = pstrdup($1); } ; +ColLabel_with_rownum: IDENT + { + $$ = IdentResolveToChar($1, yyscanner); + } + | '\''IDENT'\'' { $$ = $2; } + | unreserved_keyword { $$ = pstrdup($1); } + | col_name_keyword { $$ = pstrdup($1); } + | type_func_name_keyword { $$ = pstrdup($1); } + | reserved_keyword + { + $$ = pstrdup($1); + } + ; DelimiterStmt: DELIMITER delimiter_str_names END_OF_INPUT { diff --git a/src/common/interfaces/ecpg/preproc/ecpg.trailer b/src/common/interfaces/ecpg/preproc/ecpg.trailer index a6ad4a2174..70622f624f 100644 --- a/src/common/interfaces/ecpg/preproc/ecpg.trailer +++ b/src/common/interfaces/ecpg/preproc/ecpg.trailer @@ -1571,6 +1571,19 @@ ColLabel: ECPGColLabel { $$ = $1; } | ECPGunreserved_interval { $$ = $1; } ; +ColLabel_with_rownum: ECPGColLabel { $$ = $1; } + | ECPGTypeName { $$ = $1; } + | CHAR_P { $$ = mm_strdup("char"); } + | CURRENT_P { $$ = mm_strdup("current"); } + | INPUT_P { $$ = mm_strdup("input"); } + | INT_P { $$ = mm_strdup("int"); } + | TO { $$ = mm_strdup("to"); } + | UNION { $$ = mm_strdup("union"); } + | VALUES { $$ = mm_strdup("values"); } + | ECPGCKeywords { $$ = 
$1; } + | ECPGunreserved_interval { $$ = $1; } + ; + ECPGColLabel: ECPGColLabelCommon { $$ = $1; } | unreserved_keyword { $$ = $1; } | reserved_keyword { $$ = $1; } diff --git a/src/common/interfaces/ecpg/preproc/ecpg.type b/src/common/interfaces/ecpg/preproc/ecpg.type index ac6aa000ac..bcb85e50e7 100644 --- a/src/common/interfaces/ecpg/preproc/ecpg.type +++ b/src/common/interfaces/ecpg/preproc/ecpg.type @@ -47,6 +47,7 @@ %type civarind %type ColId %type ColLabel +%type ColLabel_with_rownum %type connect_options %type connection_object %type connection_target diff --git a/src/common/interfaces/ecpg/preproc/parse.pl b/src/common/interfaces/ecpg/preproc/parse.pl index d92195c3d1..bde6de13ab 100644 --- a/src/common/interfaces/ecpg/preproc/parse.pl +++ b/src/common/interfaces/ecpg/preproc/parse.pl @@ -74,6 +74,7 @@ my %replace_types = ( 'type_function_name' => 'ignore', 'AnonyBlockStmt' => 'ignore', 'ColLabel' => 'ignore', + 'ColLabel_with_rownum' => 'ignore', 'Sconst' => 'ignore',); # these replace_line commands excise certain keywords from the core keyword diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 0069d6d18c..136abce61b 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -65,6 +65,65 @@ END; NOTICE: Result: (,,,,,) drop table test_2 cascade; set behavior_compat_options='allow_procedure_compile_check,disable_record_type_in_dml'; +create table t_CurRowtype_Def_Case0001(col1 int primary key,col2 varchar(100)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_currowtype_def_case0001_pkey" for table "t_currowtype_def_case0001" +insert into t_CurRowtype_Def_Case0001 values(1,'one'); +insert into t_CurRowtype_Def_Case0001 values(2,'two'); +insert into t_CurRowtype_Def_Case0001 values(3,'three'); +insert into t_CurRowtype_Def_Case0001 values(4,NULL); +--创建游标rowtype,select 伪列;expect:成功,输出4行 +declare + 
cursor cur_CurRowtype_Def_Case0001_1 is select col1,col2,rownum from t_CurRowtype_Def_Case0001; + source cur_CurRowtype_Def_Case0001_1%rowtype; +begin + open cur_CurRowtype_Def_Case0001_1; + loop + fetch cur_CurRowtype_Def_Case0001_1 into source; + exit when cur_CurRowtype_Def_Case0001_1%notfound; + raise notice '% , % ,% ',source.col1,source.col2,source.rownum; + end loop; + close cur_CurRowtype_Def_Case0001_1; +end; +/ +NOTICE: 1 , one ,1 +NOTICE: 2 , two ,2 +NOTICE: 3 , three ,3 +NOTICE: 4 , ,4 +drop table t_CurRowtype_Def_Case0001; +create table t_CurRowtype_Def_Case0002(col1 int primary key,rownum varchar(100)); +ERROR: syntax error at or near "rownum" +LINE 1: ...le t_CurRowtype_Def_Case0002(col1 int primary key,rownum var... + ^ +create table t_CurRowtype_Def_Case0002(col1 int primary key); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_currowtype_def_case0002_pkey" for table "t_currowtype_def_case0002" +insert into t_CurRowtype_Def_Case0002 values(1); +select t_CurRowtype_Def_Case0002.rownum from t_CurRowtype_Def_Case0002; +ERROR: column t_currowtype_def_case0002.rownum does not exist +LINE 1: select t_CurRowtype_Def_Case0002.rownum from t_CurRowtype_De... 
+ ^ +CONTEXT: referenced column: rownum +select rownum from t_CurRowtype_Def_Case0002; + rownum +-------- + 1 +(1 row) + +drop table t_CurRowtype_Def_Case0002; +CREATE TYPE type_record AS ( + first text, + rownum int4 +) ; +ERROR: syntax error at or near "rownum" +LINE 3: rownum int4 + ^ +DECLARE + type rectype is record(rownum int,row2 text); + source rectype := (2, 'dsaw'); +BEGIN + raise notice '% , %',source.row2,source.rownum; +END; +/ +NOTICE: dsaw , 2 -- Prohibit virtual column insertion create table t1(col1 varchar(10),col varchar(10)); create table t2(col1 varchar(10),col varchar(10)); diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql index 72006957d2..7b7bf758fc 100644 --- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -70,6 +70,49 @@ drop table test_2 cascade; set behavior_compat_options='allow_procedure_compile_check,disable_record_type_in_dml'; +create table t_CurRowtype_Def_Case0001(col1 int primary key,col2 varchar(100)); +insert into t_CurRowtype_Def_Case0001 values(1,'one'); +insert into t_CurRowtype_Def_Case0001 values(2,'two'); +insert into t_CurRowtype_Def_Case0001 values(3,'three'); +insert into t_CurRowtype_Def_Case0001 values(4,NULL); + +--创建游标rowtype,select 伪列;expect:成功,输出4行 +declare + cursor cur_CurRowtype_Def_Case0001_1 is select col1,col2,rownum from t_CurRowtype_Def_Case0001; + source cur_CurRowtype_Def_Case0001_1%rowtype; +begin + open cur_CurRowtype_Def_Case0001_1; + loop + fetch cur_CurRowtype_Def_Case0001_1 into source; + exit when cur_CurRowtype_Def_Case0001_1%notfound; + raise notice '% , % ,% ',source.col1,source.col2,source.rownum; + end loop; + close cur_CurRowtype_Def_Case0001_1; +end; +/ + +drop table t_CurRowtype_Def_Case0001; + +create table t_CurRowtype_Def_Case0002(col1 int primary key,rownum varchar(100)); +create table t_CurRowtype_Def_Case0002(col1 int primary key); +insert into 
t_CurRowtype_Def_Case0002 values(1); +select t_CurRowtype_Def_Case0002.rownum from t_CurRowtype_Def_Case0002; +select rownum from t_CurRowtype_Def_Case0002; +drop table t_CurRowtype_Def_Case0002; + +CREATE TYPE type_record AS ( + first text, + rownum int4 +) ; + +DECLARE + type rectype is record(rownum int,row2 text); + source rectype := (2, 'dsaw'); +BEGIN + raise notice '% , %',source.row2,source.rownum; +END; +/ + -- Prohibit virtual column insertion create table t1(col1 varchar(10),col varchar(10)); create table t2(col1 varchar(10),col varchar(10)); -- Gitee From ffdb495ae9cd80e022b3722f8f576076bbec1a63 Mon Sep 17 00:00:00 2001 From: wenger Date: Wed, 4 Sep 2024 15:44:50 +0800 Subject: [PATCH 315/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E7=89=A9=E5=8C=96?= =?UTF-8?q?=E8=A7=86=E5=9B=BE=E9=87=8C=E5=AF=BC=E8=87=B4=E5=AE=95=E6=9C=BA?= =?UTF-8?q?=E7=9A=84=E5=86=85=E5=AD=98=E4=BD=BF=E7=94=A8=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/createas.cpp | 2 +- .../optimizer/commands/matview.cpp | 2 +- .../expected/matview_with_event_trigger.out | 67 +++++++++++++++++++ src/test/regress/parallel_schedule0 | 2 +- src/test/regress/parallel_schedule0B | 2 +- .../sql/matview_with_event_trigger.sql | 65 ++++++++++++++++++ 6 files changed, 136 insertions(+), 4 deletions(-) create mode 100644 src/test/regress/expected/matview_with_event_trigger.out create mode 100644 src/test/regress/sql/matview_with_event_trigger.sql diff --git a/src/gausskernel/optimizer/commands/createas.cpp b/src/gausskernel/optimizer/commands/createas.cpp index 09bcbc52ea..cef9eaf9ee 100644 --- a/src/gausskernel/optimizer/commands/createas.cpp +++ b/src/gausskernel/optimizer/commands/createas.cpp @@ -327,7 +327,7 @@ static void intorel_startup(DestReceiver* self, int operation, TupleDesc typeinf create->inhRelations = NIL; create->ofTypename = NULL; create->constraints = NIL; - create->options = into->options; + 
create->options = (List*)copyObject(into->options); create->oncommit = into->onCommit; create->row_compress = into->row_compress; create->tablespacename = into->tableSpaceName; diff --git a/src/gausskernel/optimizer/commands/matview.cpp b/src/gausskernel/optimizer/commands/matview.cpp index 14918b55b3..5859a3e307 100755 --- a/src/gausskernel/optimizer/commands/matview.cpp +++ b/src/gausskernel/optimizer/commands/matview.cpp @@ -67,8 +67,8 @@ typedef struct { Query* viewParse; /* the query which defines/populates data */ /* These fields are filled by intorel_startup: */ Relation rel; /* relation to write to */ - ObjectAddress reladdr; /* address of rel, for ExecCreateTableAs */ CommandId output_cid; /* cmin to insert in output tuples */ + ObjectAddress reladdr; /* address of rel, for ExecCreateTableAs */ int hi_options; /* heap_insert performance options */ BulkInsertState bistate; /* bulk insert state */ } DR_intorel; diff --git a/src/test/regress/expected/matview_with_event_trigger.out b/src/test/regress/expected/matview_with_event_trigger.out new file mode 100644 index 0000000000..8279e35a87 --- /dev/null +++ b/src/test/regress/expected/matview_with_event_trigger.out @@ -0,0 +1,67 @@ +create database matview_with_event_trigger; +\c matview_with_event_trigger +create table testTab1 +( + menu_name text, + menu_id varchar2(100), + ISLEAF varchar(100), + LEVELS varchar(100), + function_url varchar2(500), + menu_parent varchar2(100), + menu_listorder clob, + menu_departmentid char(5), + menu_functionid numeric, + menu_disabled int, + primary key(menu_name,menu_parent) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "testtab1_pkey" for table "testtab1" +create table testTab2 ( + function_moduleid int, + function_id numeric, + function_interact varchar(100), + function_name varchar(100), + function_description varchar(100) +); +insert into testTab1 
values('蔬菜','1','0','0','https://www.tapd.cn/60475194/sparrow/tcase/view/1160475194001155488?url_cache_key=from_urlaaaaa%23','6','ASD-20240112001','A1','3.14159',0); +insert into testTab1 values('土豆','2','1','1','https://www.tapd.cn/60475194/sparrow/tcase/view/11604755488?url_cache_key=from_urlaaaaa%23','6','ASD-20240112001','A1','3.1415926535',0); +insert into testTab1 values('动物','3','0','0','htt4788?url_cache_key=from_urlaaaaa%23%%','3','ASD-20240112002','A2','3.14159',0); +insert into testTab1 values('蜗牛','4','1','1','http://4788?url_cache_key=from_urlaaaaa%23%%','3','ASD-20240112002','A2','3.14159',0); +insert into testTab1 values('青菜','5','1','1','http://4788?url_cache_key=fr_%23%%---//###@!','1','ASD-20240112003','A2','3.1415926535',0); +create publication pub_test for all tables with (ddl='all'); +CREATE MATERIALIZED VIEW test_mv1 +( + NAME, + ROOT, + ISLEAF, + LEVELS, + PATH, + URL, + ID, + PID, + FUNCTION_MODULEID, + FUNCTION_ID, + LISTORDER, + DEPARTMENTID, + INTERACT, + MENU_DEPARTMENTID, + FUNCTION_NAME, + FUNCTION_DESCRIPTION +) with (STORAGE_TYPE=ustore, INIT_TD=40) +AS +SELECT menu_name AS name, null, null, null, null, function_url AS url +, menu_id AS id, menu_parent AS pid, f.function_moduleid, f.function_id, menu_listorder AS listorder +, m.menu_departmentid AS departmentid, f.function_interact AS interact, menu_departmentid, f.function_name, f.function_description +FROM testTab1 m +LEFT JOIN testTab2 f ON m.menu_functionid = f.function_id; +select * from test_mv1; + name | root | isleaf | levels | path | url | id | pid | function_moduleid | function_id | listorder | departmentid | interact | menu_departmentid | function_name | function_description +------+------+--------+--------+------+----------------------------------------------------------------------------------------------------+----+-----+-------------------+-------------+-----------------+--------------+----------+-------------------+---------------+---------------------- + 蜗牛 | | | | | 
http://4788?url_cache_key=from_urlaaaaa%23%% | 4 | 3 | | | ASD-20240112002 | A2 | | A2 | | + 动物 | | | | | htt4788?url_cache_key=from_urlaaaaa%23%% | 3 | 3 | | | ASD-20240112002 | A2 | | A2 | | + 蔬菜 | | | | | https://www.tapd.cn/60475194/sparrow/tcase/view/1160475194001155488?url_cache_key=from_urlaaaaa%23 | 1 | 6 | | | ASD-20240112001 | A1 | | A1 | | + 青菜 | | | | | http://4788?url_cache_key=fr_%23%%---//###@! | 5 | 1 | | | ASD-20240112003 | A2 | | A2 | | + 土豆 | | | | | https://www.tapd.cn/60475194/sparrow/tcase/view/11604755488?url_cache_key=from_urlaaaaa%23 | 2 | 6 | | | ASD-20240112001 | A1 | | A1 | | +(5 rows) + +\c regression +drop database matview_with_event_trigger; diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index ee7451077c..95bfab29a7 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -727,7 +727,7 @@ test: string_digit_to_numeric tablesample_3 tablesample_4 # Another group of parallel tests # ---------- #test: collate tablesample tablesample_1 tablesample_2 matview -test: matview_single +test: matview_single matview_with_event_trigger # ---------- # Another group of parallel tests diff --git a/src/test/regress/parallel_schedule0B b/src/test/regress/parallel_schedule0B index 54763a70b4..aa8de33f00 100644 --- a/src/test/regress/parallel_schedule0B +++ b/src/test/regress/parallel_schedule0B @@ -247,7 +247,7 @@ test: string_digit_to_numeric tablesample_3 tablesample_4 # Another group of parallel tests # ---------- #test: collate tablesample tablesample_1 tablesample_2 matview -test: matview_single +test: matview_single matview_with_event_trigger # ---------- # Another group of parallel tests diff --git a/src/test/regress/sql/matview_with_event_trigger.sql b/src/test/regress/sql/matview_with_event_trigger.sql new file mode 100644 index 0000000000..3d7f85b961 --- /dev/null +++ b/src/test/regress/sql/matview_with_event_trigger.sql @@ -0,0 +1,65 @@ +create database 
matview_with_event_trigger; + +\c matview_with_event_trigger + +create table testTab1 +( + menu_name text, + menu_id varchar2(100), + ISLEAF varchar(100), + LEVELS varchar(100), + function_url varchar2(500), + menu_parent varchar2(100), + menu_listorder clob, + menu_departmentid char(5), + menu_functionid numeric, + menu_disabled int, + primary key(menu_name,menu_parent) +); + +create table testTab2 ( + function_moduleid int, + function_id numeric, + function_interact varchar(100), + function_name varchar(100), + function_description varchar(100) +); + +insert into testTab1 values('蔬菜','1','0','0','https://www.tapd.cn/60475194/sparrow/tcase/view/1160475194001155488?url_cache_key=from_urlaaaaa%23','6','ASD-20240112001','A1','3.14159',0); +insert into testTab1 values('土豆','2','1','1','https://www.tapd.cn/60475194/sparrow/tcase/view/11604755488?url_cache_key=from_urlaaaaa%23','6','ASD-20240112001','A1','3.1415926535',0); +insert into testTab1 values('动物','3','0','0','htt4788?url_cache_key=from_urlaaaaa%23%%','3','ASD-20240112002','A2','3.14159',0); +insert into testTab1 values('蜗牛','4','1','1','http://4788?url_cache_key=from_urlaaaaa%23%%','3','ASD-20240112002','A2','3.14159',0); +insert into testTab1 values('青菜','5','1','1','http://4788?url_cache_key=fr_%23%%---//###@!','1','ASD-20240112003','A2','3.1415926535',0); + +create publication pub_test for all tables with (ddl='all'); + +CREATE MATERIALIZED VIEW test_mv1 +( + NAME, + ROOT, + ISLEAF, + LEVELS, + PATH, + URL, + ID, + PID, + FUNCTION_MODULEID, + FUNCTION_ID, + LISTORDER, + DEPARTMENTID, + INTERACT, + MENU_DEPARTMENTID, + FUNCTION_NAME, + FUNCTION_DESCRIPTION +) with (STORAGE_TYPE=ustore, INIT_TD=40) +AS +SELECT menu_name AS name, null, null, null, null, function_url AS url +, menu_id AS id, menu_parent AS pid, f.function_moduleid, f.function_id, menu_listorder AS listorder +, m.menu_departmentid AS departmentid, f.function_interact AS interact, menu_departmentid, f.function_name, f.function_description +FROM 
testTab1 m +LEFT JOIN testTab2 f ON m.menu_functionid = f.function_id; + +select * from test_mv1; + +\c regression +drop database matview_with_event_trigger; -- Gitee From 70771ca8f90a0f688e99003535cf44bb82be5a12 Mon Sep 17 00:00:00 2001 From: wenger Date: Thu, 5 Sep 2024 11:06:36 +0800 Subject: [PATCH 316/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=80=BB=E8=BE=91?= =?UTF-8?q?=E5=A4=8D=E5=88=B6=E7=8E=AF=E5=A2=83=EF=BC=8Ccreate=20schema?= =?UTF-8?q?=E6=8C=87=E5=AE=9A=E5=B1=9E=E4=B8=BB=E5=90=8C=E6=AD=A5=E5=A4=B1?= =?UTF-8?q?=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/ddldeparse.cpp | 12 +++++++----- .../optimizer/commands/schemacmds.cpp | 4 ++++ src/gausskernel/process/tcop/utility.cpp | 3 ++- .../A/ddl_create_schema.setup | 8 ++++++++ .../ddl_replication_sql/A/ddl_create_schema.sql | 1 + .../A/ddl_create_schema.teardown | 8 ++++++++ .../B/acceptable_diff/ddl_alter_schema.diff | 17 +++++++++++++++++ .../B/ddl_alter_table_fastcheck.setup | 6 +----- .../B/ddl_create_schema.setup | 8 ++++++++ .../ddl_replication_sql/B/ddl_create_schema.sql | 1 + .../B/ddl_create_schema.teardown | 8 ++++++++ 11 files changed, 65 insertions(+), 11 deletions(-) create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.teardown create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_schema.diff create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.setup create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.sql create mode 100644 src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.teardown diff --git a/src/gausskernel/optimizer/commands/ddldeparse.cpp 
b/src/gausskernel/optimizer/commands/ddldeparse.cpp index 548f7f6f9d..9948cb6aa4 100644 --- a/src/gausskernel/optimizer/commands/ddldeparse.cpp +++ b/src/gausskernel/optimizer/commands/ddldeparse.cpp @@ -891,7 +891,7 @@ static ObjTree* deparse_AlterSchemaStmt(Oid objectId, Node *parsetree) * Verbose syntax * CREATE SCHEMA %{if_not_exists}s %{name}I %{authorization}s */ -static ObjTree* deparse_CreateSchemaStmt(Oid objectId, Node *parsetree) +static ObjTree* deparse_CreateSchemaStmt(Oid objectId, Node *parsetree, bool *include_owner) { CreateSchemaStmt *node = (CreateSchemaStmt *) parsetree; ObjTree *ret; @@ -904,12 +904,14 @@ static ObjTree* deparse_CreateSchemaStmt(Oid objectId, Node *parsetree) node->schemaname ? node->schemaname : ""); auth = new_objtree("AUTHORIZATION"); - if (node->authid) + if (node->authid) { append_string_object(auth, "%{authorization_role}I", "authorization_role", node->authid); - else + *include_owner = false; + } else { append_not_present(auth, "%{authorization_role}I"); + } append_object_object(ret, "%{authorization}s", auth); @@ -3658,7 +3660,7 @@ static ObjTree* deparse_simple_command(CollectedCommand *cmd, bool *include_owne return deparse_CreateFunction(objectId, parsetree); case T_CreateSchemaStmt: - return deparse_CreateSchemaStmt(objectId, parsetree); + return deparse_CreateSchemaStmt(objectId, parsetree, include_owner); case T_CreateSeqStmt: return deparse_CreateSeqStmt(objectId, parsetree); @@ -3685,7 +3687,7 @@ static ObjTree* deparse_simple_command(CollectedCommand *cmd, bool *include_owne return (ObjTree*)((deparseCollectedCommand)(u_sess->hook_cxt.deparseCollectedCommandHook)) (DEPARSE_SIMPLE_COMMAND, cmd, NULL, NULL); } - elog(INFO, "unrecognized node type in deparse command: %d", + elog(LOG, "unrecognized node type in deparse command: %d", (int) nodeTag(parsetree)); } diff --git a/src/gausskernel/optimizer/commands/schemacmds.cpp b/src/gausskernel/optimizer/commands/schemacmds.cpp index 266efb5b5e..3688e3cf9d 100644 --- 
a/src/gausskernel/optimizer/commands/schemacmds.cpp +++ b/src/gausskernel/optimizer/commands/schemacmds.cpp @@ -386,6 +386,7 @@ void AlterSchemaCommand(AlterSchemaStmt* stmt) AclResult aclresult; const int STR_SCHEMA_NAME_LENGTH = 9; const int STR_SNAPSHOT_LENGTH = 8; + ObjectAddress address; if (withBlockchain && ((strncmp(nspName, "dbe_perf", STR_SCHEMA_NAME_LENGTH) == 0) || (strncmp(nspName, "snapshot", STR_SNAPSHOT_LENGTH) == 0))) { @@ -420,6 +421,9 @@ void AlterSchemaCommand(AlterSchemaStmt* stmt) (errcode(ERRCODE_RESERVED_NAME), errmsg("The system schema \"%s\" doesn't allow to alter to blockchain schema", nspName))); + ObjectAddressSet(address, NamespaceNameIndexId, HeapTupleGetOid(tup)); + EventTriggerCollectSimpleCommand(address, InvalidObjectAddress, (Node*)stmt); + Datum new_record[Natts_pg_namespace] = {0}; bool new_record_nulls[Natts_pg_namespace] = {false}; bool new_record_repl[Natts_pg_namespace] = {false}; diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index d413deede6..e7366e4d72 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -5217,6 +5217,7 @@ ProcessUtilitySlow(Node *parse_tree, } #else AlterSchemaCommand((AlterSchemaStmt*)parse_tree); + commandCollected = true; #endif break; @@ -6634,7 +6635,7 @@ ProcessUtilitySlow(Node *parse_tree, ExecUtilityStmtOnNodes(query_string, exec_nodes, sent_to_remote, false, EXEC_ON_ALL_NODES, false); } } else { - ExecAlterOwnerStmt((AlterOwnerStmt*)parse_tree); + address = ExecAlterOwnerStmt((AlterOwnerStmt*)parse_tree); } #else AlterOwnerStmt *stmt = (AlterOwnerStmt *) parse_tree; diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.setup b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.setup new file mode 100644 index 0000000000..5358fce0bf --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.setup @@ -0,0 +1,8 @@ 
+#!/bin/bsh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $pub_node1_port "CREATE USER test_u1 PASSWORD 'Aa123456'" +exec_sql_with_user $case_use_db $sub_node1_port "CREATE USER test_u1 PASSWORD 'Aa123456'" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.sql b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.sql new file mode 100644 index 0000000000..831e0b2e20 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.sql @@ -0,0 +1 @@ +create schema test_own authorization test_u1; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.teardown b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.teardown new file mode 100644 index 0000000000..0d3a09fb85 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/A/ddl_create_schema.teardown @@ -0,0 +1,8 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $sub_node1_port "DROP USER test_u1" +exec_sql $case_use_db $sub_node1_port "DROP USER test_u1" diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_schema.diff b/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_schema.diff new file mode 100644 index 0000000000..d4bf13fdb3 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/acceptable_diff/ddl_alter_schema.diff @@ -0,0 +1,17 @@ +28,37c28 +< -- Name: regtest_unpriv_user; Type: SCHEMA; Schema: -; Owner: regtest_unpriv_user +< -- +< +< CREATE SCHEMA regtest_unpriv_user; +< +< +< ALTER SCHEMA regtest_unpriv_user OWNER TO regtest_unpriv_user; +< +< -- +< -- Name: test_sche1; Type: SCHEMA; Schema: -; Owner: regtest_unpriv_user +--- +> -- Name: test_sche1; Type: SCHEMA; Schema: -; Owner: ddl_test_user +44c35 +< ALTER 
SCHEMA test_sche1 OWNER TO regtest_unpriv_user; +--- +> ALTER SCHEMA test_sche1 OWNER TO ddl_test_user; diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup index 4db8109ca8..db2469581c 100644 --- a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_alter_table_fastcheck.setup @@ -8,8 +8,4 @@ exec_sql_with_user $case_use_db $pub_node1_port "create schema fastcheck;set sea exec_sql_with_user $case_use_db $sub_node1_port "create schema fastcheck;set search_path=fastcheck;create table t1_full (a int, b text, myc int); insert into t1_full values (101, 'a', 1), (102, 'b', 2);" exec_sql_with_user $case_use_db $pub_node1_port "set search_path=fastcheck;create table tkey1 (a int primary key, b text);insert into tkey1 values (1, 'a'), (2, 'b'), (3, 'c');alter table tkey1 replica identity default;" -exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;create table tkey1 (a int primary key, b text, myc int); insert into tkey1 values (101, '101a', 1), (102, '102b', 2);" - -exec_sql_with_user $case_use_db $pub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" - -exec_sql_with_user $case_use_db $sub_node1_port "CREATE USER regtest_unpriv_user PASSWORD 'gauss@123'" \ No newline at end of file +exec_sql_with_user $case_use_db $sub_node1_port "set search_path=fastcheck;create table tkey1 (a int primary key, b text, myc int); insert into tkey1 values (101, '101a', 1), (102, '102b', 2);" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.setup b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.setup new file mode 100644 index 0000000000..5358fce0bf --- /dev/null +++ 
b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.setup @@ -0,0 +1,8 @@ +#!/bin/bsh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql_with_user $case_use_db $pub_node1_port "CREATE USER test_u1 PASSWORD 'Aa123456'" +exec_sql_with_user $case_use_db $sub_node1_port "CREATE USER test_u1 PASSWORD 'Aa123456'" \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.sql b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.sql new file mode 100644 index 0000000000..831e0b2e20 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.sql @@ -0,0 +1 @@ +create schema test_own authorization test_u1; \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.teardown b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.teardown new file mode 100644 index 0000000000..0d3a09fb85 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication_sql/B/ddl_create_schema.teardown @@ -0,0 +1,8 @@ +#!/bin/sh + +source $1/env_utils.sh $1 $2 +subscription_dir=$1 +case_use_db=$3 + +exec_sql $case_use_db $sub_node1_port "DROP USER test_u1" +exec_sql $case_use_db $sub_node1_port "DROP USER test_u1" -- Gitee From 603b2b229a785d73090c0eb73b9c93363841f581 Mon Sep 17 00:00:00 2001 From: wenger Date: Wed, 4 Sep 2024 17:49:54 +0800 Subject: [PATCH 317/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=BC=80=E5=90=AFddl?= =?UTF-8?q?=E9=80=BB=E8=BE=91=E5=A4=8D=E5=88=B6=E5=90=8E=EF=BC=8C=E5=88=A0?= =?UTF-8?q?=E9=99=A4=E4=B8=8D=E5=AD=98=E5=9C=A8type=E6=97=B6=E6=8A=A5?= =?UTF-8?q?=E9=94=99=E4=B8=8D=E5=8F=8B=E5=A5=BD=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizer/commands/ddldeparse.cpp | 2 +- .../replication/logical/ddltrigger.cpp | 24 ++++++++++++++++--- 
.../expected/type_with_event_trigger.out | 7 ++++++ src/test/regress/parallel_schedule0 | 2 +- src/test/regress/parallel_schedule0B | 2 +- .../regress/sql/type_with_event_trigger.sql | 10 ++++++++ 6 files changed, 41 insertions(+), 6 deletions(-) create mode 100644 src/test/regress/expected/type_with_event_trigger.out create mode 100644 src/test/regress/sql/type_with_event_trigger.sql diff --git a/src/gausskernel/optimizer/commands/ddldeparse.cpp b/src/gausskernel/optimizer/commands/ddldeparse.cpp index 9948cb6aa4..52c782c040 100644 --- a/src/gausskernel/optimizer/commands/ddldeparse.cpp +++ b/src/gausskernel/optimizer/commands/ddldeparse.cpp @@ -929,7 +929,7 @@ static ObjTree* deparse_CreateSchemaStmt(Oid objectId, Node *parsetree, bool *in * If isgrant is true, then this function is called while deparsing GRANT * statement and some object names are replaced. */ -static const char* string_objtype(ObjectType objtype, bool isgrant) +const char* string_objtype(ObjectType objtype, bool isgrant) { switch (objtype) { case OBJECT_COLUMN: diff --git a/src/gausskernel/storage/replication/logical/ddltrigger.cpp b/src/gausskernel/storage/replication/logical/ddltrigger.cpp index 0a19ffc02f..fe4d033994 100644 --- a/src/gausskernel/storage/replication/logical/ddltrigger.cpp +++ b/src/gausskernel/storage/replication/logical/ddltrigger.cpp @@ -28,6 +28,8 @@ #include "tcop/ddldeparse.h" #include "utils/lsyscache.h" +const char* string_objtype(ObjectType objtype, bool isgrant); + /* * Check if the command can be published. 
* @@ -110,7 +112,6 @@ Datum publication_deparse_ddl_command_start(PG_FUNCTION_ARGS) { EventTriggerData *trigdata; - char *command = psprintf("Drop table command start"); DropStmt *stmt; ListCell *cell1; @@ -126,11 +127,18 @@ publication_deparse_ddl_command_start(PG_FUNCTION_ARGS) Node *object = (Node*)lfirst(cell1); ObjectAddress address; Relation relation = NULL; + StringInfoData commandbuf; + char *removetype = NULL; char *schemaname = NULL; char *objname = NULL; TypeName *typname = NULL; Node *ptype = NULL; + initStringInfo(&commandbuf); + + removetype = pstrdup(string_objtype(stmt->removeType, false)); + removetype = pg_strtolower(removetype); + if (stmt->removeType == OBJECT_TYPE) { /* for DROP TYPE */ Assert(IsA(object, List) && list_length((List*)object) >= 1); @@ -152,6 +160,15 @@ publication_deparse_ddl_command_start(PG_FUNCTION_ARGS) &relation, AccessExclusiveLock, true); + if (!OidIsValid(address.objectId)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("%s \"%s\" does not exist", + removetype, objname))); + } + + appendStringInfo(&commandbuf, "Drop %s command start", removetype); + + pfree(removetype); /* Object does not exist, nothing to do */ if (relation) { @@ -170,16 +187,17 @@ publication_deparse_ddl_command_start(PG_FUNCTION_ARGS) */ if (support) LogLogicalDDLMessage("deparse", address.objectId, DCT_TableDropStart, - command, strlen(command) + 1); + commandbuf.data, strlen(commandbuf.data) + 1); relation_close(relation, NoLock); } else if (stmt->removeType == OBJECT_TYPE) { support = type_support_ddl_replication(address.objectId); if (support) LogLogicalDDLMessage("deparse", address.objectId, - DCT_TypeDropStart, command, strlen(command) + 1); + DCT_TypeDropStart, commandbuf.data, strlen(commandbuf.data) + 1); } } + return PointerGetDatum(NULL); } diff --git a/src/test/regress/expected/type_with_event_trigger.out b/src/test/regress/expected/type_with_event_trigger.out new file mode 100644 index 0000000000..a247375c9b --- /dev/null 
+++ b/src/test/regress/expected/type_with_event_trigger.out @@ -0,0 +1,7 @@ +create database type_with_event_trigger; +\c type_with_event_trigger +create publication pub_test for all tables with (ddl='all'); +drop type type_not_exists cascade; +ERROR: type "type_not_exists" does not exist +\c regression +drop database type_with_event_trigger; diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 95bfab29a7..50c823bb04 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -504,7 +504,7 @@ test: hw_procedure_define #test: hw_anonymous_block #test: hw_procedure# test: hw_grant_all hw_dynamic_sql hw_func_return_out -test: hw_package_function type_replace +test: hw_package_function type_replace type_with_event_trigger #show plan #test: plan_hint diff --git a/src/test/regress/parallel_schedule0B b/src/test/regress/parallel_schedule0B index aa8de33f00..0bd00fc718 100644 --- a/src/test/regress/parallel_schedule0B +++ b/src/test/regress/parallel_schedule0B @@ -50,7 +50,7 @@ test: hw_procedure_define #test: hw_anonymous_block #test: hw_procedure# test: hw_grant_all hw_dynamic_sql hw_func_return_out -test: hw_package_function type_replace +test: hw_package_function type_replace type_with_event_trigger #show plan #test: plan_hint diff --git a/src/test/regress/sql/type_with_event_trigger.sql b/src/test/regress/sql/type_with_event_trigger.sql new file mode 100644 index 0000000000..03c7090c21 --- /dev/null +++ b/src/test/regress/sql/type_with_event_trigger.sql @@ -0,0 +1,10 @@ +create database type_with_event_trigger; + +\c type_with_event_trigger + +create publication pub_test for all tables with (ddl='all'); + +drop type type_not_exists cascade; + +\c regression +drop database type_with_event_trigger; \ No newline at end of file -- Gitee From d9005042dc95cb6eef42efe0d9f2f6eabe131eec Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Thu, 19 Sep 2024 15:08:06 +0800 Subject: [PATCH 318/347] 
=?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=EF=BC=9A=E4=BF=AE=E5=A4=8Dyat=E6=89=A7=E8=A1=8Cshrink=E7=9B=B8?= =?UTF-8?q?=E5=85=B3=E8=AF=AD=E5=8F=A5=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98.=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/nodes/copyfuncs.cpp | 11 +++++++++++ src/common/backend/nodes/equalfuncs.cpp | 11 +++++++++++ src/common/backend/nodes/outfuncs.cpp | 11 +++++++++++ src/common/backend/nodes/readfuncs.cpp | 14 +++++++++++++- 4 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index fbbbde7e25..e94cb815ce 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -7815,6 +7815,14 @@ static CharsetClause *_copyCharsetClause(const CharsetClause* from) return newnode; } +static ShrinkStmt *_copyShrinkStmt(const ShrinkStmt* from) +{ + ShrinkStmt* newnode = makeNode(ShrinkStmt); + COPY_NODE_FIELD(relations); + COPY_SCALAR_FIELD(nowait); + return newnode; +} + static PrefixKey* _copyPrefixKey(const PrefixKey* from) { PrefixKey* newnode = makeNode(PrefixKey); @@ -9286,6 +9294,9 @@ void* copyObject(const void* from) case T_CharsetClause: retval = _copyCharsetClause((CharsetClause *)from); break; + case T_ShrinkStmt: + retval = _copyShrinkStmt((ShrinkStmt*) from); + break; #ifdef USE_SPQ case T_Motion: retval = _copyMotion((Motion*)from); diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index 81c433b459..b0f0562d98 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -3627,6 +3627,14 @@ static bool _equalCharsetClause(const CharsetClause* a, const CharsetClause* b) return true; } + +static bool _equalShrinkStmt(const ShrinkStmt* a, const ShrinkStmt* b) +{ + COMPARE_NODE_FIELD(relations); + 
COMPARE_SCALAR_FIELD(nowait); + return true; +} + static bool _equalPrefixKey(const PrefixKey* a, const PrefixKey* b) { COMPARE_NODE_FIELD(arg); @@ -4636,6 +4644,9 @@ bool equal(const void* a, const void* b) case T_CharsetClause: retval = _equalCharsetClause((const CharsetClause*) a, (const CharsetClause*) b); break; + case T_ShrinkStmt: + retval = _equalShrinkStmt((const ShrinkStmt*)a, (const ShrinkStmt*)b); + break; case T_PrefixKey: retval = _equalPrefixKey((PrefixKey *)a, (PrefixKey *)b); break; diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index e70984e914..c0f30f9d3b 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -6384,6 +6384,14 @@ static void _outCharsetClause(StringInfo str, CharsetClause* node) WRITE_LOCATION_FIELD(location); } +static void _outShrinkStmt(StringInfo str, ShrinkStmt* node) +{ + WRITE_NODE_TYPE("SHRINK"); + WRITE_NODE_FIELD(relations); + WRITE_BOOL_FIELD(nowait); +} + + static void _outPrefixKey(StringInfo str, PrefixKey* node) { WRITE_NODE_TYPE("PREFIXKEY"); @@ -7375,6 +7383,9 @@ static void _outNode(StringInfo str, const void* obj) case T_CharsetClause: _outCharsetClause(str, (CharsetClause*)obj); break; + case T_ShrinkStmt: + _outShrinkStmt(str, (ShrinkStmt*)obj); + break; case T_AutoIncrement: _outAutoIncrement(str, (AutoIncrement*)obj); break; diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index de7553112d..5b3cfff846 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -6471,6 +6471,16 @@ static CharsetClause* _readCharsetClause() READ_DONE(); } +static ShrinkStmt* _readShrinkStmt() +{ + READ_LOCALS(ShrinkStmt); + + READ_NODE_FIELD(relations); + READ_BOOL_FIELD(nowait); + + READ_DONE(); +} + static PrefixKey* _readPrefixKey() { READ_LOCALS(PrefixKey); @@ -7089,7 +7099,9 @@ Node* parseNodeString(void) return_value = _readRotateClause(); } else if 
(MATCH("UNROTATEINFO", 12)) { return_value = _readUnrotateClause(); - } else { + } else if (MATCH("SHRINK", 6)) { + return_value = _readShrinkStmt(); + } else { ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("parseNodeString(): badly formatted node string \"%s\"...", token))); -- Gitee From 89f38655fd11f9b71e80ae712f3e01362cf874b8 Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 19 Sep 2024 16:09:59 +0800 Subject: [PATCH 319/347] =?UTF-8?q?=E5=A4=84=E7=90=86fab02519=E5=9C=A8rele?= =?UTF-8?q?ase=E7=89=88=E6=9C=AC=E4=B8=8Bcore=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../runtime/executor/nodeStartWithOp.cpp | 73 +++++++++++-------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp index 1d1bc1fc76..76870b5b7b 100644 --- a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp +++ b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp @@ -475,18 +475,27 @@ static List* peekNextLevel(TupleTableSlot* startSlot, PlanState* outerNode, int static TupleTableSlot* updateTuplePseudoColumnValue(TupleTableSlot* slot, StartWithOpState *node, StartWithOpColumnType type, Datum value) { + HeapTuple tup = NULL; + TupleDesc tupDesc = slot->tts_tupleDescriptor; + + ResetResultSlotAttValueArray(node, node->sw_values, node->sw_isnull); + Datum *values = node->sw_values; + bool *isnull = node->sw_isnull; + + /* fetch physical tuple */ + tup = ExecFetchSlotTuple(slot); + heap_deform_tuple(tup, tupDesc, values, isnull); + AttrNumber attnum = node->sw_pseudoCols[type]->resno; - int attcount = slot->tts_tupleDescriptor->natts; - bool nulls[attcount] = {false}; - Datum values[attcount] = {0}; - bool replaces[attcount] = {false}; - HeapTuple oldtup = (HeapTuple)slot->tts_tuple; - replaces[attnum - 1] = true; - nulls[attnum - 1] = false; - values[attnum - 1] = value; - HeapTuple 
newtup = heap_modify_tuple(oldtup, slot->tts_tupleDescriptor, values, nulls, replaces); - slot->tts_tuple = newtup; - heap_freetuple_ext(oldtup); + + /* set proper value and mark isnull to false */ + values[attnum - 1] = Int32GetDatum(value); + isnull[attnum - 1] = false; + + /* create a local copy tuple and store it to tuplestore, mark shouldFree as 'true ' */ + tup = heap_form_tuple(tupDesc, values, isnull); + slot = ExecStoreTuple(tup, slot, InvalidBuffer, true); + return slot; } /* @@ -525,27 +534,31 @@ static bool depth_first_connect(int currentLevel, StartWithOpState *node, List* isCycle = true; continue; } - updateTuplePseudoColumnValue(dstSlot, node, SWCOL_ROWNUM, *dfsRowCount + 1); - RecursiveUnionState* runode = castNode(RecursiveUnionState, outerNode); - if (currentLevel == 1 || ExecStartWithRowLevelQual(runode, dstSlot)) { - tuplestore_puttupleslot(outputStore, dstSlot); - (*dfsRowCount)++; - int rowCountBefore = *dfsRowCount; - - /* Go into the depth NOW: sibling tuples won't get processed - * until all children are done */ - node->sw_rownum = rowCountBefore; - List* children = peekNextLevel(leader, outerNode, currentLevel); - bool expectCycle = depth_first_connect(currentLevel + 1, node, - children, - dfsRowCount); - if (expectCycle) { - node->sw_cycle_rowmarks = lappend_int(node->sw_cycle_rowmarks, rowCountBefore); + if (currentLevel != 1) { + dstSlot = updateTuplePseudoColumnValue(dstSlot, node, SWCOL_ROWNUM, *dfsRowCount + 1); + RecursiveUnionState* runode = castNode(RecursiveUnionState, outerNode); + if (!ExecStartWithRowLevelQual(runode, dstSlot)) { + return isCycle; } + } - if (!children) { - node->sw_leaf_rowmarks = lappend_int(node->sw_leaf_rowmarks, rowCountBefore); - } + tuplestore_puttupleslot(outputStore, dstSlot); + (*dfsRowCount)++; + int rowCountBefore = *dfsRowCount; + + /* Go into the depth NOW: sibling tuples won't get processed + * until all children are done */ + node->sw_rownum = rowCountBefore; + List* children = 
peekNextLevel(leader, outerNode, currentLevel); + bool expectCycle = depth_first_connect(currentLevel + 1, node, + children, + dfsRowCount); + if (expectCycle) { + node->sw_cycle_rowmarks = lappend_int(node->sw_cycle_rowmarks, rowCountBefore); + } + + if (!children) { + node->sw_leaf_rowmarks = lappend_int(node->sw_leaf_rowmarks, rowCountBefore); } } return isCycle; -- Gitee From 0f4950c5c16883ed859594bd64290757ec556c5f Mon Sep 17 00:00:00 2001 From: bowenliu Date: Thu, 19 Sep 2024 20:10:53 +0800 Subject: [PATCH 320/347] push dms commitid --- src/gausskernel/ddes/ddes_commit_id | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index 2534f1b7ec..c04756dec1 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ -dms_commit_id=402eef8afae247c9592959c77fcf136aac9e2dd9 +dms_commit_id=732601209670379e5d00b7abff66628890b101cc dss_commit_id=621eb9d6aac34726db404446511be2de9ae32a3f cbb_commit_id=8ef2bfa90731cf48bad7c8d4d5f5b1509b8368b3 -- Gitee From 1d01ca2322249b7df7fdfa2f1135f40f8037365b Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 19 Sep 2024 20:22:50 +0800 Subject: [PATCH 321/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=B9=B6=E8=A1=8C?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E4=B8=8Bctescan=E7=9A=84coredump=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/optimizer/plan/createplan.cpp | 8 +++++ src/gausskernel/optimizer/plan/subselect.cpp | 3 +- src/test/regress/expected/smp.out | 28 ++++++++++++++++++ src/test/regress/sql/smp.sql | 29 +++++++++++++++++++ 4 files changed, 66 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp index 2e25e066fc..be1da63f46 100755 --- 
a/src/gausskernel/optimizer/plan/createplan.cpp +++ b/src/gausskernel/optimizer/plan/createplan.cpp @@ -6291,7 +6291,15 @@ static FunctionScan* make_functionscan(List* qptlist, List* qpqual, Index scanre * if functionscan is disallowed to smp, and cursorPlan has stream node, * rebuild non-smp plan. For example, subplan is not support smp. */ + + /* we must restore is_stream/is_stream_support cause they would be change during pgxc_planner */ + bool outer_is_stream = u_sess->opt_cxt.is_stream; + bool outer_is_stream_support = u_sess->opt_cxt.is_stream_support; + ce->plan = (Node*)ReBuildNonSmpPlanForCursorExpr(pstrdup(ce->raw_query_str)); + + u_sess->opt_cxt.is_stream = outer_is_stream; + u_sess->opt_cxt.is_stream_support = outer_is_stream_support; } return node; diff --git a/src/gausskernel/optimizer/plan/subselect.cpp b/src/gausskernel/optimizer/plan/subselect.cpp index 6fa6ecc992..1ccba2b199 100644 --- a/src/gausskernel/optimizer/plan/subselect.cpp +++ b/src/gausskernel/optimizer/plan/subselect.cpp @@ -711,8 +711,7 @@ static Node* make_subplan( /* Reset u_sess->opt_cxt.query_dop. 
*/ u_sess->opt_cxt.query_dop = outerDop; - /* Reset is_stream/is_stream_support because cursorExpr in subquery would change them */ - set_default_stream(); + /* Isolate the params needed by this specific subplan */ plan_params = root->plan_params; root->plan_params = NIL; diff --git a/src/test/regress/expected/smp.out b/src/test/regress/expected/smp.out index 6a2e7aa457..2f608772fd 100644 --- a/src/test/regress/expected/smp.out +++ b/src/test/regress/expected/smp.out @@ -1461,6 +1461,34 @@ where no_o_id not in ( with tmp as (select w_id from bmsql_warehouse where bmsql 0 (1 row) +set query_dop = 1002; +create table store_sales(ss_quantity integer, ss_list_price decimal(7,2)); +create table item(i_brand_id integer); +create table catalog_sales(cs_quantity integer, cs_list_price decimal(7,2)); +with avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales) x) +select 'store' channel + from store_sales + ,item + group by i_brand_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) +union all + select 'catalog' channel + from catalog_sales + ,item + group by i_brand_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + ; + channel +--------- +(0 rows) + +drop table store_sales; +drop table item; +drop table catalog_sales; --clean set search_path=public; drop schema test_smp cascade; diff --git a/src/test/regress/sql/smp.sql b/src/test/regress/sql/smp.sql index 132d28c92f..96f738a5a6 100644 --- a/src/test/regress/sql/smp.sql +++ b/src/test/regress/sql/smp.sql @@ -234,6 +234,35 @@ where no_o_id not in ( with tmp as (select w_id from bmsql_warehouse where bmsql ( select count(*) from bmsql_item group by i_im_id,i_im_id having i_im_id like f1('0') ) tb2; +set query_dop = 1002; +create table store_sales(ss_quantity integer, ss_list_price decimal(7,2)); +create table item(i_brand_id integer); + +create table 
catalog_sales(cs_quantity integer, cs_list_price decimal(7,2)); + +with avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales) x) + +select 'store' channel + from store_sales + ,item + group by i_brand_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) +union all + select 'catalog' channel + from catalog_sales + ,item + group by i_brand_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + ; + +drop table store_sales; +drop table item; +drop table catalog_sales; + --clean set search_path=public; drop schema test_smp cascade; -- Gitee From f7f29f86166df8591590a5696195725ad9a13880 Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Mon, 16 Sep 2024 11:16:58 +0800 Subject: [PATCH 322/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E4=BA=8B=E5=8A=A1?= =?UTF-8?q?=E6=97=A5=E5=BF=97=E9=98=9F=E5=88=97=E6=BB=A1=E5=AF=BC=E8=87=B4?= =?UTF-8?q?=E5=AE=9E=E6=97=B6=E6=9E=84=E5=BB=BA=E5=8D=A1=E4=BD=8F=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98=EF=BC=9B=E8=B0=83=E6=95=B4=E5=AE=9E=E6=97=B6?= =?UTF-8?q?=E6=9E=84=E5=BB=BA=E4=B8=8Bxlog-record=E5=86=85=E5=AD=98?= =?UTF-8?q?=E7=94=B3=E8=AF=B7=E6=AD=A5=E9=95=BF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ondemand_extreme_rto/dispatcher.cpp | 9 +++-- .../ondemand_extreme_rto/page_redo.cpp | 8 +++-- .../ondemand_extreme_rto/xlog_read.cpp | 34 ++++++++++++++++++- .../access/ondemand_extreme_rto/page_redo.h | 5 +-- .../access/ondemand_extreme_rto/xlog_read.h | 2 +- 5 files changed, 47 insertions(+), 11 deletions(-) diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp index 3c96cc7b10..4b456cd4d7 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp +++ 
b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp @@ -443,11 +443,11 @@ void HandleStartupInterruptsForExtremeRto() static void SetOndemandXLogParseFlagValue(uint32 maxParseBufNum) { - g_ondemandXLogParseMemFullValue = maxParseBufNum * ONDEMAND_FORCE_PRUNE_RATIO; + g_ondemandXLogParseMemFullValue = maxParseBufNum * ONDEMAND_HASHMAP_FORCE_PRUNE_RATIO; g_ondemandXLogParseMemCancelPauseVaule = maxParseBufNum * ONDEMAND_DISTRIBUTE_CANCEL_RATIO; g_ondemandXLogParseMemCancelPauseVaulePerPipeline = (maxParseBufNum - g_ondemandXLogParseMemFullValue) / get_batch_redo_num(); - g_ondemandRealtimeBuildQueueFullValue = REALTIME_BUILD_RECORD_QUEUE_SIZE * ONDEMAND_FORCE_PRUNE_RATIO; + g_ondemandRealtimeBuildQueueFullValue = REALTIME_BUILD_RECORD_QUEUE_SIZE * ONDEMAND_RECORD_QUEUE_FORCE_PRUNE_RATIO; } /* Run from the dispatcher thread. */ @@ -474,8 +474,7 @@ void StartRecoveryWorkers(XLogReaderState *xlogreader, uint32 privateLen) rc = memcpy_s(g_dispatcher->restoreControlFile, (size_t)sizeof(ControlFileData), &restoreControlFile, (size_t)sizeof(ControlFileData)); securec_check(rc, "", ""); } - g_dispatcher->maxItemNum = (get_batch_redo_num() + 4) * PAGE_WORK_QUEUE_SIZE * - ITEM_QUQUE_SIZE_RATIO; // 4: a startup, readmanager, txnmanager, txnworker + g_dispatcher->maxItemNum = 3 * REALTIME_BUILD_RECORD_QUEUE_SIZE; // 3: TrxnQueue, SegQueue, Hashmap(reuse) uint32 maxParseBufNum = (uint32)((uint64)g_instance.attr.attr_storage.dms_attr.ondemand_recovery_mem_size * 1024 / (sizeof(XLogRecParseState) + sizeof(ParseBufferDesc) + sizeof(RedoMemSlot))); XLogParseBufferInitFunc(&(g_dispatcher->parseManager), maxParseBufNum, &recordRefOperate, RedoInterruptCallBack); @@ -1843,7 +1842,7 @@ void CopyDataFromOldReader(XLogReaderState *newReaderState, const XLogReaderStat errno_t rc = EOK; if ((newReaderState->readRecordBuf == NULL) || (oldReaderState->readRecordBufSize > newReaderState->readRecordBufSize)) { - if (!allocate_recordbuf(newReaderState, 
oldReaderState->readRecordBufSize)) { + if (!ondemand_allocate_recordbuf(newReaderState, oldReaderState->readRecordBufSize)) { ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index e263dad3f3..32704b138b 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -1781,7 +1781,11 @@ static void TrxnManagerPruneAndDistributeIfRealtimeBuildFailover() static void TrxnManagerPruneIfQueueFullInRealtimeBuild() { - while (SS_ONDEMAND_RECOVERY_TRXN_QUEUE_FULL && SS_ONDEMAND_REALTIME_BUILD_NORMAL) { + /* + * we used OndemandTrxnQueueFullInRealtimeBuild instead of SS_ONDEMAND_RECOVERY_TRXN_QUEUE_FULL, because + * OndemandCtrlWorker may not get pause status immediately + */ + while (OndemandTrxnQueueFullInRealtimeBuild() && SS_ONDEMAND_REALTIME_BUILD_NORMAL) { TrxnManagerProcHashMapPrune(); RedoInterruptCallBack(); } @@ -1848,7 +1852,6 @@ bool TrxnManagerDistributeItemsBeforeEnd(RedoItem *item) TestXLogReaderProbe(UTEST_EVENT_RTO_TRXNMGR_DISTRIBUTE_ITEMS, __FUNCTION__, &item->record); #endif - TrxnManagerPruneIfQueueFullInRealtimeBuild(); TrxnManagerAddTrxnRecord(item, syncRecord); CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]); } @@ -1904,6 +1907,7 @@ void TrxnManagerMain() } } CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + TrxnManagerPruneIfQueueFullInRealtimeBuild(); TrxnManagerPruneAndDistributeIfRealtimeBuildFailover(); if (!SPSCBlockingQueueIsEmpty(g_redoWorker->queue)) { GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp index 200996195e..2e977086f9 100644 --- 
a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp @@ -394,6 +394,38 @@ err: return -1; } +/* + * copy from allocate_recordbuf + * + * In ondemand realtime build, we need save readRecordBuf for segQueue and + * trxnQueue, so we allocate smaller (512) for save memory. + */ +bool ondemand_allocate_recordbuf(XLogReaderState *state, uint32 reclength) +{ + uint32 newSize = reclength; + const uint32 recordBufferAllocStep = 512; + + if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { + newSize += recordBufferAllocStep - (newSize % recordBufferAllocStep); + } else { + newSize += XLOG_BLCKSZ - (newSize % XLOG_BLCKSZ); + } + newSize = Max(newSize, recordBufferAllocStep); + + if (state->readRecordBuf != NULL) { + pfree(state->readRecordBuf); + state->readRecordBuf = NULL; + } + state->readRecordBuf = (char *)palloc_extended(newSize, MCXT_ALLOC_NO_OOM); + if (state->readRecordBuf == NULL) { + state->readRecordBufSize = 0; + return false; + } + + state->readRecordBufSize = newSize; + return true; +} + XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg, char* xlogPath) { XLogRecord *record = NULL; @@ -521,7 +553,7 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * /* * Enlarge readRecordBuf as needed. 
*/ - if (total_len > state->readRecordBufSize && !allocate_recordbuf(state, total_len)) { + if (total_len > state->readRecordBufSize && !ondemand_allocate_recordbuf(state, total_len)) { /* We treat this as a "bogus data" condition */ report_invalid_record(state, "record length %u at %X/%X too long", total_len, (uint32)(RecPtr >> 32), (uint32)RecPtr); diff --git a/src/include/access/ondemand_extreme_rto/page_redo.h b/src/include/access/ondemand_extreme_rto/page_redo.h index 25585c5a9d..0689cf9391 100644 --- a/src/include/access/ondemand_extreme_rto/page_redo.h +++ b/src/include/access/ondemand_extreme_rto/page_redo.h @@ -42,7 +42,8 @@ namespace ondemand_extreme_rto { #define ONDEMAND_DISTRIBUTE_CANCEL_RATIO 0.5 -#define ONDEMAND_FORCE_PRUNE_RATIO 0.99 +#define ONDEMAND_HASHMAP_FORCE_PRUNE_RATIO 0.99 +#define ONDEMAND_RECORD_QUEUE_FORCE_PRUNE_RATIO 0.95 #define ONDEMAND_HASHTAB_SWITCH_LIMIT 100000 #define SEG_PROC_PIPELINE_SLOT 0 #define ONDEMAND_LOG_PAUSE_STATUS_TIME 30 @@ -52,7 +53,7 @@ namespace ondemand_extreme_rto { #define ONDEMAND_HASHMAP_ENTRY_NEED_REDO 2 static const uint32 PAGE_WORK_QUEUE_SIZE = 65536; -static const uint32 REALTIME_BUILD_RECORD_QUEUE_SIZE = 4194304; +static const uint32 REALTIME_BUILD_RECORD_QUEUE_SIZE = 2097152; static const uint32 ONDEMAND_EXTREME_RTO_ALIGN_LEN = 16; /* need 128-bit aligned */ static const uint32 MAX_REMOTE_READ_INFO_NUM = 100; diff --git a/src/include/access/ondemand_extreme_rto/xlog_read.h b/src/include/access/ondemand_extreme_rto/xlog_read.h index 25b1f016ee..b640b3c602 100644 --- a/src/include/access/ondemand_extreme_rto/xlog_read.h +++ b/src/include/access/ondemand_extreme_rto/xlog_read.h @@ -32,7 +32,7 @@ namespace ondemand_extreme_rto { XLogRecord* XLogParallelReadNextRecord(XLogReaderState* xlogreader); XLogRecord *ReadNextXLogRecord(XLogReaderState **xlogreaderptr, int emode); XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg, char* xlogPath); - +bool 
ondemand_allocate_recordbuf(XLogReaderState *state, uint32 reclength); } // namespace ondemand_extreme_rto typedef struct XLogFileId { -- Gitee From 00031fa002e12abc19194d52d83882a0fa40c668 Mon Sep 17 00:00:00 2001 From: congzhou2603 Date: Thu, 19 Sep 2024 17:12:27 +0800 Subject: [PATCH 323/347] =?UTF-8?q?=E3=80=90bugfix=E3=80=91=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=8C=89=E9=9C=80=E5=9B=9E=E6=94=BEbuild=E9=98=B6?= =?UTF-8?q?=E6=AE=B5=E6=9C=89=E6=A6=82=E7=8E=87=E5=8D=A1=E5=9C=A8StartupSe?= =?UTF-8?q?ndMarkToBatchRedo=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/adapter/ss_dms_callback.cpp | 2 +- .../access/transam/ondemand_extreme_rto/page_redo.cpp | 10 ++++++++-- .../access/transam/ondemand_extreme_rto/redo_utils.cpp | 2 +- .../ondemand_extreme_rto/spsc_blocking_queue.cpp | 7 +++++++ src/include/access/ondemand_extreme_rto/dispatcher.h | 2 +- .../access/ondemand_extreme_rto/spsc_blocking_queue.h | 1 + 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 148b4fb812..3ccffbfe5f 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -2179,7 +2179,7 @@ int CBOndemandRedoPageForStandby(void *block_key, int32 *redo_status) } if (SS_IN_REFORM) { - ereport(WARNING, (errmodule(MOD_DMS), + ereport(DEBUG1, (errmodule(MOD_DMS), errmsg("[SS][On-demand][%u/%u/%u/%d %d-%u] Reform happend when primary redo page for standby," "return ONDEMAND_REDO_FAIL.", tag->rnode.spcNode, tag->rnode.dbNode, diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index e263dad3f3..6b6762d4f6 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ 
b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -2608,10 +2608,16 @@ void StartupSendFowarder(RedoItem *item) AddPageRedoItem(g_dispatcher->auxiliaryLine.ctrlThd, item); } -void StartupSendMarkToBatchRedo(RedoItem *item) +void StartupSendHashmapPruneMarkToBatchRedo() { for (uint32 i = 0; i < g_dispatcher->pageLineNum; ++i) { - AddPageRedoItem(g_dispatcher->pageLines[i].batchThd, item); + if (SPSCBlockingQueueIsFull(g_dispatcher->pageLines[i].batchThd->queue)) { + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("[On-demand]StartupSendHashmapPruneMarkToBatchRedo, " + "pageline %d is full, don't send mark.", i))); + continue; + } + AddPageRedoItem(g_dispatcher->pageLines[i].batchThd, &ondemand_extreme_rto::g_hashmapPruneMark); } } diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp index a89102a2be..913d0f4ac0 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/redo_utils.cpp @@ -734,7 +734,7 @@ XLogRecPtr GetRedoLocInCheckpointRecord(XLogReaderState *record) void OnDemandNotifyHashMapPruneIfNeed() { if (SS_ONDEMAND_RECOVERY_HASHMAP_FULL) { - ondemand_extreme_rto::StartupSendMarkToBatchRedo(&ondemand_extreme_rto::g_hashmapPruneMark); + ondemand_extreme_rto::StartupSendHashmapPruneMarkToBatchRedo(); } } diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/spsc_blocking_queue.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/spsc_blocking_queue.cpp index 8b9a340afe..d78c75a115 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/spsc_blocking_queue.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/spsc_blocking_queue.cpp @@ -229,6 +229,13 @@ bool SPSCBlockingQueueIsEmpty(SPSCBlockingQueue *queue) return (COUNT(head, tail, queue->mask) 
== 0); } +bool SPSCBlockingQueueIsFull(SPSCBlockingQueue *queue) +{ + uint32 head = pg_atomic_read_u32(&queue->writeHead); + uint32 tail = pg_atomic_read_u32(&queue->readTail); + return (SPACE(head, tail, queue->mask) == 0); +} + void *SPSCBlockingQueueTop(SPSCBlockingQueue *queue) { uint32 head; diff --git a/src/include/access/ondemand_extreme_rto/dispatcher.h b/src/include/access/ondemand_extreme_rto/dispatcher.h index 1ed16edee6..463cc2d829 100644 --- a/src/include/access/ondemand_extreme_rto/dispatcher.h +++ b/src/include/access/ondemand_extreme_rto/dispatcher.h @@ -265,7 +265,7 @@ List *CheckImcompleteAction(List *imcompleteActionList); void SetPageWorkStateByThreadId(uint32 threadState); void GetReplayedRecPtr(XLogRecPtr *startPtr, XLogRecPtr *endPtr); void StartupSendFowarder(RedoItem *item); -void StartupSendMarkToBatchRedo(RedoItem *item); +void StartupSendHashmapPruneMarkToBatchRedo(); XLogRecPtr GetSafeMinCheckPoint(); RedoWaitInfo redo_get_io_event(int32 event_id); void redo_get_worker_statistic(uint32 *realNum, RedoWorkerStatsData *worker, uint32 workerLen); diff --git a/src/include/access/ondemand_extreme_rto/spsc_blocking_queue.h b/src/include/access/ondemand_extreme_rto/spsc_blocking_queue.h index e0bb268726..f5efa9d6f9 100644 --- a/src/include/access/ondemand_extreme_rto/spsc_blocking_queue.h +++ b/src/include/access/ondemand_extreme_rto/spsc_blocking_queue.h @@ -52,6 +52,7 @@ void SPSCBlockingQueueDestroy(SPSCBlockingQueue *queue); bool SPSCBlockingQueuePut(SPSCBlockingQueue *queue, void *element); void *SPSCBlockingQueueTake(SPSCBlockingQueue *queue); bool SPSCBlockingQueueIsEmpty(SPSCBlockingQueue *queue); +bool SPSCBlockingQueueIsFull(SPSCBlockingQueue *queue); void *SPSCBlockingQueueTop(SPSCBlockingQueue *queue); void SPSCBlockingQueuePop(SPSCBlockingQueue *queue); void DumpQueue(const SPSCBlockingQueue *queue); -- Gitee From 9773a933486e9bf2dbbfdec25398eb19f1207b3b Mon Sep 17 00:00:00 2001 From: yuchao Date: Thu, 12 Sep 2024 09:58:25 +0800 
Subject: [PATCH 324/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=A1=A8=E5=88=86?= =?UTF-8?q?=E5=8C=BA=E5=88=A0=E9=99=A4=E5=90=8E=EF=BC=8C=E6=89=A7=E8=A1=8C?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E8=AF=AD=E5=8F=A5=E6=8A=A5=E9=94=99partition?= =?UTF-8?q?=20does=20not=20exist=E7=9A=84=E9=97=AE=E9=A2=98=20=EF=BC=88che?= =?UTF-8?q?rry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../runtime/executor/nodeIndexscan.cpp | 28 ++++++++-- .../runtime/executor/nodeSeqscan.cpp | 53 +++++++++++++------ .../storage/access/heap/heapam.cpp | 16 ++++-- src/include/access/heapam.h | 3 +- 4 files changed, 77 insertions(+), 23 deletions(-) diff --git a/src/gausskernel/runtime/executor/nodeIndexscan.cpp b/src/gausskernel/runtime/executor/nodeIndexscan.cpp index 0d5684ea1b..908ff8444b 100644 --- a/src/gausskernel/runtime/executor/nodeIndexscan.cpp +++ b/src/gausskernel/runtime/executor/nodeIndexscan.cpp @@ -1477,7 +1477,12 @@ void ExecInitPartitionForIndexScan(IndexScanState* index_state, EState* estate) /* get table partition and add it to a list for following scan */ tablepartitionid = getPartitionOidFromSequence(current_relation, partSeq, partitionno); - table_partition = PartitionOpenWithPartitionno(current_relation, tablepartitionid, partitionno, lock); + table_partition = + PartitionOpenWithPartitionno(current_relation, tablepartitionid, partitionno, lock, true); + /* Skip concurrent dropped partitions */ + if (table_partition == NULL) { + continue; + } index_state->ss.partitions = lappend(index_state->ss.partitions, table_partition); appendStringInfo(partNameInfo, "%s ", table_partition->pd_part->relname.data); @@ -1503,8 +1508,12 @@ void ExecInitPartitionForIndexScan(IndexScanState* index_state, EState* estate) int subpartitionno = lfirst_int(lc2); Relation tablepartrel = partitionGetRelation(current_relation, table_partition); Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq, subpartitionno); 
- Partition subpart = - PartitionOpenWithPartitionno(tablepartrel, subpartitionid, subpartitionno, AccessShareLock); + Partition subpart = PartitionOpenWithPartitionno(tablepartrel, subpartitionid, subpartitionno, + AccessShareLock, true); + /* Skip concurrent dropped partitions */ + if (subpart == NULL) { + continue; + } partitionIndexOidList = PartitionGetPartIndexList(subpart); @@ -1561,5 +1570,18 @@ void ExecInitPartitionForIndexScan(IndexScanState* index_state, EState* estate) index_state->iss_IndexPartitionList = lappend(index_state->iss_IndexPartitionList, index_partition); } } + /* + * Set the total scaned num of partition from level 1 partition, subpartition + * list is drilled down into node->subpartitions for each node_partition entry; + * + * Note: we do not set is value from select partittins from pruning-result as some of + * pre-pruned partitions could be dropped from conecurrent DDL, node->partitions + * is refreshed partition list to be scanned; + */ + if (index_state->ss.partitions != NULL) { + index_state->ss.part_id = list_length(index_state->ss.partitions); + } else { + index_state->ss.part_id = 0; + } } } diff --git a/src/gausskernel/runtime/executor/nodeSeqscan.cpp b/src/gausskernel/runtime/executor/nodeSeqscan.cpp index efd2673316..04886df524 100644 --- a/src/gausskernel/runtime/executor/nodeSeqscan.cpp +++ b/src/gausskernel/runtime/executor/nodeSeqscan.cpp @@ -549,6 +549,27 @@ static PruningResult *GetPartitionPruningResultInInitScanRelation(SeqScan *plan, return resultPlan; } +static void TraverseSubpartitions(Relation partRelation, LOCKMODE lockmode, List **subpartition, + List *subpart_seqs, List *subpartitionnos) +{ + ListCell *lc1 = NULL; + ListCell *lc2 = NULL; + forboth (lc1, subpart_seqs, lc2, subpartitionnos) { + Oid subpartitionid = InvalidOid; + int subpartSeq = lfirst_int(lc1); + int subpartitionno = lfirst_int(lc2); + + subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq, subpartitionno); + Partition subpart = 
+ PartitionOpenWithPartitionno(partRelation, subpartitionid, subpartitionno, lockmode, true); + /* Skip concurrent dropped partitions */ + if (subpart == NULL) { + continue; + } + *subpartition = lappend(*subpartition, subpart); + } +} + /* ---------------------------------------------------------------- * InitScanRelation * @@ -643,7 +664,12 @@ void InitScanRelation(SeqScanState* node, EState* estate, int eflags) List* subpartition = NIL; tablepartitionid = getPartitionOidFromSequence(current_relation, partSeq, partitionno); - part = PartitionOpenWithPartitionno(current_relation, tablepartitionid, partitionno, lockmode); + part = PartitionOpenWithPartitionno(current_relation, tablepartitionid, partitionno, lockmode, true); + /* Skip concurrent dropped partitions */ + if (part == NULL) { + continue; + } + node->partitions = lappend(node->partitions, part); if (resultPlan->ls_selectedSubPartitions != NIL) { Relation partRelation = partitionGetRelation(current_relation, part); @@ -655,25 +681,22 @@ void InitScanRelation(SeqScanState* node, EState* estate, int eflags) List *subpart_seqs = subPartPruningResult->ls_selectedSubPartitions; List *subpartitionnos = subPartPruningResult->ls_selectedSubPartitionnos; Assert(list_length(subpart_seqs) == list_length(subpartitionnos)); - ListCell *lc1 = NULL; - ListCell *lc2 = NULL; - forboth (lc1, subpart_seqs, lc2, subpartitionnos) { - Oid subpartitionid = InvalidOid; - int subpartSeq = lfirst_int(lc1); - int subpartitionno = lfirst_int(lc2); - - subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq, subpartitionno); - Partition subpart = - PartitionOpenWithPartitionno(partRelation, subpartitionid, subpartitionno, lockmode); - subpartition = lappend(subpartition, subpart); - } + TraverseSubpartitions(partRelation, lockmode, &subpartition, subpart_seqs, subpartitionnos); releaseDummyRelation(&(partRelation)); node->subPartLengthList = lappend_int(node->subPartLengthList, list_length(subpartition)); 
node->subpartitions = lappend(node->subpartitions, subpartition); } } - if (resultPlan->ls_rangeSelectedPartitions != NULL) { - node->part_id = resultPlan->ls_rangeSelectedPartitions->length; + /* + * Set the total scaned num of partition from level 1 partition, subpartition + * list is drilled down into node->subpartitions for each node_partition entry; + * + * Note: we do not set is value from select partittins from pruning-result as some of + * pre-pruned partitions could be dropped from conecurrent DDL, node->partitions + * is refreshed partition list to be scanned; + */ + if (node->partitions != NULL) { + node->part_id = list_length(node->partitions); } else { node->part_id = 0; } diff --git a/src/gausskernel/storage/access/heap/heapam.cpp b/src/gausskernel/storage/access/heap/heapam.cpp index 5e863fa11a..f74aff8ad3 100755 --- a/src/gausskernel/storage/access/heap/heapam.cpp +++ b/src/gausskernel/storage/access/heap/heapam.cpp @@ -10101,7 +10101,8 @@ Partition tryPartitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmod * Must make sure the partitionno is of the old partition entry, otherwise a wrong entry may be found! * If the partitionno is invalid, this function is degenerated into partitionOpen. 
*/ -Partition PartitionOpenWithPartitionno(Relation relation, Oid partition_id, int partitionno, LOCKMODE lockmode) +Partition PartitionOpenWithPartitionno(Relation relation, Oid partitionOid, + int partitionno, LOCKMODE lockmode, bool missingOk) { Partition part = NULL; bool issubpartition = false; @@ -10109,24 +10110,31 @@ Partition PartitionOpenWithPartitionno(Relation relation, Oid partition_id, int Oid newpartOid = InvalidOid; /* first try open the partition */ - part = tryPartitionOpen(relation, partition_id, lockmode); + part = tryPartitionOpen(relation, partitionOid, lockmode); if (likely(PartitionIsValid(part))) { return part; } if (!PARTITIONNO_IS_VALID(partitionno)) { - ReportPartitionOpenError(relation, partition_id); + ReportPartitionOpenError(relation, partitionOid); } PARTITION_LOG( "partition %u does not exist on relation \"%s\", we will try to use partitionno %d to search the new partition", - partition_id, RelationGetRelationName(relation), partitionno); + partitionOid, RelationGetRelationName(relation), partitionno); /* if not found, search the new partition with partitionno */ issubpartition = RelationIsPartitionOfSubPartitionTable(relation); parttype = issubpartition ? 
PART_OBJ_TYPE_TABLE_SUB_PARTITION : PART_OBJ_TYPE_TABLE_PARTITION; newpartOid = GetPartOidWithPartitionno(RelationGetRelid(relation), partitionno, parttype); + if (missingOk && !OidIsValid(newpartOid)) { + ereport(LOG, (errcode(ERRCODE_PARTITION_ERROR), + errmsg("Partition oid %u is invalid when opening partition", newpartOid), + errdetail("There is a partition may have already been dropped on relation/partition \"%s\"", + RelationGetRelationName(relation)))); + return NULL; + } return partitionOpen(relation, newpartOid, lockmode); } diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 55590a6867..ff7c84a087 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -273,7 +273,8 @@ extern Partition partitionOpenWithRetry(Relation relation, Oid partitionId, LOCK extern Partition partitionOpen(Relation relation, Oid partitionId, LOCKMODE lockmode, int2 bucketId=-1); extern void partitionClose(Relation relation, Partition partition, LOCKMODE lockmode); extern Partition tryPartitionOpen(Relation relation, Oid partitionId, LOCKMODE lockmode); -extern Partition PartitionOpenWithPartitionno(Relation relation, Oid partition_id, int partitionno, LOCKMODE lockmode); +extern Partition PartitionOpenWithPartitionno(Relation relation, Oid partitionOid, + int partitionno, LOCKMODE lockmode, bool missingOk = false); extern Relation try_relation_open(Oid relationId, LOCKMODE lockmode); extern Relation relation_openrv(const RangeVar* relation, LOCKMODE lockmode); extern Relation relation_openrv_extended(const RangeVar* relation, LOCKMODE lockmode, bool missing_ok, -- Gitee From 621e1a08ebe284cd5cb881be5d71cb81a391355d Mon Sep 17 00:00:00 2001 From: wofanzheng <2399541971@qq.com> Date: Fri, 20 Sep 2024 15:15:56 +0800 Subject: [PATCH 325/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=9B=9E=E6=94=BE?= =?UTF-8?q?=E6=97=A5=E5=BF=97=E6=89=93=E5=8D=B0=E5=8A=A0=E5=9B=BA=E7=9A=84?= =?UTF-8?q?=E5=91=8A=E8=AD=A6=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/include/access/heapam.h | 20 ++++++++++---------- src/include/access/ustore/knl_upage.h | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 55590a6867..5777a4008c 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -189,16 +189,16 @@ static const struct { #define ConditionalLockTupleTuplock(_rel, _tup, _mode) \ ConditionalLockTuple((_rel), (_tup), TupleLockExtraInfo[_mode].hwlock) -#define PagePrintErrorInfo(_page, _msg) \ - do { \ - PageHeader pageHeader = (PageHeader)page; \ - elog(PANIC, \ - "%s, PageHeaderInfo: pd_lsn:%X/%X, pd_checksum:%u, pd_flags:%u, " \ - "pd_lower:%u, pd_upper:%u, pd_special:%u, pd_pagesize_version:%u, pd_prune_xid:%u", \ - _msg, pageHeader->pd_lsn.xlogid, \ - ((uint64)pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF) + pageHeader->pd_lsn.xrecoff, \ - pageHeader->pd_checksum, pageHeader->pd_flags, pageHeader->pd_lower, pageHeader->pd_upper, \ - pageHeader->pd_special, pageHeader->pd_pagesize_version, pageHeader->pd_prune_xid); \ +#define PagePrintErrorInfo(_page, _msg) \ + do { \ + PageHeader pageHeader = (PageHeader)page; \ + elog(PANIC, \ + "%s, PageHeaderInfo: pd_lsn:%X/%X, pd_checksum:%u, pd_flags:%u, " \ + "pd_lower:%u, pd_upper:%u, pd_special:%u, pd_pagesize_version:%u, pd_prune_xid:%u", \ + _msg, pageHeader->pd_lsn.xlogid, \ + (uint32)(((uint64)pageHeader->pd_lsn.xlogid << XLOG_LSN_HIGH_OFF) | pageHeader->pd_lsn.xrecoff), \ + pageHeader->pd_checksum, pageHeader->pd_flags, pageHeader->pd_lower, pageHeader->pd_upper, \ + pageHeader->pd_special, pageHeader->pd_pagesize_version, pageHeader->pd_prune_xid); \ } while (0) /* * This table maps tuple lock strength values for each particular diff --git a/src/include/access/ustore/knl_upage.h b/src/include/access/ustore/knl_upage.h index a8e8f79fff..cca10b82d5 100644 --- 
a/src/include/access/ustore/knl_upage.h +++ b/src/include/access/ustore/knl_upage.h @@ -34,7 +34,7 @@ #define UHP_ALL_VISIBLE 0x0004 /* all tuples on page are visible to \ * everyone */ #define UHEAP_VALID_FLAG_BITS 0xFFFF /* OR of all valid flag bits */ -#define XLOG_UHEAP_LSN_HIGH_OFF 32 +#define XLOG_LSN_HIGH_OFF 32 #define UPageHasFreeLinePointers(_page) (((UHeapPageHeaderData *)(_page))->pd_flags & UHEAP_HAS_FREE_LINES) #define UPageSetHasFreeLinePointers(_page) (((UHeapPageHeaderData *)(_page))->pd_flags |= UHEAP_HAS_FREE_LINES) @@ -217,7 +217,7 @@ "pd_upper:%u, pd_special:%u, pd_pagesize_version:%u, potential_freespace:%u, td_count:%u, " \ "pd_prune_xid:%lu, pd_xid_base:%lu, pd_multi_base:%lu", \ _msg, pageHeader->pd_lsn.xlogid, \ - ((uint64)pageHeader->pd_lsn.xlogid << XLOG_UHEAP_LSN_HIGH_OFF) + pageHeader->pd_lsn.xrecoff, \ + (uint32)(((uint64)pageHeader->pd_lsn.xlogid << XLOG_LSN_HIGH_OFF) | pageHeader->pd_lsn.xrecoff), \ pageHeader->pd_checksum, pageHeader->pd_flags, pageHeader->pd_lower, pageHeader->pd_upper, \ pageHeader->pd_special, pageHeader->pd_pagesize_version, pageHeader->potential_freespace, \ pageHeader->td_count, pageHeader->pd_prune_xid, pageHeader->pd_xid_base, pageHeader->pd_multi_base); \ -- Gitee From b05ee4619307938a51d55d7f202ed43646b76cbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A2=85=E7=A8=8B?= <517719039@qq.com> Date: Fri, 20 Sep 2024 15:27:50 +0800 Subject: [PATCH 326/347] =?UTF-8?q?=E5=AD=97=E7=AC=A6=E9=9B=86=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0pg=5Fdescription?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/init/globals.cpp | 2 +- .../rollback-post_catalog_maindb_92_953.sql | 17 ++++++++++++++ .../rollback-post_catalog_otherdb_92_953.sql | 17 ++++++++++++++ .../upgrade-post_catalog_maindb_92_953.sql | 22 +++++++++++++++++++ .../upgrade-post_catalog_otherdb_92_953.sql | 22 +++++++++++++++++++ 5 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 
src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_953.sql create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_953.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_953.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_953.sql diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index e8fb0239df..be1ccad686 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,7 +76,7 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92952; +const uint32 GRAND_VERSION_NUM = 92953; /******************************************** * 2.VERSION NUM FOR EACH FEATURE diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_953.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_953.sql new file mode 100644 index 0000000000..2bcae553a3 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_953.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION pg_catalog.Delete_pg_collation_temp() +RETURNS void +AS $$ +DECLARE +row_name record; +query_str_nodes text; +BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + delete from pg_catalog.pg_description where objoid in (1327, 1328, 1800, 1801) and classoid = 3456; + END LOOP; + return; +END; +$$ LANGUAGE 'plpgsql'; + +SELECT pg_catalog.Delete_pg_collation_temp(); +DROP FUNCTION pg_catalog.Delete_pg_collation_temp(); diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_953.sql 
b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_953.sql new file mode 100644 index 0000000000..2bcae553a3 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_953.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION pg_catalog.Delete_pg_collation_temp() +RETURNS void +AS $$ +DECLARE +row_name record; +query_str_nodes text; +BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + delete from pg_catalog.pg_description where objoid in (1327, 1328, 1800, 1801) and classoid = 3456; + END LOOP; + return; +END; +$$ LANGUAGE 'plpgsql'; + +SELECT pg_catalog.Delete_pg_collation_temp(); +DROP FUNCTION pg_catalog.Delete_pg_collation_temp(); diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_953.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_953.sql new file mode 100644 index 0000000000..b677569cd1 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_953.sql @@ -0,0 +1,22 @@ +CREATE OR REPLACE FUNCTION pg_catalog.update_pg_description(IN colloid integer, IN colldesc text) +RETURNS void +AS $$ +DECLARE +row_name record; +query_str_nodes text; +BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + delete from pg_catalog.pg_description where objoid = colloid and classoid = 3456; + insert into pg_catalog.pg_description values(colloid, 3456, 0, colldesc); + END LOOP; + return; +END; +$$ LANGUAGE 'plpgsql'; + +select pg_catalog.update_pg_description(1327, 'gbk_chinese_ci collation'); +select pg_catalog.update_pg_description(1328, 'gbk_bin collation'); +select pg_catalog.update_pg_description(1800, 'gb18030_chinese_ci collation'); +select pg_catalog.update_pg_description(1801, 'gb18030_bin collation'); + +DROP FUNCTION 
pg_catalog.update_pg_description; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_953.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_953.sql new file mode 100644 index 0000000000..b677569cd1 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_953.sql @@ -0,0 +1,22 @@ +CREATE OR REPLACE FUNCTION pg_catalog.update_pg_description(IN colloid integer, IN colldesc text) +RETURNS void +AS $$ +DECLARE +row_name record; +query_str_nodes text; +BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + delete from pg_catalog.pg_description where objoid = colloid and classoid = 3456; + insert into pg_catalog.pg_description values(colloid, 3456, 0, colldesc); + END LOOP; + return; +END; +$$ LANGUAGE 'plpgsql'; + +select pg_catalog.update_pg_description(1327, 'gbk_chinese_ci collation'); +select pg_catalog.update_pg_description(1328, 'gbk_bin collation'); +select pg_catalog.update_pg_description(1800, 'gb18030_chinese_ci collation'); +select pg_catalog.update_pg_description(1801, 'gb18030_bin collation'); + +DROP FUNCTION pg_catalog.update_pg_description; -- Gitee From fc16cd266be45b88692e07a0030110ea8d6e8672 Mon Sep 17 00:00:00 2001 From: lukeman Date: Fri, 20 Sep 2024 14:40:16 +0800 Subject: [PATCH 327/347] =?UTF-8?q?ecpg=E3=80=81gms=5F*=E3=80=81pg=5Farchi?= =?UTF-8?q?vecleanup=E6=B7=BB=E5=8A=A0=E5=AE=89=E5=85=A8=E7=BC=96=E8=AF=91?= =?UTF-8?q?=E9=80=89=E9=A1=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cmake/src/build_options.cmake | 2 +- contrib/gms_output/Makefile | 2 +- contrib/gms_profiler/Makefile | 2 +- contrib/gms_stats/Makefile | 3 +++ contrib/pg_archivecleanup/Makefile | 1 + src/common/interfaces/ecpg/compatlib/Makefile | 4 ++-- src/common/interfaces/ecpg/preproc/Makefile | 1 + 7 files changed, 10 
insertions(+), 5 deletions(-) diff --git a/cmake/src/build_options.cmake b/cmake/src/build_options.cmake index 82c543b969..b161361d82 100755 --- a/cmake/src/build_options.cmake +++ b/cmake/src/build_options.cmake @@ -176,7 +176,7 @@ else() endif() # libraries need secure options during compling -set(LIB_SECURE_OPTIONS -fPIC -fno-common -fstack-protector) +set(LIB_SECURE_OPTIONS -fPIC -fno-common -fstack-protector-strong) # libraries need link options during linking set(LIB_LINK_OPTIONS -pthread -std=c++14 -Wl,-z,noexecstack -Wl,-z,relro,-z,now) if(NOT "${ENABLE_UT}" STREQUAL "ON") diff --git a/contrib/gms_output/Makefile b/contrib/gms_output/Makefile index f3144f97f0..741132b94d 100644 --- a/contrib/gms_output/Makefile +++ b/contrib/gms_output/Makefile @@ -6,7 +6,7 @@ EXTENSION = gms_output DATA = gms_output--1.0.sql exclude_option = -fPIE -override CPPFLAGS := $(filter-out $(exclude_option),$(CPPFLAGS)) +override CPPFLAGS := -fstack-protector-strong $(filter-out $(exclude_option),$(CPPFLAGS)) REGRESS = gms_output diff --git a/contrib/gms_profiler/Makefile b/contrib/gms_profiler/Makefile index dffec66d98..f22033728d 100644 --- a/contrib/gms_profiler/Makefile +++ b/contrib/gms_profiler/Makefile @@ -7,7 +7,7 @@ EXTENSION = gms_profiler DATA = gms_profiler--1.0.sql exclude_option = -fPIE -override CPPFLAGS := $(filter-out $(exclude_option),$(CPPFLAGS)) +override CPPFLAGS := -fstack-protector-strong $(filter-out $(exclude_option),$(CPPFLAGS)) REGRESS = gms_profiler diff --git a/contrib/gms_stats/Makefile b/contrib/gms_stats/Makefile index ff09558dad..b912191ce2 100644 --- a/contrib/gms_stats/Makefile +++ b/contrib/gms_stats/Makefile @@ -5,6 +5,9 @@ OBJS = gms_stats.o EXTENSION = gms_stats DATA = gms_stats--1.0.sql +exclude_option = -fPIE +override CPPFLAGS := -fstack-protector-strong $(filter-out $(exclude_option),$(CPPFLAGS)) + REGRESS = gms_stats ifdef USE_PGXS diff --git a/contrib/pg_archivecleanup/Makefile b/contrib/pg_archivecleanup/Makefile index 
39c55d8713..2985fb8746 100644 --- a/contrib/pg_archivecleanup/Makefile +++ b/contrib/pg_archivecleanup/Makefile @@ -14,5 +14,6 @@ else subdir = contrib/pg_archivecleanup top_builddir = ../.. include $(top_builddir)/src/Makefile.global +override LDFLAGS += -Wl,-z,relro,-z,now include $(top_srcdir)/contrib/contrib-global.mk endif diff --git a/src/common/interfaces/ecpg/compatlib/Makefile b/src/common/interfaces/ecpg/compatlib/Makefile index 2a97ed7500..15e94fa75e 100644 --- a/src/common/interfaces/ecpg/compatlib/Makefile +++ b/src/common/interfaces/ecpg/compatlib/Makefile @@ -22,8 +22,8 @@ override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -I$(libpq_srcdir) -I$(top_srcdir)/src/include/utils $(CPPFLAGS) override CFLAGS += $(PTHREAD_CFLAGS) -override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC +override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -fstack-protector-strong +override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -fstack-protector-strong SHLIB_LINK = -L../ecpglib -lecpg -L../pgtypeslib -lpgtypes $(libpq) \ $(filter -lintl -lm, $(LIBS)) $(PTHREAD_LIBS) diff --git a/src/common/interfaces/ecpg/preproc/Makefile b/src/common/interfaces/ecpg/preproc/Makefile index bcf5fc9962..3221e9fee6 100644 --- a/src/common/interfaces/ecpg/preproc/Makefile +++ b/src/common/interfaces/ecpg/preproc/Makefile @@ -43,6 +43,7 @@ TOOLSDIR = $(top_srcdir)/src/tools GEN_KEYWORDLIST = $(PERL) -I $(TOOLSDIR) $(TOOLSDIR)/gen_keywordlist.pl GEN_KEYWORDLIST_DEPS = $(TOOLSDIR)/gen_keywordlist.pl $(TOOLSDIR)/PerfectHash.pm +override LDFLAGS += -Wl,-z,relro,-z,now ifeq ($(GCC),yes) preproc.o: CXXFLAGS += -Wno-error -- Gitee From d1c7237392b7516c7b6d86041ed69cf409739987 Mon Sep 17 00:00:00 2001 From: chenzhikai <895543892@qq.com> Date: Fri, 20 Sep 2024 16:58:06 +0800 Subject: [PATCH 328/347] =?UTF-8?q?920=20=E6=8E=A8=E7=82=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- src/gausskernel/ddes/ddes_commit_id | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gausskernel/ddes/ddes_commit_id b/src/gausskernel/ddes/ddes_commit_id index c04756dec1..21030d3d71 100644 --- a/src/gausskernel/ddes/ddes_commit_id +++ b/src/gausskernel/ddes/ddes_commit_id @@ -1,3 +1,3 @@ dms_commit_id=732601209670379e5d00b7abff66628890b101cc -dss_commit_id=621eb9d6aac34726db404446511be2de9ae32a3f -cbb_commit_id=8ef2bfa90731cf48bad7c8d4d5f5b1509b8368b3 +dss_commit_id=5b820b7fc84022df14a5c95e891846aeeb1637a1 +cbb_commit_id=73db0f79fb0f6789b3df8bfee533d39b23d3fa5a -- Gitee From 29c1beae43d22061b96814a7494e8fcc0263eb4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=82=AE=E5=82=A8-=E7=8E=8B=E5=BB=BA=E8=BE=BE?= Date: Wed, 18 Sep 2024 15:17:45 +0800 Subject: [PATCH 329/347] =?UTF-8?q?pg=5Fquery=5Faudit=E5=87=BD=E6=95=B0=20?= =?UTF-8?q?=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../rollback-post_catalog_maindb_92_946.sql | 14 +++++++------- .../rollback-post_catalog_otherdb_92_946.sql | 14 +++++++------- .../upgrade-post_catalog_maindb_92_946.sql | 14 +++++++------- .../upgrade-post_catalog_otherdb_92_946.sql | 14 +++++++------- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql index 6e9aa41f79..791a0b7933 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_946.sql @@ -1,8 +1,8 @@ DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; CREATE FUNCTION 
pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, + IN "begin" TIMESTAMPTZ, + IN "end" TIMESTAMPTZ, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -16,13 +16,13 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT thread_id TEXT, OUT local_port TEXT, OUT remote_port TEXT -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz, text) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; CREATE FUNCTION pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, - TEXT, + IN "begin" TIMESTAMPTZ, + IN "end" TIMESTAMPTZ, + IN directory TEXT, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -36,4 +36,4 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT thread_id TEXT, OUT local_port TEXT, OUT remote_port TEXT -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql index 6e9aa41f79..791a0b7933 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_946.sql @@ -1,8 +1,8 @@ DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; CREATE FUNCTION pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, + IN "begin" TIMESTAMPTZ, + IN "end" TIMESTAMPTZ, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -16,13 +16,13 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT thread_id TEXT, OUT 
local_port TEXT, OUT remote_port TEXT -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz, text) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; CREATE FUNCTION pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, - TEXT, + IN "begin" TIMESTAMPTZ, + IN "end" TIMESTAMPTZ, + IN directory TEXT, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -36,4 +36,4 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT thread_id TEXT, OUT local_port TEXT, OUT remote_port TEXT -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql index 33db65f6b5..7d7cc6d613 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_946.sql @@ -1,8 +1,8 @@ DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; CREATE FUNCTION pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, + IN "begin" TIMESTAMPTZ, + IN "end" TIMESTAMPTZ, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -18,13 +18,13 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT remote_port TEXT, OUT sha_code TEXT, OUT verify_result BOOLEAN -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; DROP FUNCTION IF EXISTS 
pg_catalog.pg_query_audit(timestamptz, timestamptz, text) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; CREATE FUNCTION pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, - TEXT, + IN "begin" TIMESTAMPTZ, + IN "end" TIMESTAMPTZ, + IN directory TEXT, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -40,4 +40,4 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT remote_port TEXT, OUT sha_code TEXT, OUT verify_result BOOLEAN -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql index ae02b572e8..4048c73b2f 100644 --- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_946.sql @@ -1,8 +1,8 @@ DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3780; CREATE FUNCTION pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, + IN "begin" TIMESTAMPTZ, + IN "end" TIMESTAMPTZ, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -18,13 +18,13 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT remote_port TEXT, OUT sha_code TEXT, OUT verify_result BOOLEAN -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; DROP FUNCTION IF EXISTS pg_catalog.pg_query_audit(timestamptz, timestamptz, text) CASCADE; SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3782; CREATE FUNCTION pg_catalog.pg_query_audit( - TIMESTAMPTZ, - TIMESTAMPTZ, - TEXT, + IN "begin" TIMESTAMPTZ, + IN 
"end" TIMESTAMPTZ, + IN directory TEXT, OUT "time" TIMESTAMPTZ, OUT type TEXT, OUT result TEXT, @@ -40,4 +40,4 @@ CREATE FUNCTION pg_catalog.pg_query_audit( OUT remote_port TEXT, OUT sha_code TEXT, OUT verify_result BOOLEAN -) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 STRICT as 'pg_query_audit'; \ No newline at end of file +) RETURNS SETOF RECORD LANGUAGE INTERNAL VOLATILE ROWS 10 as 'pg_query_audit'; \ No newline at end of file -- Gitee From b3a66b9a2f7bab82c0a4020c53633dbf7e6fb8f7 Mon Sep 17 00:00:00 2001 From: liuchangfeng Date: Fri, 20 Sep 2024 11:48:02 +0800 Subject: [PATCH 330/347] =?UTF-8?q?=E5=A4=87=E6=9C=BA=E5=85=A8=E9=87=8Fbui?= =?UTF-8?q?ld=E5=AF=B9undometa=E6=96=87=E4=BB=B6=E5=81=9A=E6=A0=A1?= =?UTF-8?q?=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../access/ustore/undo/knl_uundoapi.cpp | 106 ++++++++++++++++++ .../storage/replication/basebackup.cpp | 92 +++++++++++++++ src/include/access/ustore/undo/knl_uundoapi.h | 1 + .../access/ustore/undo/knl_uundospace.h | 7 ++ 4 files changed, 206 insertions(+) diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp index a6a85602da..3de02ae2cc 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp @@ -832,3 +832,109 @@ void ResetUndoZoneLock() } } // namespace undo + +bool CheckUndoZone(char *undoMeta) +{ + Assert(undoMeta != NULL); + uint32 zoneId = 0; + char *persistblock = undoMeta; + char *uspMetaBuffer = NULL; + pg_crc32 pageCrcVal = 0; /* CRC store in undo meta page */ + pg_crc32 comCrcVal = 0; /* calculating CRC current */ + for (zoneId = 0; zoneId < PERSIST_ZONE_COUNT; zoneId++) { + if (zoneId % (UNDOZONE_COUNT_PER_PAGE * PAGES_READ_NUM) == 0) { + if (zoneId / (UNDOZONE_COUNT_PER_PAGE * PAGES_READ_NUM) > 0) { + persistblock = persistblock + UNDO_META_PAGE_SIZE * 
PAGES_READ_NUM; + } + } + if (zoneId % UNDOZONE_COUNT_PER_PAGE == 0) { + uspMetaBuffer = + persistblock + + ((zoneId % (UNDOZONE_COUNT_PER_PAGE * PAGES_READ_NUM)) / UNDOZONE_COUNT_PER_PAGE) * UNDO_META_PAGE_SIZE; + uint32 count = UNDOZONE_COUNT_PER_PAGE; + if ((uint32)(PERSIST_ZONE_COUNT - zoneId) < UNDOZONE_COUNT_PER_PAGE) { + count = PERSIST_ZONE_COUNT - zoneId; + } + /* Get page CRC from uspMetaBuffer. */ + pageCrcVal = *(pg_crc32 *)(uspMetaBuffer + sizeof(undo::UndoZoneMetaInfo) * count); + /* + * Calculate the CRC value based on all undospace meta information stored on the page. + * Then compare with pageCrcVal. + */ + INIT_CRC32C(comCrcVal); + COMP_CRC32C(comCrcVal, (void *)uspMetaBuffer, sizeof(undo::UndoZoneMetaInfo) * count); + FIN_CRC32C(comCrcVal); + if (!EQ_CRC32C(pageCrcVal, comCrcVal)) { + ereport( + ERROR, + (errmsg(UNDOFORMAT( + "Undo meta zoneid(%d) CRC calculated(%u) is different from CRC recorded(%u) in page."), + zoneId, comCrcVal, pageCrcVal))); + return false; + } + } + } + return true; +} + +bool CheckUndoSpace(char *undoMeta, UndoSpaceType type) +{ + Assert(undoMeta != NULL); + uint32 zoneId = 0; + uint32 totalPageCnt = 0; + char *persistblock = NULL; + uint32 seek = 0; + char *uspMetaBuffer = NULL; + pg_crc32 pageCrcVal = 0; /* CRC store in undo meta page */ + pg_crc32 comCrcVal = 0; /* calculating CRC current */ + if (type == UNDO_LOG_SPACE) { + UNDOSPACE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt); + seek = totalPageCnt * UNDO_META_PAGE_SIZE; + } else { + UNDOSPACE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt); + seek = totalPageCnt * UNDO_META_PAGE_SIZE; + UNDOSPACE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, totalPageCnt); + seek += totalPageCnt * UNDO_META_PAGE_SIZE; + } + persistblock = undoMeta + seek; + + for (zoneId = 0; zoneId < PERSIST_ZONE_COUNT; zoneId++) { + if (zoneId % (UNDOSPACE_COUNT_PER_PAGE * PAGES_READ_NUM) == 0) { + if (zoneId / 
(UNDOZONE_COUNT_PER_PAGE * PAGES_READ_NUM) > 0) { + persistblock = persistblock + UNDO_META_PAGE_SIZE * PAGES_READ_NUM; + } + } + if (zoneId % UNDOSPACE_COUNT_PER_PAGE == 0) { + uspMetaBuffer = + persistblock + ((zoneId % (UNDOSPACE_COUNT_PER_PAGE * PAGES_READ_NUM)) / UNDOSPACE_COUNT_PER_PAGE) * + UNDO_META_PAGE_SIZE; + uint32 count = UNDOSPACE_COUNT_PER_PAGE; + if ((uint32)(PERSIST_ZONE_COUNT - zoneId) < UNDOSPACE_COUNT_PER_PAGE) { + count = PERSIST_ZONE_COUNT - zoneId; + } + /* Get page CRC from uspMetaBuffer. */ + pageCrcVal = *(pg_crc32 *)(uspMetaBuffer + sizeof(undo::UndoSpaceMetaInfo) * count); + /* + * Calculate the CRC value based on all undospace meta information stored on the page. + * Then compare with pageCrcVal. + */ + INIT_CRC32C(comCrcVal); + COMP_CRC32C(comCrcVal, (void *)uspMetaBuffer, sizeof(undo::UndoSpaceMetaInfo) * count); + FIN_CRC32C(comCrcVal); + if (!EQ_CRC32C(pageCrcVal, comCrcVal)) { + ereport(ERROR, + (errmsg(UNDOFORMAT("Undo meta space type(%d) zonid(%d) CRC calculated(%u) is different from CRC recorded(%u) in page."), + type, zoneId, comCrcVal, pageCrcVal))); + return false; + } + } + } + return true; +} + +bool CheckUndoMetaBuf(char *undoMeta) +{ + Assert(undoMeta != NULL); + return CheckUndoZone(undoMeta) && CheckUndoSpace(undoMeta, UNDO_LOG_SPACE) && + CheckUndoSpace(undoMeta, UNDO_SLOT_SPACE); +} \ No newline at end of file diff --git a/src/gausskernel/storage/replication/basebackup.cpp b/src/gausskernel/storage/replication/basebackup.cpp index 2532c6a2a5..7b838e59c9 100755 --- a/src/gausskernel/storage/replication/basebackup.cpp +++ b/src/gausskernel/storage/replication/basebackup.cpp @@ -127,6 +127,9 @@ static XLogRecPtr UpdateStartPtr(XLogRecPtr minLsn, XLogRecPtr curStartPtr); /* compressed Function */ static void SendCompressedFile(char* readFileName, int basePathLen, struct stat& statbuf, bool missingOk, int64* size); +static bool SendUndoMeta(FILE *fp, struct stat *statbuf); +const int undometaSize = UNDOSPACE_META_PAGE_SIZE 
+ 2 * UNDOSPACE_SPACE_PAGE_SIZE; +const int undometaRetryMax = 3; /* * save xlog location @@ -2240,6 +2243,10 @@ static bool sendFile(char *readfilename, char *tarfilename, struct stat *statbuf /* send the pkg header containing msg like file size */ _tarWriteHeader(tarfilename, NULL, statbuf); + char *fname = NULL; + if ((fname = strstr(readfilename, UNDO_META_FILE)) != NULL) { + return SendUndoMeta(fp, statbuf); + } /* Because pg_control file is shared in all instance when dss is enabled. Here pg_control of primary id * need to send to main standby in standby cluster, so we must seek a postion accoring to primary id. @@ -2462,3 +2469,88 @@ void ut_save_xlogloc(const char *xloglocation) { save_xlogloc(xloglocation); } + +static bool SendUndoMeta(FILE *fp, struct stat *statbuf) +{ + Assert(fp != NULL); + Assert(statbuf != NULL); + if (statbuf->st_size != undometaSize) { + (void)FreeFile(fp); + ereport(ERROR, (errmsg("Undometa size[%d] error", statbuf->st_size))); + } + pgoff_t len = 0; + MemoryContext oldcxt = MemoryContextSwitchTo(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); + char *undoMeta = (char *)palloc0(statbuf->st_size); + MemoryContextSwitchTo(oldcxt); + int retry = 0; + size_t cnt = 0; + errno_t rc = 0; + size_t pad; + + fseek(fp, 0, SEEK_SET); + while ((cnt = fread(undoMeta, 1, statbuf->st_size, fp)) > 0) { + if (t_thrd.walsender_cxt.walsender_ready_to_stop) { + pfree(undoMeta); + ereport(ERROR, (errcode_for_file_access(), errmsg("base backup receive stop message, aborting backup"))); + } + if (cnt != (size_t)statbuf->st_size) { + if (ferror(fp)) { + pfree(undoMeta); + ereport(ERROR, (errcode_for_file_access(), errmsg("could not read file undometa file"))); + } + } + if (CheckUndoMetaBuf(undoMeta)) { + ereport(LOG, (errmsg("checkUndoMeta Success"))); + break; + } + retry++; + fseek(fp, 0, SEEK_SET); + if (retry > undometaRetryMax) { + pfree(undoMeta); + (void)FreeFile(fp); + ereport(ERROR, (errmsg("Read undo meta error"))); + } + } + while (len < 
statbuf->st_size) { + if (t_thrd.walsender_cxt.walsender_ready_to_stop) { + ereport(ERROR, (errcode_for_file_access(), errmsg("base backup receive stop message, aborting backup"))); + } + cnt = Min(TAR_SEND_SIZE, statbuf->st_size - len); + + /* Send the chunk as a CopyData Message */ + if (pq_putmessage_noblock('d', undoMeta + len, cnt)) { + ereport(ERROR, (errcode_for_file_access(), errmsg("base backup could not send data, aborting backup"))); + } + len += cnt; + if (len >= statbuf->st_size) { + /* + * Reached end of file. The file could be longer, if it was + * extended while we were sending it, but for a base backup we can + * ignore such extended data. It will be restored from WAL. + */ + break; + } + } + + /* If the file was truncated while we were sending it, pad it with zeros */ + if (len < statbuf->st_size) { + rc = memset_s(t_thrd.basebackup_cxt.buf_block, TAR_SEND_SIZE, 0, TAR_SEND_SIZE); + securec_check(rc, "", ""); + while (len < statbuf->st_size) { + cnt = Min(TAR_SEND_SIZE, statbuf->st_size - len); + (void)pq_putmessage_noblock('d', t_thrd.basebackup_cxt.buf_block, cnt); + len += cnt; + } + } + + /* Pad to 512 byte boundary, per tar format requirements */ + pad = ((len + 511) & ~511) - len; + if (pad > 0) { + rc = memset_s(t_thrd.basebackup_cxt.buf_block, pad, 0, pad); + securec_check(rc, "", ""); + (void)pq_putmessage_noblock('d', t_thrd.basebackup_cxt.buf_block, pad); + } + pfree(undoMeta); + (void)FreeFile(fp); + return true; +} diff --git a/src/include/access/ustore/undo/knl_uundoapi.h b/src/include/access/ustore/undo/knl_uundoapi.h index 66a2d06975..b6c813c253 100644 --- a/src/include/access/ustore/undo/knl_uundoapi.h +++ b/src/include/access/ustore/undo/knl_uundoapi.h @@ -70,4 +70,5 @@ void ResetUndoZoneLock(); } // namespace undo extern void GetUndoFileDirectory(char *path, int len, UndoPersistence upersistence); +bool CheckUndoMetaBuf(char *undoMeta); #endif // __KNL_UUNDOAPI_H__ diff --git a/src/include/access/ustore/undo/knl_uundospace.h 
b/src/include/access/ustore/undo/knl_uundospace.h index b738fd322d..7b0c430773 100644 --- a/src/include/access/ustore/undo/knl_uundospace.h +++ b/src/include/access/ustore/undo/knl_uundospace.h @@ -41,6 +41,13 @@ typedef struct UndoSpaceMetaInfo { count = (total % unit == 0) ? (total / unit) : (total / unit) + 1; \ } while (0) +#define UNDOSPACE_META_PAGE_SIZE UNDO_META_PAGE_SIZE * \ +((PERSIST_ZONE_COUNT % UNDOZONE_COUNT_PER_PAGE == 0) ? (PERSIST_ZONE_COUNT / UNDOZONE_COUNT_PER_PAGE) : (PERSIST_ZONE_COUNT / UNDOZONE_COUNT_PER_PAGE) + 1) + +#define UNDOSPACE_SPACE_PAGE_SIZE UNDO_META_PAGE_SIZE * \ +((PERSIST_ZONE_COUNT % UNDOSPACE_COUNT_PER_PAGE == 0) ? (PERSIST_ZONE_COUNT / UNDOSPACE_COUNT_PER_PAGE) : (PERSIST_ZONE_COUNT / UNDOSPACE_COUNT_PER_PAGE) + 1) + + /* * UndoSpace class is used as a proxy to manipulate undo zone(segment) file. */ -- Gitee From a0b11e859876f5732352cc1423e2a22536a2fbca Mon Sep 17 00:00:00 2001 From: zzh Date: Fri, 20 Sep 2024 15:07:07 +0800 Subject: [PATCH 331/347] =?UTF-8?q?=E5=8D=95=E6=9C=BA=E5=AE=B9=E5=99=A8pg?= =?UTF-8?q?=5Fhba.conf=E9=85=8D=E7=BD=AE=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docker/dockerfiles/5.0.0/entrypoint.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/dockerfiles/5.0.0/entrypoint.sh b/docker/dockerfiles/5.0.0/entrypoint.sh index c1477bf11b..af94e45a84 100644 --- a/docker/dockerfiles/5.0.0/entrypoint.sh +++ b/docker/dockerfiles/5.0.0/entrypoint.sh @@ -259,7 +259,9 @@ opengauss_setup_hba_conf() { echo "host replication repuser $OG_SUBNET trust" fi } >> "$PGDATA/pg_hba.conf" - sed -i "/# IPv6 local connections:/a host all omm $OG_SUBNET trust" $PGDATA/pg_hba.conf + if [ -n "$SERVER_MODE" ]; then + sed -i "/# IPv6 local connections:/a host all omm $OG_SUBNET trust" $PGDATA/pg_hba.conf + fi } # append parameter to postgres.conf for connections -- Gitee From 1bb33c8b0c6f40d6f81d8c5efcbaa55ab2493ffa Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?=E9=82=AE=E5=82=A8-=E7=8E=8B=E5=BB=BA=E8=BE=BE?= Date: Tue, 3 Sep 2024 16:52:07 +0800 Subject: [PATCH 332/347] =?UTF-8?q?6.0=E7=89=88=E6=9C=AC=E5=AE=A1=E8=AE=A1?= =?UTF-8?q?=E6=97=A5=E5=BF=97tpcc=E6=80=A7=E8=83=BD=E4=B8=8B=E9=99=8D=20?= =?UTF-8?q?=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../process/postmaster/pgaudit.cpp | 185 ++++++++++-------- src/include/pgaudit.h | 14 ++ 2 files changed, 117 insertions(+), 82 deletions(-) diff --git a/src/gausskernel/process/postmaster/pgaudit.cpp b/src/gausskernel/process/postmaster/pgaudit.cpp index 5fbc36b00c..b78089c6db 100755 --- a/src/gausskernel/process/postmaster/pgaudit.cpp +++ b/src/gausskernel/process/postmaster/pgaudit.cpp @@ -361,10 +361,10 @@ typedef struct AuditData { #define WRITE_TO_STDAUDITFILE(ctype) (t_thrd.role == AUDITOR && ctype == STD_AUDIT_TYPE) #define WRITE_TO_UNIAUDITFILE(ctype) (t_thrd.role == AUDITOR && ctype == UNIFIED_AUDIT_TYPE) -#define MAX_DATA_LEN 1024 /*sha data len*/ -#define SHA256_LENTH 32 /*sha length*/ -#define SHA256_HEX_LENTH 512 /*sha hex length*/ -#define SHA_LOG_MAX_TIMELEN 80 /*sha date length*/ +#define MAX_DATA_LEN 1024 /* sha data len */ +#define SHA256_LENTH 32 /* sha length */ +#define SHA256_HEX_LENTH 512 /* sha hex length */ +#define SHA_LOG_MAX_TIMELEN 128 /* sha date length */ struct AuditEventInfo { AuditEventInfo() : userid{0}, @@ -470,11 +470,9 @@ static bool pgaudit_invalid_header(const AuditMsgHdr* header, bool newVersion); static void pgaudit_mark_corrupt_info(uint32 fnum); static void audit_append_xid_info(const char *detail_info, char *detail_info_xid, uint32 len); static bool audit_status_check_ok(); -/*audit sha code*/ +/* audit sha code */ static bool pgaudit_need_sha_code(); -static void generate_audit_sha_code(pg_time_t time, const char* type, const char* result, char *userid, const char* username, const char* dbname, char* client_info, \ - 
const char *object_name, const char *detail_info, const char* nodename, char* threadid, char* localport, \ - char* remoteport, unsigned char* shacode); +static void generateAuditShaCode(AuditShaRecord *shaRecord, char* hexbuf); static void init_audit_signal_handlers() { (void)gspqsignal(SIGHUP, sigHupHandler); /* set flag to read config file */ @@ -612,34 +610,54 @@ static bool pgaudit_need_sha_code() * the fileds are arraged as below sequence, Note it's not liable to modify them as to keep compatibility of version * time|type|result|userid|username|dbname|client_info|object_name|detail_info|nodename|threadid|localport|remoteport */ -static void generate_audit_sha_code(pg_time_t time, AuditType type, AuditResult result, char* userid, const char* username, const char* dbname, char* client_info, - const char* object_name, const char* detail_info, const char* nodename, char* threadid, char* localport, - char* remoteport, unsigned char* shacode) -{ - char timeTzLocaltime[SHA_LOG_MAX_TIMELEN] = {0}; - struct tm* system; - system = localtime(&time); - if (system != nullptr) { - (void)strftime(timeTzLocaltime, SHA_LOG_MAX_TIMELEN, "%Y-%m-%d_%H%M%S", system); - } - userid = (userid != NULL && userid[0] != '\0') ? userid : NULL; - username = (username != NULL && username[0] != '\0') ? username : NULL; - dbname = (dbname != NULL && dbname[0] != '\0') ? dbname : NULL; - client_info = (client_info != NULL && client_info[0] != '\0') ? client_info : NULL; - object_name = (object_name != NULL && object_name[0] != '\0') ? object_name : NULL; - detail_info = (detail_info != NULL && detail_info[0] != '\0') ? detail_info : NULL; - nodename = (nodename != NULL && nodename[0] != '\0') ? nodename : NULL; - threadid = (threadid != NULL && threadid[0] != '\0') ? threadid : NULL; - localport = (localport != NULL && localport[0] != '\0') ? localport : NULL; - remoteport = (remoteport != NULL && remoteport[0] != '\0') ? 
remoteport : NULL; - // TimestampTz timeTz = time_t_to_timestamptz(time); - StringInfoData str; - initStringInfo(&str); - appendStringInfo(&str, "%s | %d | %d | %s | %s | %s | %s | %s | %s | %s | %s | %s | %s", - timeTzLocaltime, type, result, userid, username, dbname, client_info, object_name, - detail_info, nodename, threadid, localport, remoteport); - SHA256((const unsigned char*)str.data , str.len, shacode); - pfree_ext(str.data); +static void generateAuditShaCode(AuditShaRecord *shaRecord, char* hexbuf) +{ + if (shaRecord == NULL) { + return; + } + // unsigned char* shacode + unsigned char shacode[SHA256_LENTH + 1] = {0}; // unsigned char* shacode + size_t textLen; + int rc; + textLen = sizeof(AuditType) + 1 + sizeof(AuditResult) + 1 + + (shaRecord->userid == NULL ? 0 : strlen(shaRecord->userid) + 1) + + (shaRecord->username == NULL ? 0 : strlen(shaRecord->username) + 1) + + (shaRecord->dbname == NULL ? 0: strlen(shaRecord->dbname) + 1) + + (shaRecord->clientInfo == NULL ? 0 : strlen(shaRecord->clientInfo) + 1) + + (shaRecord->objectName == NULL ? 0 : strlen(shaRecord->objectName) + 1) + + (shaRecord->detailInfo == NULL ? 0 : strlen(shaRecord->detailInfo) + 1) + + (shaRecord->nodename == NULL ? 0 : strlen(shaRecord->nodename) + 1) + + (shaRecord->threadid == NULL ? 0 : strlen(shaRecord->threadid) + 1) + + (shaRecord->localport == NULL ? 0 : strlen(shaRecord->localport) + 1) + + (shaRecord->remoteport == NULL ? 
0 : strlen(shaRecord->remoteport) + 1); + char* text = NULL; + text = (char*)palloc(textLen); + if (text == NULL) { + return; + } + rc = snprintf_s(text, + textLen, + textLen-1, + "%d%d%s%s%s%s%s%s%s%s%s%s", + shaRecord->type, + shaRecord->result, + shaRecord->userid, + shaRecord->username, + shaRecord->dbname, + shaRecord->clientInfo, + shaRecord->objectName, + shaRecord->detailInfo, + shaRecord->nodename, + shaRecord->threadid, + shaRecord->localport, + shaRecord->remoteport); + securec_check_intval(rc, , ); + /* sha code convert to hex */ + if (text != NULL) { + SHA256((const unsigned char*)text, strlen((const char*)text), shacode); + sha_bytes_to_hex64((uint8*)shacode, hexbuf); + pfree(text); + } return; } @@ -2064,6 +2082,7 @@ static bool audit_get_clientinfo(AuditType type, const char* object_name, AuditE void audit_report(AuditType type, AuditResult result, const char *object_name, const char *detail_info, AuditClassType ctype) { + bool newVersion = false; /* check the process status to decide whether to report it */ if (!audit_status_check_ok() || (detail_info == NULL)) { return; @@ -2078,7 +2097,6 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c StringInfoData buf; AuditData adata; AuditEventInfo event_info; - unsigned char shacode[SHA256_HEX_LENTH] = {0}; if (!audit_get_clientinfo(type, object_name, event_info)) { return; } @@ -2086,6 +2104,10 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c if (audit_check_client_blacklist(event_info.client_info)) { return; } + if (pgaudit_need_sha_code()) { + newVersion = true; + } + char *userid = event_info.userid; const char* username = event_info.username; const char* dbname = event_info.dbname; @@ -2107,7 +2129,7 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c adata.header.signature[0] = 'A'; adata.header.signature[1] = 'U'; adata.header.version = 0; - if (pgaudit_need_sha_code()) { + if (newVersion) { 
adata.header.fields = PGAUDIT_QUERY_COLS_NEW; } else { adata.header.fields = PGAUDIT_QUERY_COLS; @@ -2117,15 +2139,24 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c adata.header.size = 0; adata.type = type; adata.result = result; - char hexbuf[SHA256_HEX_LENTH]={0}; - /*type result format*/ - if (pgaudit_need_sha_code()) { - /*sha code for audit*/ - generate_audit_sha_code(adata.header.time / 1000, type, result, userid, username, dbname, client_info, object_name, - detail_info, g_instance.attr.attr_common.PGXCNodeName, threadid, localport, - remoteport, shacode); - /*sha code convert to hex*/ - sha_bytes_to_hex64((uint8*)shacode, hexbuf); + char hexbuf[SHA256_LENTH * ENCRY_LENGTH_DOUBLE + 1] = {0}; + AuditShaRecord shaRecord; + /* type result format */ + if (newVersion) { + /* sha code for audit */ + shaRecord.type = type; + shaRecord.result = result; + shaRecord.userid = userid; + shaRecord.username = username; + shaRecord.dbname = dbname; + shaRecord.clientInfo = (client_info[0] != '\0') ? client_info : NULL; + shaRecord.objectName = object_name; + shaRecord.detailInfo = (!audit_xid_info) ? detail_info : detail_info_xid; + shaRecord.nodename = g_instance.attr.attr_common.PGXCNodeName; + shaRecord.threadid = (threadid[0] != '\0') ? threadid : NULL; + shaRecord.localport = (localport[0] != '\0') ? localport : NULL; + shaRecord.remoteport = (remoteport[0] != '\0') ? remoteport : NULL; + generateAuditShaCode(&shaRecord, hexbuf); } initStringInfo(&buf); appendBinaryStringInfo(&buf, (char*)&adata, AUDIT_HEADER_SIZE); @@ -2141,8 +2172,8 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c appendStringField(&buf, (threadid[0] != '\0') ? threadid : NULL); appendStringField(&buf, (localport[0] != '\0') ? localport : NULL); appendStringField(&buf, (remoteport[0] != '\0') ? remoteport : NULL); - if (pgaudit_need_sha_code()) { - appendStringField(&buf, (shacode[0] != '\0') ? 
(const char*)hexbuf : NULL); + if (newVersion) { + appendStringField(&buf, (hexbuf[0] != '\0') ? (const char*)hexbuf : NULL); } /* @@ -2163,7 +2194,11 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c if (detail_info_xid != NULL) { pfree(detail_info_xid); } - pfree(buf.data); + if (buf.len > 0) { + pfree(buf.data); + } else { + ereport(LOG, (errmsg("audit buf data empty"))); + } } /* Brief : close a file. */ @@ -2914,27 +2949,18 @@ static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS_NEW], bool nulls[PGAUDIT_QUERY_COLS_NEW], bool newVersion) { - /*sha param*/ - char* userid = NULL; - const char* username =NULL; - const char* dbname = NULL; - char* client_info = NULL; - const char* object_name = NULL; - const char* detail_info =NULL; - const char* nodename = NULL; - char* threadid = NULL; - char* localport = NULL; - char* remoteport = NULL; - unsigned char shacode[SHA256_HEX_LENTH] = {0}; + /* sha param */ + AuditShaRecord shaRecord; const char* saved_hexbuf = NULL; - char hexbuf[SHA256_HEX_LENTH]={0}; + char hexbuf[SHA256_LENTH * ENCRY_LENGTH_DOUBLE + 1] = {0}; /* append timestamp info to data tuple */ int i = 0; values[i++] = TimestampTzGetDatum(time_t_to_timestamptz(adata->header.time)); values[i++] = CStringGetTextDatum(AuditTypeDesc(adata->type)); values[i++] = CStringGetTextDatum(AuditResultDesc(adata->result)); - // values[i++] = CStringGetTextDatum((const char*)adata->shacode); + shaRecord.type = adata->type; + shaRecord.result = adata->result; /* * new format of the audit file under correct record @@ -2945,45 +2971,43 @@ static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS_NEW], bool new_version = (header.fields == PGAUDIT_QUERY_COLS || header.fields == PGAUDIT_QUERY_COLS_NEW); field = new_version ? 
pgaudit_string_field(adata, index_field++) : NULL; - userid = (char*)field; + shaRecord.userid = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* user id */ field = pgaudit_string_field(adata, index_field++); - username = (const char*)field; + shaRecord.username = field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* user name */ field = pgaudit_string_field(adata, index_field++); - dbname = (const char*)field; + shaRecord.dbname = field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* dbname */ field = pgaudit_string_field(adata, index_field++); - client_info = (char*)field; + shaRecord.clientInfo = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* client info */ field = pgaudit_string_field(adata, index_field++); - if (field != NULL) { - object_name = (const char*)field; - } + shaRecord.objectName = field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* object name */ field = pgaudit_string_field(adata, index_field++); - detail_info = (const char*)field; + shaRecord.detailInfo = field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* detail info */ field = pgaudit_string_field(adata, index_field++); - nodename = (const char*)field; + shaRecord.nodename = field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* node name */ field = pgaudit_string_field(adata, index_field++); - threadid = (char*)field; + shaRecord.threadid = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* thread id */ field = pgaudit_string_field(adata, index_field++); - localport = (char*)field; + shaRecord.localport = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* local port */ field = pgaudit_string_field(adata, index_field++); - remoteport = (char*)field; + shaRecord.remoteport = (char*)field; values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* remote port */ if (header.fields == PGAUDIT_QUERY_COLS_NEW) { @@ -2992,16 
+3016,13 @@ static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS_NEW], field = NULL; } values[i++] = CStringGetTextDatum(FILED_NULLABLE(field)); /* sha_code hex data*/ - if (pgaudit_need_sha_code()) { + if (newVersion) { saved_hexbuf = field; if (header.fields == PGAUDIT_QUERY_COLS_NEW) { if (saved_hexbuf != NULL && saved_hexbuf[0] != '\0') { bool verifyResult = false; - /*sha code for audit*/ - generate_audit_sha_code(adata->header.time, adata->type, adata->result, userid, username, dbname, client_info, object_name, - detail_info, nodename, threadid, localport, remoteport, shacode); - /*sha code convert to hex*/ - sha_bytes_to_hex64((uint8*)shacode, hexbuf); + /* sha code for audit */ + generateAuditShaCode(&shaRecord, hexbuf); if (strcmp((const char*)hexbuf, (const char*)saved_hexbuf) == 0) { verifyResult = true; } diff --git a/src/include/pgaudit.h b/src/include/pgaudit.h index 60e7c3f35b..27aa9bee77 100644 --- a/src/include/pgaudit.h +++ b/src/include/pgaudit.h @@ -191,6 +191,20 @@ typedef enum { AUDIT_UNKNOWN = 0, AUDIT_OK, AUDIT_FAILED } AuditResult; typedef enum { AUDIT_FUNC_QUERY = 0, AUDIT_FUNC_DELETE } AuditFuncType; typedef enum { STD_AUDIT_TYPE = 0, UNIFIED_AUDIT_TYPE } AuditClassType; +typedef struct AuditShaRecord { + AuditType type; + AuditResult result; + char* userid; + const char* username; + const char* dbname; + char* clientInfo; + const char* objectName; + const char* detailInfo; + const char* nodename; + char* threadid; + char* localport; + char* remoteport; +} AuditShaRecord; extern void audit_report(AuditType type, AuditResult result, const char* object_name, const char* detail_info, AuditClassType ctype = STD_AUDIT_TYPE); extern Datum pg_query_audit(PG_FUNCTION_ARGS); extern Datum pg_delete_audit(PG_FUNCTION_ARGS); -- Gitee From e2c3d0f7b30a67a5e57375a8531c26e193584e4c Mon Sep 17 00:00:00 2001 From: Lamaric Date: Thu, 1 Aug 2024 16:20:55 +0800 Subject: [PATCH 333/347] =?UTF-8?q?cherry=20pick=20a032fff=20from=20https:?= 
=?UTF-8?q?//gitee.com/cbd123cbd/openGauss-server/pulls/5921=20=E8=B5=84?= =?UTF-8?q?=E6=BA=90=E6=B1=A0=E5=8C=96=E5=AD=98=E5=82=A8=E5=A4=8D=E5=88=B6?= =?UTF-8?q?=E5=A4=87=E9=9B=86=E7=BE=A4=E6=B2=A1=E6=9C=89remove=20xlogfile?= =?UTF-8?q?=E7=9A=84=E9=9C=80=E8=A6=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/transam/xlog.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 1b8a05dcb8..30bb8c6fc6 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -6876,6 +6876,10 @@ static XLogSegNo GetOldestXLOGSegNo(const char *workingPath) (void)closedir(xlogDir); if (sscanf_s(oldestXLogFileName, "%08X%08X%08X", &tli, &xlogReadLogid, &xlogReadLogSeg) != 3) { + if (SS_DORADO_STANDBY_CLUSTER) { + /* ss standby cluster does not actually need to remove xlog file. 
*/ + return (XLogSegNo)0; + } ereport(ERROR, (errcode_for_file_access(), errmsg("failed to translate name to xlog in GetOldestXLOGSegNo."))); } segno = (uint64)xlogReadLogid * XLogSegmentsPerXLogId + xlogReadLogSeg - 1; -- Gitee From f10fe94e739d4ef6715c0aaf8c45a1f753b6532e Mon Sep 17 00:00:00 2001 From: liuzhanfeng2 Date: Sat, 21 Sep 2024 09:14:44 +0800 Subject: [PATCH 334/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=BC=82=E5=B8=B8?= =?UTF-8?q?=E5=9C=BA=E6=99=AF=E4=B8=8Bstop=E5=8F=AA=E8=83=BD=E8=B6=85?= =?UTF-8?q?=E6=97=B6=E9=80=80=E5=87=BA=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/pg_ctl/pg_ctl.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/bin/pg_ctl/pg_ctl.cpp b/src/bin/pg_ctl/pg_ctl.cpp index d04e4f41a5..53f1cab6ea 100755 --- a/src/bin/pg_ctl/pg_ctl.cpp +++ b/src/bin/pg_ctl/pg_ctl.cpp @@ -1913,8 +1913,7 @@ static void do_stop(bool force) print_msg(_("waiting for server to shut down...")); for (cnt = 0; cnt < wait_seconds; cnt++) { - if (((pid = get_pgpid()) != 0) || - (postmaster_is_alive((pid_t)tpid) && IsMyPostmasterPid((pid_t)tpid, pg_config))) { + if (postmaster_is_alive((pid_t)tpid) && IsMyPostmasterPid((pid_t)tpid, pg_config)) { print_msg("."); pg_usleep(1000000); /* 1 sec */ } else -- Gitee From a15ced4cb3864996bbbc8b50a367ee144e5a9194 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Sat, 21 Sep 2024 10:30:20 +0800 Subject: [PATCH 335/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=81=9A=E5=90=88?= =?UTF-8?q?=E5=B5=8C=E5=A5=97=E6=97=B6=E7=9A=84core=E9=97=AE=E9=A2=98=20?= =?UTF-8?q?=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/parser/parse_agg.cpp | 3 ++- src/test/regress/expected/aggregates_part3.out | 3 +++ src/test/regress/sql/aggregates_part3.sql | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/common/backend/parser/parse_agg.cpp 
b/src/common/backend/parser/parse_agg.cpp index af09c7165b..d667f39c0b 100644 --- a/src/common/backend/parser/parse_agg.cpp +++ b/src/common/backend/parser/parse_agg.cpp @@ -225,7 +225,8 @@ void transformAggregateCall(ParseState* pstate, Aggref* agg, List* args, List* a * didn't find any local vars or aggs. */ if (min_varlevel == 0) { - if (pstate->p_hasAggs && checkExprHasAggs((Node*)agg->args)) { + if (pstate->p_hasAggs && + (checkExprHasAggs((Node*)agg->args) || checkExprHasAggs((Node*)agg->aggdirectargs))) { ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), errmsg("aggregate function calls cannot be nested"), diff --git a/src/test/regress/expected/aggregates_part3.out b/src/test/regress/expected/aggregates_part3.out index 7b19f51c13..1747c426a1 100644 --- a/src/test/regress/expected/aggregates_part3.out +++ b/src/test/regress/expected/aggregates_part3.out @@ -579,6 +579,9 @@ GROUP BY c2,c3; ----+----+----+------------ (0 rows) +SELECT percentile_cont(sum(value)) WITHIN GROUP (ORDER BY value) FROM (VALUES ('2019-07-12 00:00:01'::timestamptz,'red',1),('2019-07-12 00:00:01'::timestamptz,'blue',2)) v(time,color,value); +ERROR: aggregate function calls cannot be nested +CONTEXT: referenced column: percentile_cont reset current_schema; drop schema if exists distribute_aggregates_part3 cascade; NOTICE: drop cascades to 2 other objects diff --git a/src/test/regress/sql/aggregates_part3.sql b/src/test/regress/sql/aggregates_part3.sql index e90d304eda..fe8a303ab0 100644 --- a/src/test/regress/sql/aggregates_part3.sql +++ b/src/test/regress/sql/aggregates_part3.sql @@ -117,5 +117,7 @@ SELECT FROM sales_transaction_line GROUP BY c2,c3; +SELECT percentile_cont(sum(value)) WITHIN GROUP (ORDER BY value) FROM (VALUES ('2019-07-12 00:00:01'::timestamptz,'red',1),('2019-07-12 00:00:01'::timestamptz,'blue',2)) v(time,color,value); + reset current_schema; drop schema if exists distribute_aggregates_part3 cascade; -- Gitee From 7e074c1bbe09c00624a30a412ff4e01414ace564 Mon Sep 17 
00:00:00 2001 From: yuchao Date: Sat, 21 Sep 2024 15:35:11 +0800 Subject: [PATCH 336/347] =?UTF-8?q?=E3=80=90=E5=9B=9E=E5=90=886.0.0?= =?UTF-8?q?=E3=80=91=20https://gitee.com/opengauss/openGauss-server/pulls/?= =?UTF-8?q?6329?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/cluster_guc.conf | 1 + src/bin/pg_dump/pg_backup_archiver.cpp | 2 + src/common/backend/parser/gram.y | 5 ++- src/common/backend/utils/adt/ruleutils.cpp | 13 +++++- src/common/backend/utils/misc/guc.cpp | 12 ++++++ .../optimizer/commands/trigger.cpp | 4 +- .../knl/knl_guc/knl_session_attr_common.h | 1 + .../regress/input/dump_trigger_definer.source | 29 +++++++++++++ .../output/cursor_expression_dump.source | 1 + .../output/dump_trigger_definer.source | 41 +++++++++++++++++++ .../output/event_trigger_dump_restore.source | 1 + src/test/regress/output/mysql_function.source | 1 + src/test/regress/output/plpgsql_dump.source | 1 + .../regress/output/test_float_dump.source | 1 + .../regress/output/view_definer_test.source | 1 + src/test/regress/parallel_schedule0 | 4 +- src/test/regress/parallel_schedule0A | 2 +- 17 files changed, 111 insertions(+), 9 deletions(-) create mode 100644 src/test/regress/input/dump_trigger_definer.source create mode 100644 src/test/regress/output/dump_trigger_definer.source diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 4ff580696a..e7f107afa0 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -976,6 +976,7 @@ track_activity_query_size|int|100,102400|NULL|NULL| event_source|string|0,0|NULL|NULL| memorypool_enable|bool|0,0|NULL|NULL| enable_memory_limit|bool|0,0|NULL|NULL| +enable_dump_trigger_definer|bool|0,0|NULL|NULL| datanode_heartbeat_interval|int|1000,60000|ms|The value is best configured less than half of the wal_receiver_timeout and wal_sender_timeout.| cost_weight_index|real|1e-10,1e+10|NULL|NULL| 
default_limit_rows|real|-100,1.79769e+308|NULL|NULL| diff --git a/src/bin/pg_dump/pg_backup_archiver.cpp b/src/bin/pg_dump/pg_backup_archiver.cpp index 103f5bb434..0ba2972d9c 100644 --- a/src/bin/pg_dump/pg_backup_archiver.cpp +++ b/src/bin/pg_dump/pg_backup_archiver.cpp @@ -2844,6 +2844,8 @@ static void _doSetFixedOutputState(ArchiveHandle* AH) if (findDBCompatibility(&AH->publicArc, PQdb(GetConnection(&AH->publicArc))) && hasSpecificExtension(&AH->publicArc, "dolphin")) (void)ahprintf(AH, "SET dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length';\n"); + (void)ahprintf(AH, "SET enable_dump_trigger_definer = on;\n"); + (void)ahprintf(AH, "\n"); } diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 3f673423af..8d6f476032 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -12095,7 +12095,7 @@ CreateTrigStmt: (errcode(ERRCODE_SYNTAX_ERROR), errmsg("or replace is not supported here."), parser_errposition(@2))); } - if ($3 != NULL) + if ($3 != NULL && !u_sess->attr.attr_common.enable_dump_trigger_definer) { ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), @@ -16472,7 +16472,8 @@ view_security_expression: SQL_P SECURITY view_security_option definer_expression: DEFINER '=' UserId { - if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { + if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT || + u_sess->attr.attr_common.enable_dump_trigger_definer) { $$ = $3; } else { parser_yyerror("not support DEFINER function"); diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index 5dc2db4fe7..f16a09af31 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -3157,8 +3157,17 @@ static char* pg_get_triggerdef_worker(Oid trigid, bool pretty) appendStringInfo(&buf, "CREATE TRIGGER %s ", quote_identifier(tgname)); } } else { - appendStringInfo(&buf, "CREATE %sTRIGGER 
%s ", OidIsValid(trigrec->tgconstraint) ? "CONSTRAINT " : "", - quote_identifier(tgname)); + if (OidIsValid(trigrec->tgconstraint)) { + appendStringInfo(&buf, "CREATE CONSTRAINT TRIGGER %s ", quote_identifier(tgname)); + } else { + value = fastgetattr(ht_trig, Anum_pg_trigger_tgowner, tgrel->rd_att, &isnull); + if (DatumGetObjectId(value) != GetUserId()) { + appendStringInfo(&buf, "CREATE DEFINER = %s TRIGGER %s ", GetUserNameFromId(DatumGetObjectId(value)), + quote_identifier(tgname)); + } else { + appendStringInfo(&buf, "CREATE TRIGGER %s ", quote_identifier(tgname)); + } + } } if (TRIGGER_FOR_BEFORE(trigrec->tgtype)) diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp index 8731090217..0089b524e0 100755 --- a/src/common/backend/utils/misc/guc.cpp +++ b/src/common/backend/utils/misc/guc.cpp @@ -2126,6 +2126,18 @@ static void InitConfigureNamesBool() NULL, NULL }, + {{"enable_dump_trigger_definer", + PGC_USERSET, + NODE_ALL, + UNGROUPED, + gettext_noop("Enable dump trigger definer"), + NULL}, + &u_sess->attr.attr_common.enable_dump_trigger_definer, + false, + NULL, + NULL, + NULL + }, /* End-of-list marker */ {{NULL, (GucContext)0, diff --git a/src/gausskernel/optimizer/commands/trigger.cpp b/src/gausskernel/optimizer/commands/trigger.cpp index fb27a15868..fbc5722a35 100644 --- a/src/gausskernel/optimizer/commands/trigger.cpp +++ b/src/gausskernel/optimizer/commands/trigger.cpp @@ -262,7 +262,7 @@ ObjectAddress CreateTrigger(CreateTrigStmt* stmt, const char* queryString, Oid r } } - if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { + if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT || u_sess->attr.attr_common.enable_dump_trigger_definer) { Oid curuser = GetUserId(); if (stmt->definer) { HeapTuple roletuple = SearchUserHostName(stmt->definer, NULL); @@ -823,7 +823,7 @@ ObjectAddress CreateTrigger(CreateTrigStmt* stmt, const char* queryString, Oid r nulls[Anum_pg_trigger_tgqual - 1] = true; /* set trigger owner 
*/ - if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { + if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT || u_sess->attr.attr_common.enable_dump_trigger_definer) { tg_owner = proownerid; } else { diff --git a/src/include/knl/knl_guc/knl_session_attr_common.h b/src/include/knl/knl_guc/knl_session_attr_common.h index 9e90891187..50bbc10d53 100644 --- a/src/include/knl/knl_guc/knl_session_attr_common.h +++ b/src/include/knl/knl_guc/knl_session_attr_common.h @@ -250,6 +250,7 @@ typedef struct knl_session_attr_common { int time_record_level; bool enable_record_nettime; bool foreign_key_checks; + bool enable_dump_trigger_definer; } knl_session_attr_common; #endif /* SRC_INCLUDE_KNL_KNL_SESSION_ATTR_COMMON_H_ */ diff --git a/src/test/regress/input/dump_trigger_definer.source b/src/test/regress/input/dump_trigger_definer.source new file mode 100644 index 0000000000..66710252fb --- /dev/null +++ b/src/test/regress/input/dump_trigger_definer.source @@ -0,0 +1,29 @@ +create database src; +create database dest; +create user testuser password 'Aa@123456'; +ALTER DATABASE src OWNER TO testuser; +GRANT ALL PRIVILEGES ON DATABASE src to testuser; +ALTER DATABASE dest OWNER TO testuser; +GRANT ALL PRIVILEGES ON DATABASE dest to testuser; +\c src +ALTER SESSION SET SESSION AUTHORIZATION testuser PASSWORD 'Aa@123456'; +create schema testuser; +SET search_path = testuser; +CREATE TABLE test_tri ( + id integer, + salary integer +); +CREATE FUNCTION t_test_tri_inlinefunc() RETURNS trigger + LANGUAGE plpgsql NOT SHIPPABLE SECURITY DEFINER + AS $$ DECLARE begin +raise info 'DML on table test_tri!'; + return NEW;end$$; +CREATE TRIGGER t BEFORE INSERT OR DELETE OR UPDATE OF salary, id ON testuser.test_tri FOR EACH STATEMENT EXECUTE PROCEDURE testuser.t_test_tri_inlinefunc(); +select tgname, tgisinternal, pg_get_userbyid(tgowner) AS owner FROM pg_trigger; +\! @abs_bindir@/gs_dump src -p @portstring@ -f @abs_bindir@/sss.sql >/dev/null 2>&1; echo $? +\! 
@abs_bindir@/gsql -p @portstring@ -d dest -f @abs_bindir@/sss.sql >/dev/null 2>&1; echo $? +\c dest +select tgname, tgisinternal, pg_get_userbyid(tgowner) AS owner FROM pg_trigger; +\c postgres +drop database src; +drop database dest; diff --git a/src/test/regress/output/cursor_expression_dump.source b/src/test/regress/output/cursor_expression_dump.source index 75c4932252..6648478923 100644 --- a/src/test/regress/output/cursor_expression_dump.source +++ b/src/test/regress/output/cursor_expression_dump.source @@ -81,6 +81,7 @@ SET SET SET SET +SET CREATE SCHEMA ALTER SCHEMA SET diff --git a/src/test/regress/output/dump_trigger_definer.source b/src/test/regress/output/dump_trigger_definer.source new file mode 100644 index 0000000000..7b94c23052 --- /dev/null +++ b/src/test/regress/output/dump_trigger_definer.source @@ -0,0 +1,41 @@ +create database src; +create database dest; +create user testuser password 'Aa@123456'; +ALTER DATABASE src OWNER TO testuser; +GRANT ALL PRIVILEGES ON DATABASE src to testuser; +ALTER DATABASE dest OWNER TO testuser; +GRANT ALL PRIVILEGES ON DATABASE dest to testuser; +\c src +ALTER SESSION SET SESSION AUTHORIZATION testuser PASSWORD 'Aa@123456'; +create schema testuser; +SET search_path = testuser; +CREATE TABLE test_tri ( + id integer, + salary integer +); +CREATE FUNCTION t_test_tri_inlinefunc() RETURNS trigger + LANGUAGE plpgsql NOT SHIPPABLE SECURITY DEFINER + AS $$ DECLARE begin +raise info 'DML on table test_tri!'; + return NEW;end$$; +CREATE TRIGGER t BEFORE INSERT OR DELETE OR UPDATE OF salary, id ON testuser.test_tri FOR EACH STATEMENT EXECUTE PROCEDURE testuser.t_test_tri_inlinefunc(); +select tgname, tgisinternal, pg_get_userbyid(tgowner) AS owner FROM pg_trigger; + tgname | tgisinternal | owner +--------+--------------+---------- + t | f | testuser +(1 row) + +\! @abs_bindir@/gs_dump src -p @portstring@ -f @abs_bindir@/sss.sql >/dev/null 2>&1; echo $? +0 +\! 
@abs_bindir@/gsql -p @portstring@ -d dest -f @abs_bindir@/sss.sql >/dev/null 2>&1; echo $? +0 +\c dest +select tgname, tgisinternal, pg_get_userbyid(tgowner) AS owner FROM pg_trigger; + tgname | tgisinternal | owner +--------+--------------+---------- + t | f | testuser +(1 row) + +\c postgres +drop database src; +drop database dest; diff --git a/src/test/regress/output/event_trigger_dump_restore.source b/src/test/regress/output/event_trigger_dump_restore.source index 6b0ae8e1de..3ebca21a4d 100644 --- a/src/test/regress/output/event_trigger_dump_restore.source +++ b/src/test/regress/output/event_trigger_dump_restore.source @@ -45,6 +45,7 @@ SET SET SET SET +SET CREATE FUNCTION ALTER FUNCTION CREATE EVENT TRIGGER diff --git a/src/test/regress/output/mysql_function.source b/src/test/regress/output/mysql_function.source index 260aec06d8..c32ab7d57d 100755 --- a/src/test/regress/output/mysql_function.source +++ b/src/test/regress/output/mysql_function.source @@ -116,6 +116,7 @@ select usename from pg_user where usesysid = (select proowner from pg_proc wher --? .* --? .* --? .* +--? 
.* \c mysqltestbak \sf proc_definer1 CREATE DEFINER = testusr1 PROCEDURE public.proc_definer1() diff --git a/src/test/regress/output/plpgsql_dump.source b/src/test/regress/output/plpgsql_dump.source index 4c3f5633f9..3a9eab9c40 100644 --- a/src/test/regress/output/plpgsql_dump.source +++ b/src/test/regress/output/plpgsql_dump.source @@ -113,6 +113,7 @@ SET SET SET SET +SET CREATE SCHEMA ALTER SCHEMA SET diff --git a/src/test/regress/output/test_float_dump.source b/src/test/regress/output/test_float_dump.source index 4040337fe2..c5b084cc52 100644 --- a/src/test/regress/output/test_float_dump.source +++ b/src/test/regress/output/test_float_dump.source @@ -44,6 +44,7 @@ SET SET SET SET +SET CREATE TABLE ALTER TABLE REVOKE diff --git a/src/test/regress/output/view_definer_test.source b/src/test/regress/output/view_definer_test.source index 4843666efc..4bb88eabfb 100755 --- a/src/test/regress/output/view_definer_test.source +++ b/src/test/regress/output/view_definer_test.source @@ -158,6 +158,7 @@ SET SET SET SET +SET CREATE TABLE ALTER TABLE CREATE VIEW diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index da10910d51..df76ab182a 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -73,7 +73,7 @@ test: transaction_with_snapshot test: select_into_user_defined_variables test: select_into_file -test: gs_dump_package trigger_dump gs_dumpall gs_dump_synonym +test: gs_dump_package trigger_dump gs_dumpall gs_dump_synonym dump_trigger_definer test: out_param_func #test: sqlcode_cursor test: gs_dump_tableconstraint @@ -1129,4 +1129,4 @@ test: enable_expr_fusion_flatten test: on_update_session1 on_update_session2 test: ts_gb18030_utf8 -test: backup_tool_audit \ No newline at end of file +test: backup_tool_audit diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index 4dbf423ba4..b30fb3f5cb 100644 --- a/src/test/regress/parallel_schedule0A +++ 
b/src/test/regress/parallel_schedule0A @@ -63,7 +63,7 @@ test: set_transaction_test test: select_into_user_defined_variables test: select_into_file -test: gs_dump_package trigger_dump gs_dump_synonym +test: gs_dump_package trigger_dump gs_dump_synonym dump_trigger_definer test: out_param_func out_param_func_overload #test: sqlcode_cursor test: gs_dump_tableconstraint -- Gitee From f8fab22fe7259487c838653faa73519fe1b70213 Mon Sep 17 00:00:00 2001 From: chendong76 <1209756284@qq.com> Date: Sat, 21 Sep 2024 18:14:41 +0800 Subject: [PATCH 337/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=AE=9E=E6=97=B6?= =?UTF-8?q?=E6=9E=84=E5=BB=BA=E9=95=BF=E7=A8=B3=E4=B8=8B=E7=9A=84=E5=86=85?= =?UTF-8?q?=E5=AD=98=E6=B3=84=E6=BC=8F=E9=97=AE=E9=A2=98;=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E5=AE=9E=E6=97=B6=E6=9E=84=E5=BB=BA=E5=8D=A0=E7=94=A8?= =?UTF-8?q?=E7=9A=84xlog-record=E5=86=85=E5=AD=98=E7=BB=9F=E8=AE=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/catalog/builtin_funcs.ini | 2 +- src/common/backend/utils/adt/pgstatfuncs.cpp | 17 +++++++++++------ src/common/backend/utils/init/globals.cpp | 2 +- .../transam/ondemand_extreme_rto/batch_redo.cpp | 2 +- .../transam/ondemand_extreme_rto/dispatcher.cpp | 7 +++++++ .../transam/ondemand_extreme_rto/page_redo.cpp | 3 +++ .../transam/ondemand_extreme_rto/xlog_read.cpp | 7 ++++--- .../access/ondemand_extreme_rto/dispatcher.h | 2 ++ .../rollback_catalog_maindb_92_954.sql | 15 +++++++++++++++ .../rollback_catalog_otherdb_92_954.sql | 15 +++++++++++++++ .../upgrade-post_catalog_maindb_92_954.sql | 17 +++++++++++++++++ .../upgrade-post_catalog_otherdb_92_954.sql | 17 +++++++++++++++++ src/include/ddes/dms/ss_dms_recovery.h | 2 ++ 13 files changed, 96 insertions(+), 12 deletions(-) create mode 100644 src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_954.sql create mode 100644 
src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_954.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_954.sql create mode 100644 src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_954.sql diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 4de7bb084b..4ca3ea048e 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -2502,7 +2502,7 @@ ), AddFuncGroup( "ondemand_recovery_status", 1, - AddBuiltinFunc(_0(6991), _1("ondemand_recovery_status"), _2(0), _3(false), _4(false), _5(get_ondemand_recovery_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(0), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(0), _21(10, TEXTOID, TEXTOID, OIDOID, OIDOID, OIDOID, OIDOID, BOOLOID, TEXTOID, TEXTOID, TEXTOID), _22(10, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "primary_checkpoint_redo_lsn", "realtime_build_replayed_lsn", "hashmap_used_blocks", "hashmap_total_blocks", "trxn_queue_blocks", "seg_queue_blocks", "in_ondemand_recovery", "ondemand_recovery_status", "realtime_build_status", "recovery_pause_status"), _24(NULL), _25("get_ondemand_recovery_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(6991), _1("ondemand_recovery_status"), _2(0), _3(false), _4(false), _5(get_ondemand_recovery_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(0), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(0), _21(12, TEXTOID, TEXTOID, OIDOID, OIDOID, OIDOID, OIDOID, BOOLOID, TEXTOID, TEXTOID, TEXTOID, OIDOID, OIDOID), 
_22(12, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(12, "primary_checkpoint_redo_lsn", "realtime_build_replayed_lsn", "hashmap_used_blocks", "hashmap_total_blocks", "trxn_queue_blocks", "seg_queue_blocks", "in_ondemand_recovery", "ondemand_recovery_status", "realtime_build_status", "recovery_pause_status", "record_item_num", "record_item_mbytes"), _24(NULL), _25("get_ondemand_recovery_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "dss_io_stat", 1, diff --git a/src/common/backend/utils/adt/pgstatfuncs.cpp b/src/common/backend/utils/adt/pgstatfuncs.cpp index 0f127f4c0b..35b35fffb0 100644 --- a/src/common/backend/utils/adt/pgstatfuncs.cpp +++ b/src/common/backend/utils/adt/pgstatfuncs.cpp @@ -97,7 +97,7 @@ #define DISPLACEMENTS_VALUE 32 #define MAX_DURATION_TIME 60 #define DSS_IO_STAT_COLUMN_NUM 3 -#define ONDEMAND_RECOVERY_STAT_COLUMN_NUM 10 +#define ONDEMAND_RECOVERY_STAT_COLUMN_NUM 12 const uint32 INDEX_STATUS_VIEW_COL_NUM = 3; @@ -14797,6 +14797,8 @@ Datum get_ondemand_recovery_status(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, (AttrNumber)i++, "ondemand_recovery_status", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)i++, "realtime_build_status", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)i++, "recovery_pause_status", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "record_item_num", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "record_item_mbytes", OIDOID, -1, 0); tupdesc = BlessTupleDesc(tupdesc); @@ -14860,24 +14862,27 @@ Datum get_ondemand_recovery_status(PG_FUNCTION_ARGS) switch (stat.recoveryPauseStatus) { case NOT_PAUSE: - values[i] = CStringGetTextDatum("NOT PAUSE"); + values[i++] = CStringGetTextDatum("NOT PAUSE"); break; case PAUSE_FOR_SYNC_REDO: - values[i] = CStringGetTextDatum("PAUSE(for sync record)"); + values[i++] = 
CStringGetTextDatum("PAUSE(for sync record)"); break; case PAUSE_FOR_PRUNE_HASHMAP: - values[i] = CStringGetTextDatum("PAUSE(for hashmap full)"); + values[i++] = CStringGetTextDatum("PAUSE(for hashmap full)"); break; case PAUSE_FOR_PRUNE_TRXN_QUEUE: - values[i] = CStringGetTextDatum("PAUSE(for trxn queue full)"); + values[i++] = CStringGetTextDatum("PAUSE(for trxn queue full)"); break; case PAUSE_FOR_PRUNE_SEG_QUEUE: - values[i] = CStringGetTextDatum("PAUSE(for seg queue full)"); + values[i++] = CStringGetTextDatum("PAUSE(for seg queue full)"); break; default: ereport(ERROR, (errmsg("Invalid recovery pause status."))); break; } + uint32 recordItemMemUsedInMB = stat.recordItemMemUsed / 1024 / 1024; + values[i++] = UInt32GetDatum(stat.recordItemNum); + values[i++] = UInt32GetDatum(recordItemMemUsedInMB); HeapTuple heap_tuple = heap_form_tuple(tupdesc, values, nulls); result = HeapTupleGetDatum(heap_tuple); diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index be1ccad686..a0ded481de 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -76,7 +76,7 @@ bool will_shutdown = false; * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92953; +const uint32 GRAND_VERSION_NUM = 92954; /******************************************** * 2.VERSION NUM FOR EACH FEATURE diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/batch_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/batch_redo.cpp index ca5f2b1a0e..fce5f17bc5 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/batch_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/batch_redo.cpp @@ -118,7 +118,7 @@ ondemand_htab_ctrl_t *PRRedoItemHashInitialize(MemoryContext context) ctl.hash = RedoItemTagHash; ctl.match = RedoItemTagMatch; htab_ctrl->hTab = hash_create("Redo item hash by relfilenode and blocknum", 
INITredoItemHashSIZE, &ctl, - HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); + HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE | HASH_SHRCTX); htab_ctrl->nextHTabCtrl = NULL; htab_ctrl->maxRedoItemPtr = InvalidXLogRecPtr; diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp index 4b456cd4d7..823e2f7cfe 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/dispatcher.cpp @@ -1742,6 +1742,13 @@ void FreeRedoItem(RedoItem *item) CountXLogNumbers(&item->record); } ClearRecordInfo(&item->record); + // for less memory in ondemand realtime build + if (SS_ONDEMAND_REALTIME_BUILD_NORMAL && item->record.readRecordBufSize > ONDEMAND_RECORD_BUFFER_ALLOC_STEP) { + pfree(item->record.readRecordBuf); + pg_atomic_sub_fetch_u64(&g_dispatcher->curItemRecordBufMemSize, item->record.readRecordBufSize); + item->record.readRecordBuf = NULL; + item->record.readRecordBufSize = 0; + } pg_write_barrier(); RedoItem *oldHead = (RedoItem *)pg_atomic_read_uintptr((uintptr_t *)&g_dispatcher->freeHead); do { diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp index 688a3b34fa..e4b16d88fc 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/page_redo.cpp @@ -4273,6 +4273,9 @@ void GetOndemandRecoveryStatus(ondemand_recovery_stat *stat) stat->ondemandRecoveryStatus = g_instance.dms_cxt.SSRecoveryInfo.cluster_ondemand_status; stat->realtimeBuildStatus = g_instance.dms_cxt.SSRecoveryInfo.ondemand_realtime_build_status; stat->recoveryPauseStatus = g_instance.dms_cxt.SSRecoveryInfo.ondemand_recovery_pause_status; + stat->recordItemNum = 
pg_atomic_read_u32(&g_dispatcher->curItemNum); + stat->recordItemMemUsed = stat->recordItemNum * sizeof(RedoItem) + + pg_atomic_read_u64(&g_dispatcher->curItemRecordBufMemSize); } void RealtimeBuildReleaseRecoveryLatch(int code, Datum arg) { diff --git a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp index 2e977086f9..61aad7dc2b 100644 --- a/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp +++ b/src/gausskernel/storage/access/transam/ondemand_extreme_rto/xlog_read.cpp @@ -403,17 +403,17 @@ err: bool ondemand_allocate_recordbuf(XLogReaderState *state, uint32 reclength) { uint32 newSize = reclength; - const uint32 recordBufferAllocStep = 512; if (SS_ONDEMAND_REALTIME_BUILD_NORMAL) { - newSize += recordBufferAllocStep - (newSize % recordBufferAllocStep); + newSize += ONDEMAND_RECORD_BUFFER_ALLOC_STEP - (newSize % ONDEMAND_RECORD_BUFFER_ALLOC_STEP); } else { newSize += XLOG_BLCKSZ - (newSize % XLOG_BLCKSZ); } - newSize = Max(newSize, recordBufferAllocStep); + newSize = Max(newSize, ONDEMAND_RECORD_BUFFER_ALLOC_STEP); if (state->readRecordBuf != NULL) { pfree(state->readRecordBuf); + pg_atomic_sub_fetch_u64(&g_dispatcher->curItemRecordBufMemSize, state->readRecordBufSize); state->readRecordBuf = NULL; } state->readRecordBuf = (char *)palloc_extended(newSize, MCXT_ALLOC_NO_OOM); @@ -423,6 +423,7 @@ bool ondemand_allocate_recordbuf(XLogReaderState *state, uint32 reclength) } state->readRecordBufSize = newSize; + pg_atomic_add_fetch_u64(&g_dispatcher->curItemRecordBufMemSize, newSize); return true; } diff --git a/src/include/access/ondemand_extreme_rto/dispatcher.h b/src/include/access/ondemand_extreme_rto/dispatcher.h index 463cc2d829..c2e69d89b0 100644 --- a/src/include/access/ondemand_extreme_rto/dispatcher.h +++ b/src/include/access/ondemand_extreme_rto/dispatcher.h @@ -149,6 +149,7 @@ typedef struct { uint64 pprCostTime; uint32 maxItemNum; 
uint32 curItemNum; + uint64 curItemRecordBufMemSize; uint32 syncEnterCount; uint32 syncExitCount; @@ -203,6 +204,7 @@ extern THR_LOCAL RecordBufferState *g_recordbuffer; const static uint64 OUTPUT_WAIT_COUNT = 0x7FFFFFF; const static uint64 PRINT_ALL_WAIT_COUNT = 0x7FFFFFFFF; +static const uint32 ONDEMAND_RECORD_BUFFER_ALLOC_STEP = 512; extern RedoItem g_redoEndMark; extern RedoItem g_terminateMark; extern uint32 g_readManagerTriggerFlag; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_954.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_954.sql new file mode 100644 index 0000000000..80f59ac154 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_954.sql @@ -0,0 +1,15 @@ +DROP FUNCTION IF EXISTS pg_catalog.ondemand_recovery_status() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6991; +CREATE FUNCTION pg_catalog.ondemand_recovery_status( + out primary_checkpoint_redo_lsn text, + out realtime_build_replayed_lsn text, + out hashmap_used_blocks oid, + out hashmap_total_blocks oid, + out trxn_queue_blocks oid, + out seg_queue_blocks oid, + out in_ondemand_recovery boolean, + out ondemand_recovery_status text, + out realtime_build_status text, + out recovery_pause_status text +) +RETURNS SETOF record LANGUAGE INTERNAL as 'ondemand_recovery_status' stable; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_954.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_954.sql new file mode 100644 index 0000000000..80f59ac154 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_954.sql @@ -0,0 +1,15 @@ +DROP FUNCTION IF EXISTS pg_catalog.ondemand_recovery_status() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6991; +CREATE FUNCTION 
pg_catalog.ondemand_recovery_status( + out primary_checkpoint_redo_lsn text, + out realtime_build_replayed_lsn text, + out hashmap_used_blocks oid, + out hashmap_total_blocks oid, + out trxn_queue_blocks oid, + out seg_queue_blocks oid, + out in_ondemand_recovery boolean, + out ondemand_recovery_status text, + out realtime_build_status text, + out recovery_pause_status text +) +RETURNS SETOF record LANGUAGE INTERNAL as 'ondemand_recovery_status' stable; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_954.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_954.sql new file mode 100644 index 0000000000..bbe9279b91 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_954.sql @@ -0,0 +1,17 @@ +DROP FUNCTION IF EXISTS pg_catalog.ondemand_recovery_status() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6991; +CREATE FUNCTION pg_catalog.ondemand_recovery_status( + out primary_checkpoint_redo_lsn text, + out realtime_build_replayed_lsn text, + out hashmap_used_blocks oid, + out hashmap_total_blocks oid, + out trxn_queue_blocks oid, + out seg_queue_blocks oid, + out in_ondemand_recovery boolean, + out ondemand_recovery_status text, + out realtime_build_status text, + out recovery_pause_status text, + out record_item_num oid, + out record_item_mbytes oid +) +RETURNS SETOF record LANGUAGE INTERNAL as 'ondemand_recovery_status' stable; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_954.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_954.sql new file mode 100644 index 0000000000..bbe9279b91 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_954.sql @@ -0,0 +1,17 @@ +DROP FUNCTION IF EXISTS 
pg_catalog.ondemand_recovery_status() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6991; +CREATE FUNCTION pg_catalog.ondemand_recovery_status( + out primary_checkpoint_redo_lsn text, + out realtime_build_replayed_lsn text, + out hashmap_used_blocks oid, + out hashmap_total_blocks oid, + out trxn_queue_blocks oid, + out seg_queue_blocks oid, + out in_ondemand_recovery boolean, + out ondemand_recovery_status text, + out realtime_build_status text, + out recovery_pause_status text, + out record_item_num oid, + out record_item_mbytes oid +) +RETURNS SETOF record LANGUAGE INTERNAL as 'ondemand_recovery_status' stable; \ No newline at end of file diff --git a/src/include/ddes/dms/ss_dms_recovery.h b/src/include/ddes/dms/ss_dms_recovery.h index b700340ce4..026de8f272 100644 --- a/src/include/ddes/dms/ss_dms_recovery.h +++ b/src/include/ddes/dms/ss_dms_recovery.h @@ -124,6 +124,8 @@ typedef struct ondemand_recovery_stat { SSGlobalClusterState ondemandRecoveryStatus; ondemand_realtime_build_status_t realtimeBuildStatus; ondemand_recovery_pause_status_t recoveryPauseStatus; + uint32 recordItemNum; + uint64 recordItemMemUsed; } ondemand_recovery_stat; typedef struct ss_recovery_info { -- Gitee From 105521c7d25f35bbb683a926356f0c4173f59b66 Mon Sep 17 00:00:00 2001 From: luqichao Date: Sun, 22 Sep 2024 20:15:07 +0800 Subject: [PATCH 338/347] fix issue of consistence --- .../storage/replication/walsender.cpp | 34 ++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/src/gausskernel/storage/replication/walsender.cpp b/src/gausskernel/storage/replication/walsender.cpp index e597629832..31b530c4f2 100755 --- a/src/gausskernel/storage/replication/walsender.cpp +++ b/src/gausskernel/storage/replication/walsender.cpp @@ -5438,7 +5438,39 @@ static void XLogSendPhysical(char* xlogPath) XLByteAdvance(endptr, g_instance.attr.attr_storage.MaxSendSize * 1024); /* if we went beyond SendRqstPtr, back off */ - if (XLByteLE(SendRqstPtr, 
endptr)) { + if (g_instance.attr.attr_storage.enable_uwal) { + if (t_thrd.xlog_cxt.uwalInfo.info.dataSize == 0) { + if (0 != GsUwalQueryByUser(t_thrd.xlog_cxt.ThisTimeLineID, false)) { + ereport(PANIC, (errcode_for_file_access(), errmsg("uwal query by user failed"))); + } + } else { + if (0 != GsUwalQuery(&t_thrd.xlog_cxt.uwalInfo.id, &t_thrd.xlog_cxt.uwalInfo.info)) { + ereport(LOG, (errmsg("walsender xlogread GsUwalQuery return failed"))); + return; + } + } + if (t_thrd.xlog_cxt.uwalInfo.info.dataSize > 0) { + if (XLByteLE(t_thrd.xlog_cxt.uwalInfo.info.truncateOffset, startptr) || XLByteLE(SendRqstPtr, endptr)) { + endptr = SendRqstPtr; + t_thrd.walsender_cxt.walSndCaughtUp = true; + return; + } else { + endptr -= (endptr % XLOG_BLCKSZ); + t_thrd.walsender_cxt.walSndCaughtUp = false; + t_thrd.walsender_cxt.catchup_threshold = XLByteDifference(SendRqstPtr, endptr); + } + } else { + if (XLByteLE(SendRqstPtr, endptr)) { + endptr = SendRqstPtr; + t_thrd.walsender_cxt.walSndCaughtUp = true; + } else { + /* round down to page boundary. 
*/ + endptr -= (endptr % XLOG_BLCKSZ); + t_thrd.walsender_cxt.walSndCaughtUp = false; + t_thrd.walsender_cxt.catchup_threshold = XLByteDifference(SendRqstPtr, endptr); + } + } + } else if (XLByteLE(SendRqstPtr, endptr)) { endptr = SendRqstPtr; t_thrd.walsender_cxt.walSndCaughtUp = true; } else { -- Gitee From dbbc6c0411289789ee013417b0aeb30688280730 Mon Sep 17 00:00:00 2001 From: luqichao Date: Sun, 15 Sep 2024 18:29:59 +0800 Subject: [PATCH 339/347] valid most_available_sync when uwal on --- .../backend/utils/misc/guc/guc_storage.cpp | 16 +++++++++++++++- src/gausskernel/storage/gs_uwal/gs_uwal.cpp | 5 +++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index 2011581669..418bf997a9 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -214,6 +214,8 @@ static bool check_and_assign_namespace_oids(List* elemlist); static bool check_and_assign_general_oids(List* elemlist); static int GetLengthAndCheckReplConn(const char* ConnInfoList); +static bool check_most_available_sync_param(bool* newval, void** extra, GucSource source); + static bool check_ss_interconnect_url(char **newval, void **extra, GucSource source); static bool check_ss_ock_log_path(char **newval, void **extra, GucSource source); static bool check_ss_interconnect_type(char **newval, void **extra, GucSource source); @@ -811,7 +813,7 @@ static void InitStorageConfigureNamesBool() }, &u_sess->attr.attr_storage.guc_most_available_sync, false, - NULL, + check_most_available_sync_param, NULL, NULL}, {{"enable_show_any_tuples", @@ -6568,6 +6570,18 @@ static int GetLengthAndCheckReplConn(const char* ConnInfoList) return repl_len; } +static bool check_most_available_sync_param(bool* newval, void** extra, GucSource source) +{ + if (source == PGC_S_DEFAULT) { + return true; + } + if (*newval && 
g_instance.attr.attr_storage.enable_uwal) { + ereport(ERROR, (errmsg("Do not allow both enable uwal and most_available_sync"))); + return false; + } + return true; +} + static bool check_ss_interconnect_type(char **newval, void **extra, GucSource source) { return (strcmp("TCP", *newval) == 0 || strcmp("RDMA", *newval) == 0); diff --git a/src/gausskernel/storage/gs_uwal/gs_uwal.cpp b/src/gausskernel/storage/gs_uwal/gs_uwal.cpp index c027df8af2..73d8b66327 100644 --- a/src/gausskernel/storage/gs_uwal/gs_uwal.cpp +++ b/src/gausskernel/storage/gs_uwal/gs_uwal.cpp @@ -448,6 +448,11 @@ int GsUwalInit(ServerMode serverMode) } } + if ((volatile bool)u_sess->attr.attr_storage.guc_most_available_sync) { + ereport(ERROR, (errmsg("uwal only support most_available_sync is 'off'"))); + return ret; + } + if (GsUwalLoadSymbols() != 0) { ereport(ERROR, (errmsg("failed to dlopen libuwal.so"))); return ret; -- Gitee From dc8201832ea473ebf006649d5e45cad0151bac5c Mon Sep 17 00:00:00 2001 From: lyanna <1016943941@qq.com> Date: Mon, 23 Sep 2024 11:47:46 +0800 Subject: [PATCH 340/347] =?UTF-8?q?fix=20undorecycle=20core=20=EF=BC=88che?= =?UTF-8?q?rry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../access/ustore/undo/knl_uundorecycle.cpp | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp index a513dffada..916ae7133f 100755 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp @@ -127,7 +127,7 @@ bool VerifyFrozenXidAdvance(TransactionId oldestXmin, TransactionId globalFrozen return true; } if (TransactionIdIsNormal(globalFrozenXid) && TransactionIdFollows(globalFrozenXid, oldestXmin)) { - ereport(PANIC, (errmodule(MOD_UNDO), + ereport(WARNING, (errmodule(MOD_UNDO), 
errmsg(UNDOFORMAT( "Advance frozen xid failed, globalFrozenXid %lu is bigger than oldestXmin %lu."), globalFrozenXid, oldestXmin))); @@ -135,7 +135,7 @@ bool VerifyFrozenXidAdvance(TransactionId oldestXmin, TransactionId globalFrozen if (TransactionIdIsNormal(globalFrozenXid) && TransactionIdIsNormal(g_instance.undo_cxt.globalFrozenXid) && TransactionIdPrecedes(globalFrozenXid, g_instance.undo_cxt.globalFrozenXid)) { - ereport(PANIC, (errmodule(MOD_UNDO), + ereport(WARNING, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( "Advance frozen xid failed, globalFrozenXid %lu is smaller than globalFrozenXid %lu."), globalFrozenXid, g_instance.undo_cxt.globalFrozenXid))); @@ -143,7 +143,7 @@ bool VerifyFrozenXidAdvance(TransactionId oldestXmin, TransactionId globalFrozen if (TransactionIdIsNormal(globalFrozenXid) && TransactionIdIsNormal(g_instance.undo_cxt.globalRecycleXid) && TransactionIdPrecedes(globalFrozenXid, g_instance.undo_cxt.globalRecycleXid)) { - ereport(PANIC, (errmodule(MOD_UNDO), + ereport(WARNING, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( "Advance frozen xid failed, globalFrozenXid %lu is smaller than globalRecycleXid %lu."), globalFrozenXid, g_instance.undo_cxt.globalRecycleXid))); @@ -157,19 +157,11 @@ bool VerifyRecycleXidAdvance(TransactionId globalFrozenXid, TransactionId oldest return true; } if (TransactionIdIsNormal(oldestRecycleXid) && TransactionIdFollows(oldestRecycleXid, globalFrozenXid)) { - ereport(PANIC, (errmodule(MOD_UNDO), + ereport(WARNING, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( "Advance recycle xid failed, oldestRecycleXid %lu is bigger than globalFrozenXid %lu."), oldestRecycleXid, globalFrozenXid))); } - if (TransactionIdIsNormal(oldestRecycleXid) && - TransactionIdIsNormal(g_instance.undo_cxt.globalRecycleXid) && - TransactionIdPrecedes(oldestRecycleXid, g_instance.undo_cxt.globalRecycleXid)) { - ereport(PANIC, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT( - "Advance recycle xid failed, oldestRecycleXid %lu is smaller than globalRecycleXid 
%lu."), - globalFrozenXid, g_instance.undo_cxt.globalRecycleXid))); - } return true; } -- Gitee From 0e5b4fb5d8f75e64247b6ee181ac0feb1b7be273 Mon Sep 17 00:00:00 2001 From: gentle_hu Date: Mon, 23 Sep 2024 09:26:27 +0800 Subject: [PATCH 341/347] disable compress table with guc support_extended_features --- .../optimizer/commands/tablecmds.cpp | 13 ++++++ .../storage/access/common/reloptions.cpp | 13 ++++++ .../regress/expected/create_seg_table.out | 1 + src/test/regress/expected/mysql_condition.out | 6 +++ .../row_compression/alter_compress_params.out | 39 ++++++++++++++++ .../expected/row_compression/normal_test.out | 30 ++++++++++++ .../row_compression/pg_table_size.out | 6 +++ .../row_compression/row_compress_feature.out | 46 +++++++++++++++++++ .../row_compression/unsupported_feature.out | 26 +++++++++++ .../row_compression_basebackup.source | 2 + .../output/row_compression/twophase.source | 19 ++++++++ 11 files changed, 201 insertions(+) diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 779ce0c8fc..33260fbceb 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -1284,6 +1284,7 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), errmsg("There is a conflict caused by storage_type and orientation"))); } + bool noSupportTable = segment || isCStore || isTsStore || relkind != RELKIND_RELATION || stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED || stmt->relation->relpersistence == RELPERSISTENCE_TEMP || @@ -19703,6 +19704,18 @@ bool static transformCompressedOptions(Relation rel, bytea* relOption, List* def return false; } + if (g_instance.attr.attr_common.support_extended_features) { + ereport(WARNING, + (errmsg("The compressed relation you are using is an unofficial supported extended feature.")) + ); + } else { + ereport(ERROR, + (errmsg("The 
compressed relation you are trying to create or alter " + "is an unofficial supported extended feature."), + errhint("Turn on GUC 'support_extended_features' to enable it.")) + ); + } + /* If the relkind doesn't support compressed options, check if delist contains compressed options. * If does, throw exception. */ diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index 1db05d8fcd..20484b00e8 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -3093,6 +3093,19 @@ void SetOneOfCompressOption(DefElem* defElem, TableCreateSupport* tableCreateSup void CheckCompressOption(TableCreateSupport *tableCreateSupport) { + if (tableCreateSupport->compressType) { + if (g_instance.attr.attr_common.support_extended_features) { + ereport(WARNING, + (errmsg("The compressed relation you are using is an unofficial supported extended feature.")) + ); + } else { + ereport(ERROR, + (errmsg("The compressed relation you are trying to create " + "is an unofficial supported extended feature."), + errhint("Turn on GUC 'support_extended_features' to enable it.")) + ); + } + } #ifdef ENABLE_FINANCE_MODE if (HasCompressOption(tableCreateSupport)) { ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), diff --git a/src/test/regress/expected/create_seg_table.out b/src/test/regress/expected/create_seg_table.out index a805970f3d..95bc4bec4b 100644 --- a/src/test/regress/expected/create_seg_table.out +++ b/src/test/regress/expected/create_seg_table.out @@ -29,6 +29,7 @@ create table tab_segment_off(a int) with(segment=off); ERROR: Only support segment storage type while parameter enable_segment is ON. create table tab_segment_on(a int) with(segment=on); create table tab_segment_compress(a int) with(compresstype=2,compress_chunk_size=512,compress_level=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: compresstype can not be used in segment table. \d+ tab_segment; Table "public.tab_segment" diff --git a/src/test/regress/expected/mysql_condition.out b/src/test/regress/expected/mysql_condition.out index 1fe74d15b5..b2a1bfc04c 100644 --- a/src/test/regress/expected/mysql_condition.out +++ b/src/test/regress/expected/mysql_condition.out @@ -138,6 +138,9 @@ begin create table t_rowcompress_pglz_compresslevel(id int) with (compresstype=1,compress_level=2); end; / +WARNING: The compressed relation you are using is an unofficial supported extended feature. +CONTEXT: SQL statement "create table t_rowcompress_pglz_compresslevel(id int) with (compresstype=1,compress_level=2)" +PL/pgSQL function inline_code_block line 6 at SQL statement NOTICE: SQLSTATE = 02002, SQLCODE = 33554560, SQLERRM = compress_level should be used with ZSTD algorithm. -- sqlexception declare @@ -179,6 +182,9 @@ begin a := 1/0; end; / +WARNING: The compressed relation you are using is an unofficial supported extended feature. +CONTEXT: SQL statement "create table t_rowcompress_pglz_compresslevel(id int) with (compresstype=1,compress_level=2)" +PL/pgSQL function inline_code_block line 11 at SQL statement NOTICE: SQLSTATE = 02002, SQLCODE = 33554560, SQLERRM = compress_level should be used with ZSTD algorithm. 
declare a int; diff --git a/src/test/regress/expected/row_compression/alter_compress_params.out b/src/test/regress/expected/row_compression/alter_compress_params.out index 84b71353ff..69c4bfa616 100644 --- a/src/test/regress/expected/row_compression/alter_compress_params.out +++ b/src/test/regress/expected/row_compression/alter_compress_params.out @@ -4,26 +4,31 @@ CREATE SCHEMA alter_compress_params_schema; CREATE TABLE alter_compress_params_schema.uncompress_astore_to_cl_30 (id int, value varchar); INSERT INTO alter_compress_params_schema.uncompress_astore_to_cl_30 SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.uncompress_astore_to_cl_30 SET (compress_level = 30); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_level=0, compress_chunk_size=4096, compress_prealloc_chunks=0, compress_byte_convert=false, compress_diff_convert=false should be set when compresstype=0 DROP TABLE alter_compress_params_schema.uncompress_astore_to_cl_30; CREATE TABLE alter_compress_params_schema.uncompress_astore_to_ccs_512 (id int, value varchar); INSERT INTO alter_compress_params_schema.uncompress_astore_to_ccs_512 SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.uncompress_astore_to_ccs_512 SET (compress_chunk_size = 512); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: compress_level=0, compress_chunk_size=4096, compress_prealloc_chunks=0, compress_byte_convert=false, compress_diff_convert=false should be set when compresstype=0 DROP TABLE alter_compress_params_schema.uncompress_astore_to_ccs_512; CREATE TABLE alter_compress_params_schema.uncompress_astore_to_ccs_512_cpc_7 (id int, value varchar); INSERT INTO alter_compress_params_schema.uncompress_astore_to_ccs_512_cpc_7 SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.uncompress_astore_to_ccs_512_cpc_7 SET (compress_chunk_size = 512, compress_prealloc_chunks = 7); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_level=0, compress_chunk_size=4096, compress_prealloc_chunks=0, compress_byte_convert=false, compress_diff_convert=false should be set when compresstype=0 DROP TABLE alter_compress_params_schema.uncompress_astore_to_ccs_512_cpc_7; CREATE TABLE alter_compress_params_schema.uncompress_astore_to_cbc_1 (id int, value varchar); INSERT INTO alter_compress_params_schema.uncompress_astore_to_cbc_1 SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.uncompress_astore_to_cbc_1 SET (compress_byte_convert = true); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: compress_level=0, compress_chunk_size=4096, compress_prealloc_chunks=0, compress_byte_convert=false, compress_diff_convert=false should be set when compresstype=0 DROP TABLE alter_compress_params_schema.uncompress_astore_to_cbc_1; CREATE TABLE alter_compress_params_schema.uncompress_astore_to_cbc_1_cdc_1 (id int, value varchar); INSERT INTO alter_compress_params_schema.uncompress_astore_to_cbc_1_cdc_1 SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.uncompress_astore_to_cbc_1_cdc_1 SET (compress_byte_convert = true, compress_diff_convert = true); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_level=0, compress_chunk_size=4096, compress_prealloc_chunks=0, compress_byte_convert=false, compress_diff_convert=false should be set when compresstype=0 DROP TABLE alter_compress_params_schema.uncompress_astore_to_cbc_1_cdc_1; -- the new compression parameters is out of the value range @@ -66,6 +71,7 @@ DROP TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_ CREATE TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_1023 (id int, value varchar); INSERT INTO alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_1023 SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_1023 SET (compresstype = 1, compress_chunk_size = 1023); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: invalid compress_chunk_size 1023, must be one of 512, 1024, 2048 or 4096 DROP TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_1023; CREATE TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_512_cpc_8 (id int, value varchar); @@ -93,6 +99,7 @@ DROP TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_cbc_ CREATE TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_4096_cpc_2 (id int, value varchar); INSERT INTO alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_4096_cpc_2 SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_4096_cpc_2 SET (compresstype = 1, compress_chunk_size = 4096, compress_prealloc_chunks = 2); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: invalid compress_prealloc_chunks 2, must be less than 2 for uncompress_astore_to_compresstype_1_ccs_4096_cpc_2 DROP TABLE alter_compress_params_schema.uncompress_astore_to_compresstype_1_ccs_4096_cpc_2; CREATE TABLE alter_compress_params_schema.uncompress_to_compresstype_3 (id int, value varchar); @@ -105,6 +112,7 @@ DROP TABLE alter_compress_params_schema.uncompress_to_compresstype_3; CREATE TABLE alter_compress_params_schema.alter_column_table_compressed_options (id int, value varchar) WITH (ORIENTATION = column); INSERT INTO alter_compress_params_schema.alter_column_table_compressed_options SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.alter_column_table_compressed_options SET (compresstype = 1); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: Un-support feature DETAIL: Option "compresstype" doesn't allow ALTER DROP TABLE alter_compress_params_schema.alter_column_table_compressed_options; @@ -112,6 +120,7 @@ DROP TABLE alter_compress_params_schema.alter_column_table_compressed_options; CREATE TABLE alter_compress_params_schema.alter_segment_table_compressed_options (id int, value varchar) WITH (segment = on); INSERT INTO alter_compress_params_schema.alter_segment_table_compressed_options SELECT generate_series(1,5), 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'; ALTER TABLE alter_compress_params_schema.alter_segment_table_compressed_options SET (compresstype = 1); -- fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: Un-support feature DETAIL: Option "compresstype" doesn't allow ALTER DROP TABLE alter_compress_params_schema.alter_segment_table_compressed_options; @@ -119,15 +128,19 @@ DROP TABLE alter_compress_params_schema.alter_segment_table_compressed_options; CREATE TABLE alter_compress_params_schema.segment_table (id int, c1 text) WITH( segment=on); CREATE INDEX alter_compress_params_schema.uncompressed_index_test ON alter_compress_params_schema.segment_table(c1); ALTER INDEX alter_compress_params_schema.uncompressed_index_test SET (compresstype = 1); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: Un-support feature DETAIL: Option "compresstype" doesn't allow ALTER DROP INDEX alter_compress_params_schema.uncompressed_index_test; DROP TABLE alter_compress_params_schema.segment_table; CREATE TABLE alter_compress_params_schema.row_table (id int, c1 text); CREATE INDEX alter_compress_params_schema.compressed_index_test ON alter_compress_params_schema.row_table(c1) WITH (compresstype = 1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ALTER INDEX alter_compress_params_schema.compressed_index_test SET (compresstype = 2, compress_level = 15); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: change compresstype OPTION is not supported ALTER INDEX alter_compress_params_schema.compressed_index_test SET (compresstype = 0); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: change compresstype OPTION is not supported DROP INDEX alter_compress_params_schema.compressed_index_test; DROP TABLE alter_compress_params_schema.row_table; @@ -138,6 +151,7 @@ ERROR: permission denied: "pg_class" is a system catalog CREATE TABLE alter_compress_params_schema.uncompressed_table_compresstype_1 (id int, c1 text); INSERT INTO alter_compress_params_schema.uncompressed_table_compresstype_1 SELECT generate_series(1, 10), 'fsfsfsfsfsfsfsfsfsfsfsfssfsf'; ALTER TABLE alter_compress_params_schema.uncompressed_table_compresstype_1 SET (compresstype = 1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ alter_compress_params_schema.uncompressed_table_compresstype_1 Table "alter_compress_params_schema.uncompressed_table_compresstype_1" Column | Type | Modifiers | Storage | Stats target | Description @@ -179,6 +193,7 @@ DROP TABLE alter_compress_params_schema.uncompressed_table_compresstype_1; CREATE TABLE alter_compress_params_schema.uncompressed_table_compresstype_2_cl_30 (id int, c1 text); INSERT INTO alter_compress_params_schema.uncompressed_table_compresstype_2_cl_30 SELECT generate_series(1, 10), 'fsfsfsfsfsfsfsfsfsfsfsfssfsf'; ALTER TABLE alter_compress_params_schema.uncompressed_table_compresstype_2_cl_30 SET (compresstype = 2, compress_level = 30); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
\d+ alter_compress_params_schema.uncompressed_table_compresstype_2_cl_30 Table "alter_compress_params_schema.uncompressed_table_compresstype_2_cl_30" Column | Type | Modifiers | Storage | Stats target | Description @@ -220,6 +235,7 @@ DROP TABLE alter_compress_params_schema.uncompressed_table_compresstype_2_cl_30; CREATE TABLE alter_compress_params_schema.uncompressed_compresstype_2_cl_30_ccs_2048_cpc_3 (id int, c1 text); INSERT INTO alter_compress_params_schema.uncompressed_compresstype_2_cl_30_ccs_2048_cpc_3 SELECT generate_series(1, 10), 'fsfsfsfsfsfsfsfsfsfsfsfssfsf'; ALTER TABLE alter_compress_params_schema.uncompressed_compresstype_2_cl_30_ccs_2048_cpc_3 SET (compresstype = 2, compress_level = 30, compress_chunk_size = 2048, compress_prealloc_chunks = 3); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ alter_compress_params_schema.uncompressed_compresstype_2_cl_30_ccs_2048_cpc_3 Table "alter_compress_params_schema.uncompressed_compresstype_2_cl_30_ccs_2048_cpc_3" Column | Type | Modifiers | Storage | Stats target | Description @@ -261,6 +277,7 @@ DROP TABLE alter_compress_params_schema.uncompressed_compresstype_2_cl_30_ccs_20 CREATE TABLE alter_compress_params_schema.uncompressed_table_all_options (id int, c1 text); INSERT INTO alter_compress_params_schema.uncompressed_table_all_options SELECT generate_series(1, 10), 'fsfsfsfsfsfsfsfsfsfsfsfssfsf'; ALTER TABLE alter_compress_params_schema.uncompressed_table_all_options SET (compresstype = 2, compress_level = 30, compress_chunk_size = 512, compress_prealloc_chunks = 6, compress_byte_convert = true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
\d+ alter_compress_params_schema.uncompressed_table_all_options Table "alter_compress_params_schema.uncompressed_table_all_options" Column | Type | Modifiers | Storage | Stats target | Description @@ -320,6 +337,7 @@ INSERT INTO alter_compress_params_schema.uncompressed_partitioned_compresstype_1 INSERT INTO alter_compress_params_schema.uncompressed_partitioned_compresstype_1 SELECT generate_series(1, 10), 'session3 item', '2021-08-01 00:00:00', 1000, '733'; INSERT INTO alter_compress_params_schema.uncompressed_partitioned_compresstype_1 SELECT generate_series(1, 10), 'session4 item', '2021-11-01 00:00:00', 1000, '744'; ALTER TABLE alter_compress_params_schema.uncompressed_partitioned_compresstype_1 SET (compresstype = 1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. select relname, reloptions from pg_partition where relname = 'uncompressed_partitioned_compresstype_1_season1'; relname | reloptions -------------------------------------------------+------------------------------------------------- @@ -385,6 +403,7 @@ INSERT INTO alter_compress_params_schema.uncompressed_partitioned_cl_10_ccs_2048 INSERT INTO alter_compress_params_schema.uncompressed_partitioned_cl_10_ccs_2048_cpc_3 SELECT generate_series(1, 10), 'session3 item', '2021-08-01 00:00:00', 1000, '733'; INSERT INTO alter_compress_params_schema.uncompressed_partitioned_cl_10_ccs_2048_cpc_3 SELECT generate_series(1, 10), 'session4 item', '2021-11-01 00:00:00', 1000, '744'; ALTER TABLE alter_compress_params_schema.uncompressed_partitioned_cl_10_ccs_2048_cpc_3 SET (compresstype = 2, compress_level = 30, compress_chunk_size = 2048, compress_prealloc_chunks = 3); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
select relname, reloptions from pg_partition where relname = 'uncompressed_partitioned_cl_10_ccs_2048_cpc_3_season1'; relname | reloptions -------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------- @@ -450,6 +469,7 @@ INSERT INTO alter_compress_params_schema.uncompressed_partitioned_all_options SE INSERT INTO alter_compress_params_schema.uncompressed_partitioned_all_options SELECT generate_series(1, 10), 'session3 item', '2021-08-01 00:00:00', 1000, '733'; INSERT INTO alter_compress_params_schema.uncompressed_partitioned_all_options SELECT generate_series(1, 10), 'session4 item', '2021-11-01 00:00:00', 1000, '744'; ALTER TABLE alter_compress_params_schema.uncompressed_partitioned_all_options SET (compresstype = 2, compress_level = 30, compress_chunk_size = 512, compress_prealloc_chunks = 7, compress_byte_convert = true, compress_diff_convert = true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. select relname, reloptions from pg_partition where relname = 'uncompressed_partitioned_all_options_season1'; relname | reloptions ----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -520,6 +540,7 @@ INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_compresstyp INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_compresstype_1 values ('201903', '1', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_compresstype_1 values ('201903', '2', generate_series(1, 10)); ALTER TABLE alter_compress_params_schema.uncompressed_subpartitioned_compresstype_1 SET (compresstype = 1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
select relname, reloptions from pg_partition where relname = 'uncompressed_subpartitioned_compresstype_1_201901_a'; relname | reloptions -----------------------------------------------------+------------------------------------------------- @@ -588,6 +609,7 @@ INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_cl_10_ccs_2 INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_cl_10_ccs_2048_cpc_3 values ('201903', '1', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_cl_10_ccs_2048_cpc_3 values ('201903', '2', generate_series(1, 10)); ALTER TABLE alter_compress_params_schema.uncompressed_subpartitioned_cl_10_ccs_2048_cpc_3 SET (compresstype = 2, compress_level = 10, compress_chunk_size = 2048, compress_prealloc_chunks = 3); +WARNING: The compressed relation you are using is an unofficial supported extended feature. select relname, reloptions from pg_partition where relname = 'uncompressed_subpartitioned_cl_10_ccs_2048_cpc_3_201901_a'; relname | reloptions -----------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------- @@ -656,6 +678,7 @@ INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_all_options INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_all_options values ('201903', '1', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.uncompressed_subpartitioned_all_options values ('201903', '2', generate_series(1, 10)); ALTER TABLE alter_compress_params_schema.uncompressed_subpartitioned_all_options SET (compresstype = 2, compress_level = 30, compress_chunk_size = 512, compress_prealloc_chunks = 7, compress_byte_convert = true, compress_diff_convert = true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
select relname, reloptions from pg_partition where relname = 'uncompressed_subpartitioned_all_options_201901_a'; relname | reloptions --------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -714,8 +737,10 @@ begin end; $BODY$; CREATE TABLE alter_compress_params_schema.compressed_table_compresstype_2_cl_30 (id int, c1 text) with (compresstype = 1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. INSERT INTO alter_compress_params_schema.compressed_table_compresstype_2_cl_30 SELECT generate_series(1, 10), 'fsfsfsfsfsfsfsfsfsfsfsfssfsf'; ALTER TABLE alter_compress_params_schema.compressed_table_compresstype_2_cl_30 SET (compresstype = 2, compress_level = 30); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ alter_compress_params_schema.compressed_table_compresstype_2_cl_30 Table "alter_compress_params_schema.compressed_table_compresstype_2_cl_30" Column | Type | Modifiers | Storage | Stats target | Description @@ -755,8 +780,10 @@ SELECT nchunks, chunknos FROM pg_catalog.compress_address_details('alter_compres DROP TABLE alter_compress_params_schema.compressed_table_compresstype_2_cl_30; CREATE TABLE alter_compress_params_schema.all_options_table_compresstype_1_cpc_1 (id int, c1 text) with (compresstype = 2, compress_level = 30, compress_chunk_size = 512, compress_prealloc_chunks = 7, compress_byte_convert = true, compress_diff_convert = true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
INSERT INTO alter_compress_params_schema.all_options_table_compresstype_1_cpc_1 SELECT generate_series(1, 10), 'fsfsfsfsfsfsfsfsfsfsfsfssfsf'; ALTER TABLE alter_compress_params_schema.all_options_table_compresstype_1_cpc_1 SET (compresstype = 1, compress_level = 0, compress_chunk_size = 4096, compress_prealloc_chunks = 1, compress_byte_convert = false, compress_diff_convert = false); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ alter_compress_params_schema.all_options_table_compresstype_1_cpc_1 Table "alter_compress_params_schema.all_options_table_compresstype_1_cpc_1" Column | Type | Modifiers | Storage | Stats target | Description @@ -823,11 +850,13 @@ PARTITION BY RANGE(sales_date) PARTITION compressed_partitioned_compresstype_2_cl_30_ccs_512_season3 VALUES LESS THAN('2021-10-01 00:00:00'), PARTITION compressed_partitioned_compresstype_2_cl_30_ccs_512_season4 VALUES LESS THAN(MAXVALUE) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
INSERT INTO alter_compress_params_schema.compressed_partitioned_compresstype_2_cl_30_ccs_512 SELECT generate_series(1, 10), 'session1 item', '2021-02-01 00:00:00', 1000, '711'; INSERT INTO alter_compress_params_schema.compressed_partitioned_compresstype_2_cl_30_ccs_512 SELECT generate_series(1, 10), 'session2 item', '2021-05-01 00:00:00', 1000, '722'; INSERT INTO alter_compress_params_schema.compressed_partitioned_compresstype_2_cl_30_ccs_512 SELECT generate_series(1, 10), 'session3 item', '2021-08-01 00:00:00', 1000, '733'; INSERT INTO alter_compress_params_schema.compressed_partitioned_compresstype_2_cl_30_ccs_512 SELECT generate_series(1, 10), 'session4 item', '2021-11-01 00:00:00', 1000, '744'; ALTER TABLE alter_compress_params_schema.compressed_partitioned_compresstype_2_cl_30_ccs_512 SET (compresstype = 2, compress_level = 30, compress_chunk_size = 512); +WARNING: The compressed relation you are using is an unofficial supported extended feature. CHECKPOINT; select relname, reloptions from pg_partition where relname = 'compressed_partitioned_compresstype_2_cl_30_ccs_512_season1'; relname | reloptions @@ -902,11 +931,13 @@ PARTITION BY RANGE(sales_date) PARTITION all_options_partitioned_compresstype_1_cpc_1_season3 VALUES LESS THAN('2021-10-01 00:00:00'), PARTITION all_options_partitioned_compresstype_1_cpc_1_season4 VALUES LESS THAN(MAXVALUE) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
INSERT INTO alter_compress_params_schema.all_options_partitioned_compresstype_1_cpc_1 SELECT generate_series(1, 10), 'session1 item', '2021-02-01 00:00:00', 1000, '711'; INSERT INTO alter_compress_params_schema.all_options_partitioned_compresstype_1_cpc_1 SELECT generate_series(1, 10), 'session2 item', '2021-05-01 00:00:00', 1000, '722'; INSERT INTO alter_compress_params_schema.all_options_partitioned_compresstype_1_cpc_1 SELECT generate_series(1, 10), 'session3 item', '2021-08-01 00:00:00', 1000, '733'; INSERT INTO alter_compress_params_schema.all_options_partitioned_compresstype_1_cpc_1 SELECT generate_series(1, 10), 'session4 item', '2021-11-01 00:00:00', 1000, '744'; ALTER TABLE alter_compress_params_schema.all_options_partitioned_compresstype_1_cpc_1 SET (compresstype = 1, compress_level = 0, compress_chunk_size = 4096, compress_prealloc_chunks = 1, compress_byte_convert = false, compress_diff_convert = false); +WARNING: The compressed relation you are using is an unofficial supported extended feature. CHECKPOINT; select relname, reloptions from pg_partition where relname = 'all_options_partitioned_compresstype_1_cpc_1_season1'; relname | reloptions @@ -973,11 +1004,13 @@ PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) SUBPARTITION all_options_subpartitioned_compresstype_1_cpc_1_201902_b VALUES ( '2' ) ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
INSERT INTO alter_compress_params_schema.all_options_subpartitioned_compresstype_1_cpc_1 values ('201902', '1', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.all_options_subpartitioned_compresstype_1_cpc_1 values ('201902', '2', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.all_options_subpartitioned_compresstype_1_cpc_1 values ('201903', '1', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.all_options_subpartitioned_compresstype_1_cpc_1 values ('201903', '2', generate_series(1, 10)); ALTER TABLE alter_compress_params_schema.all_options_subpartitioned_compresstype_1_cpc_1 SET (compresstype = 1, compress_level = 0, compress_chunk_size = 4096, compress_prealloc_chunks = 1, compress_byte_convert = false, compress_diff_convert = false); +WARNING: The compressed relation you are using is an unofficial supported extended feature. CHECKPOINT; select relname, reloptions from pg_partition where relname = 'all_options_subpartitioned_compresstype_1_cpc_1_201901_a'; relname | reloptions @@ -1037,8 +1070,10 @@ SELECT count(*) FROM compress_statistic_info(compress_func_findpath('alter_compr DROP TABLE alter_compress_params_schema.all_options_subpartitioned_compresstype_1_cpc_1; -- set compressed table to uncompressed table CREATE TABLE alter_compress_params_schema.all_options_table_uncompressed (id int, c1 text) with (compresstype = 2, compress_level = 30, compress_chunk_size = 512, compress_prealloc_chunks = 7, compress_byte_convert = true, compress_diff_convert = true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
INSERT INTO alter_compress_params_schema.all_options_table_uncompressed SELECT generate_series(1, 10), 'fsfsfsfsfsfsfsfsfsfsfsfssfsf'; ALTER TABLE alter_compress_params_schema.all_options_table_uncompressed SET (compresstype = 0, compress_level = 0, compress_chunk_size = 4096, compress_prealloc_chunks = 0, compress_byte_convert = false, compress_diff_convert = false); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ alter_compress_params_schema.all_options_table_uncompressed Table "alter_compress_params_schema.all_options_table_uncompressed" Column | Type | Modifiers | Storage | Stats target | Description @@ -1085,11 +1120,13 @@ PARTITION BY RANGE(sales_date) PARTITION all_options_partitioned_uncompressed_season3 VALUES LESS THAN('2021-10-01 00:00:00'), PARTITION all_options_partitioned_uncompressed_season4 VALUES LESS THAN(MAXVALUE) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. INSERT INTO alter_compress_params_schema.all_options_partitioned_uncompressed SELECT generate_series(1, 10), 'session1 item', '2021-02-01 00:00:00', 1000, '711'; INSERT INTO alter_compress_params_schema.all_options_partitioned_uncompressed SELECT generate_series(1, 10), 'session2 item', '2021-05-01 00:00:00', 1000, '722'; INSERT INTO alter_compress_params_schema.all_options_partitioned_uncompressed SELECT generate_series(1, 10), 'session3 item', '2021-08-01 00:00:00', 1000, '733'; INSERT INTO alter_compress_params_schema.all_options_partitioned_uncompressed SELECT generate_series(1, 10), 'session4 item', '2021-11-01 00:00:00', 1000, '744'; ALTER TABLE alter_compress_params_schema.all_options_partitioned_uncompressed SET (compresstype = 0, compress_level = 0, compress_chunk_size = 4096, compress_prealloc_chunks = 0, compress_byte_convert = false, compress_diff_convert = false); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
select relname, reloptions from pg_partition where relname = 'all_options_partitioned_uncompressed_season1'; relname | reloptions ----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -1155,11 +1192,13 @@ PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) SUBPARTITION all_options_subpartitioned_uncompressed_201902_b VALUES ( '2' ) ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. INSERT INTO alter_compress_params_schema.all_options_subpartitioned_uncompressed values ('201902', '1', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.all_options_subpartitioned_uncompressed values ('201902', '2', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.all_options_subpartitioned_uncompressed values ('201903', '1', generate_series(1, 10)); INSERT INTO alter_compress_params_schema.all_options_subpartitioned_uncompressed values ('201903', '2', generate_series(1, 10)); ALTER TABLE alter_compress_params_schema.all_options_subpartitioned_uncompressed SET (compresstype = 0, compress_level = 0, compress_chunk_size = 4096, compress_prealloc_chunks = 0, compress_byte_convert = false, compress_diff_convert = false); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
select relname, reloptions from pg_partition where relname = 'all_options_subpartitioned_uncompressed_201901_a'; relname | reloptions --------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/src/test/regress/expected/row_compression/normal_test.out b/src/test/regress/expected/row_compression/normal_test.out index 1559c84f5d..69581224f1 100644 --- a/src/test/regress/expected/row_compression/normal_test.out +++ b/src/test/regress/expected/row_compression/normal_test.out @@ -1,5 +1,6 @@ create schema normal_test; CREATE TABLE normal_test.tbl_pc(id int, c1 text) WITH(compresstype=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ normal_test.tbl_pc Table "normal_test.tbl_pc" Column | Type | Modifiers | Storage | Stats target | Description @@ -38,12 +39,17 @@ select count(*) from normal_test.tbl_pc where id < 100; -- normal index create index on normal_test.tbl_pc(id) WITH (compresstype=2,compress_chunk_size=1024); +WARNING: The compressed relation you are using is an unofficial supported extended feature. alter index normal_test.tbl_pc_id_idx set (compresstype=1); --failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: change compresstype OPTION is not supported alter index normal_test.tbl_pc_id_idx set (compress_chunk_size=2048); --failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: change compress_chunk_size OPTION is not supported alter index normal_test.tbl_pc_id_idx set (compress_prealloc_chunks=2); --success +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
alter index normal_test.tbl_pc_id_idx set (compress_level=2); --success +WARNING: The compressed relation you are using is an unofficial supported extended feature. set enable_seqscan = off; set enable_bitmapscan = off; select count(*) from normal_test.tbl_pc; @@ -63,6 +69,7 @@ CREATE TABLE normal_test.tbl_partition(id int) WITH(compresstype=2,compress_chun partition p6 values less than(60000), partition p7 values less than(70000) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into normal_test.tbl_partition select generate_series(1,65000); select count(*) from normal_test.tbl_partition; count @@ -94,6 +101,7 @@ select relname, reloptions from pg_partition where parentid in (Select relfileno (9 rows) create table normal_test.exchange_table(id int) WITH(compresstype=2,compress_chunk_size=1024); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ALTER TABLE normal_test.tbl_partition EXCHANGE PARTITION FOR(2500) WITH TABLE normal_test.exchange_table; NOTICE: Command without UPDATE GLOBAL INDEX will disable global index select count(*) from normal_test.tbl_partition; @@ -121,6 +129,7 @@ select relname, reloptions from pg_partition where parentid in (Select relfileno (10 rows) create index on normal_test.tbl_partition(id) local WITH (compresstype=2,compress_chunk_size=1024); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ normal_test.tbl_partition Table "normal_test.tbl_partition" Column | Type | Modifiers | Storage | Stats target | Description @@ -149,21 +158,29 @@ select relname, reloptions from pg_partition where parentid in (Select relfileno -- unsupport alter index normal_test.tbl_partition_id_idx set (compresstype=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: change compresstype OPTION is not supported alter index normal_test.tbl_partition_id_idx set (compress_chunk_size=2048); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: change compress_chunk_size OPTION is not supported alter index normal_test.tbl_partition_id_idx set (compress_prealloc_chunks=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: change partition compress_prealloc_chunks OPTION is not supported create index rolcompress_index on normal_test.tbl_pc(id) with (compress_chunk_size=4096); ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype. create table rolcompress_table_001(a int) with (compresstype=2, compress_prealloc_chunks=3); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: invalid compress_prealloc_chunks 3, must be less than 2 for rolcompress_table_001 -- support alter table normal_test.tbl_pc set (compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. -- create table like test create table normal_test.including_all(id int) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table normal_test.including_all_new(like normal_test.including_all including all); --success +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table normal_test.including_all_new2(like normal_test.including_all including reloptions); --success +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
\d+ normal_test.including_all_new Table "normal_test.including_all_new" Column | Type | Modifiers | Storage | Stats target | Description @@ -181,31 +198,44 @@ Has OIDs: no Options: orientation=row, compresstype=2 create table normal_test.segment_off(id int) with (compresstype=2,segment=off); --success +WARNING: The compressed relation you are using is an unofficial supported extended feature. --compress_diff_convert布尔值: create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=1,compress_diff_convert=t); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=1,compress_diff_convert='t'); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=1,compress_diff_convert='f'); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=1,compress_diff_convert=yes); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=1,compress_diff_convert='no'); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=1,compress_diff_convert='1'); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; --compress_byte_convert布尔值: create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=t,compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert='t',compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=f,compress_diff_convert=false); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=yes,compress_diff_convert=TRUE); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert=NO,compress_diff_convert=OFF); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
drop table if exists normal_test.tb1; create table normal_test.tb1 (c_int int, c_bool boolean) with (Compresstype=2,Compress_chunk_size=512,compress_byte_convert='1',compress_diff_convert=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. drop table if exists normal_test.tb1; --segment参数: create table normal_test.t_bool_value (c_int int, c_bool boolean) with (segment = on); diff --git a/src/test/regress/expected/row_compression/pg_table_size.out b/src/test/regress/expected/row_compression/pg_table_size.out index 15321f37df..1c6175294a 100644 --- a/src/test/regress/expected/row_compression/pg_table_size.out +++ b/src/test/regress/expected/row_compression/pg_table_size.out @@ -2,8 +2,11 @@ create schema table_size_schema; CREATE TABLE table_size_schema.normal_table(id int); CREATE TABLE table_size_schema.compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=1024); +WARNING: The compressed relation you are using is an unofficial supported extended feature. CREATE TABLE table_size_schema.compressed_table_2048(id int) WITH(compresstype=2, compress_chunk_size=2048); +WARNING: The compressed relation you are using is an unofficial supported extended feature. CREATE TABLE table_size_schema.compressed_table_4096(id int) WITH(compresstype=2, compress_chunk_size=4096); +WARNING: The compressed relation you are using is an unofficial supported extended feature. select pg_table_size('table_size_schema.normal_table'); pg_table_size --------------- @@ -41,12 +44,15 @@ partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 create table partition_table_size_schema.compressed_partition_1024(INV_DATE_SK integer) WITH(compresstype=2, compress_chunk_size=1024) partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000)); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
create table partition_table_size_schema.compressed_partition_2048(INV_DATE_SK integer) WITH(compresstype=2, compress_chunk_size=2048) partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000)); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table partition_table_size_schema.compressed_partition_4096(INV_DATE_SK integer) WITH(compresstype=2, compress_chunk_size=4096) partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000)); +WARNING: The compressed relation you are using is an unofficial supported extended feature. select pg_table_size('partition_table_size_schema.normal_partition'); pg_table_size --------------- diff --git a/src/test/regress/expected/row_compression/row_compress_feature.out b/src/test/regress/expected/row_compression/row_compress_feature.out index 4e80358045..2d81308c53 100644 --- a/src/test/regress/expected/row_compression/row_compress_feature.out +++ b/src/test/regress/expected/row_compression/row_compress_feature.out @@ -8,88 +8,110 @@ create schema compress_normal_user; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 2, 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 2 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(compresstype=2, compress_chunk_size=512, compress_byte_convert=true, compress_diff_convert=true, compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 2, 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 3 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(compresstype=2, compress_chunk_size=512); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 2, 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 4 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(compresstype=2, compress_chunk_size=2048, compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 2, 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 5 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(compresstype=1, compress_chunk_size=2048, compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 2, 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 6 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(compresstype=2, compress_chunk_size=2048, compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 2, 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 7 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(compresstype=1, compress_chunk_size=512, compress_prealloc_chunks=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','aaaaaaaabbbbbbbbbbbcccccccc', 2, 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc', 'aaaaaaaabbbbbbbbbbbcccccccc'); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 8 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) WITH(compresstype=2, compress_chunk_size=512, compress_prealloc_chunks=5); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','trgtrh', 12365); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 9 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) WITH(compresstype=1, compress_chunk_size=512, compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','trgtrh', 12365); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 10 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (compresstype=2, compress_chunk_size = 512, compress_level = 1, compress_prealloc_chunks=1,compress_diff_convert = true, compress_byte_convert=true) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','trgtrh', 12365); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 11 drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (storage_type = ustore, compresstype=2, compress_chunk_size = 512, compress_level = 1, compress_prealloc_chunks=1,compress_diff_convert = true, compress_byte_convert=true) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','trgtrh', 12365); create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTWO) WITH(storage_type = ustore,compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
checkpoint; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; --testcase 12 @@ -97,6 +119,7 @@ drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping create table compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)) WITH(storage_type = ustore, compresstype=2, compress_chunk_size = 512, compress_level = 1, compress_prealloc_chunks=1,compress_diff_convert = true, compress_byte_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
drop table if exists compress_normal_user.row_noncompression_test_tbl1 cascade; NOTICE: table "row_noncompression_test_tbl1" does not exist, skipping create table compress_normal_user.row_noncompression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50)); @@ -121,6 +144,7 @@ select count(*) from compress_normal_user.row_compression_test_tbl1; -- seq scan (1 row) create index compress_normal_user.row_compression_test_tbl1_idx on compress_normal_user.row_compression_test_tbl1(COLUMNONE, COLUMNTHREE) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
checkpoint; select * from compress_normal_user.row_compression_test_tbl1 order by COLUMNONE limit 3; -- index scan columnone | columntwo | columnthree | columnfour | columnfive | columnsix | columnseven | columneight | columnnine | columnten | columneleven | columntwelve | columnthirteen | columnfourteen | columnfifteem @@ -202,16 +226,22 @@ drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; drop table if exists compress_normal_user.row_compression_test_tbl1 cascade; NOTICE: table "row_compression_test_tbl1" does not exist, skipping CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (storage_type = astore, compresstype=3, compress_chunk_size = 512, compress_level = 1, compress_prealloc_chunks=1,compress_diff_convert = true, compress_byte_convert=true) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); --error +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: row-compression feature current not support algorithm is PGZSTD. CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (storage_type = astore, compresstype=3, compress_chunk_size = 512, compress_prealloc_chunks=1,compress_diff_convert = true, compress_byte_convert=true) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); --error +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: row-compression feature current not support algorithm is PGZSTD. CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (storage_type = astore, compresstype=3, compress_chunk_size = 512, compress_prealloc_chunks=1,compress_byte_convert=true) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); --error +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: row-compression feature current not support algorithm is PGZSTD. CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (storage_type = astore, compresstype=3, compress_chunk_size = 512, compress_prealloc_chunks=1,compress_diff_convert=true) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); --error +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: row-compression feature current not support algorithm is PGZSTD. 
CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (storage_type = ustore, compresstype=3, compress_chunk_size = 512, compress_prealloc_chunks=1) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); --error +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: row-compression feature current not support algorithm is PGZSTD. CREATE TABLE compress_normal_user.row_compression_test_tbl1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER) with (storage_type = astore, compresstype=3, compress_chunk_size = 512, compress_prealloc_chunks=1) PARTITION BY RANGE(COLUMNONE)(PARTITION P1 VALUES LESS THAN(1000),PARTITION P2 VALUES LESS THAN(2000),PARTITION P3 VALUES LESS THAN(3000),PARTITION P4 VALUES LESS THAN(4000),PARTITION P5 VALUES LESS THAN(5000),PARTITION P6 VALUES LESS THAN(MAXVALUE)); --ok +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: row-compression feature current not support algorithm is PGZSTD. 
insert into compress_normal_user.row_compression_test_tbl1 values(generate_series(0, 1600),'ZAINSERT1','trgtrh', 12365); ERROR: relation "compress_normal_user.row_compression_test_tbl1" does not exist on datanode1 @@ -275,8 +305,10 @@ drop table if exists row_noncompression_test_tbl1_kkk cascade; drop table if exists row_compression_test_tbl1_kkk cascade; NOTICE: table "row_compression_test_tbl1_kkk" does not exist, skipping create table row_compression_test_tbl1_kkk(COLUMNONE INTEGER ,COLUMNTWO CHAR(50)) WITH(compresstype=2, compress_chunk_size=512, compress_prealloc_chunks=1, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into row_compression_test_tbl1_kkk values(generate_series(0, 500),'ZAINSERT1'); create index row_compression_test_tbl1_kkk_idx on row_compression_test_tbl1_kkk(COLUMNONE, COLUMNTWO) WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. checkpoint; select compress_func_findpath('row_compression_test_tbl1_kkk'); -- need ignore compress_func_findpath @@ -421,6 +453,7 @@ checkpoint; drop table if exists t_compression_test_1 cascade; NOTICE: table "t_compression_test_1" does not exist, skipping create table t_compression_test_1(COLUMNONE INTEGER ,COLUMNTWO CHAR(50)) WITH(compresstype=2, compress_chunk_size=512, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into t_compression_test_1 values(generate_series(0, 500),'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); checkpoint; select * from t_compression_test_1 order by COLUMNONE limit 10; @@ -470,6 +503,7 @@ NOTICE: table "nocompress_0039" does not exist, skipping create table compress_normal_user.nocompress_0039( COLUMNONE INTEGER ,COLUMNTWO CHAR(50) ,COLUMNTHREE VARCHAR(50) ,COLUMNFOUR INTEGER ,COLUMNFIVE CHAR(50) ,COLUMNSIX VARCHAR(50) ,COLUMNSEVEN CHAR(50) ,COLUMNEIGHT CHAR(50) ,COLUMNNINE VARCHAR(50) ,COLUMNTEN VARCHAR(50) ,COLUMNELEVEN CHAR(50) ,COLUMNTWELVE CHAR(50) ,COLUMNTHIRTEEN VARCHAR(50) ,COLUMNFOURTEEN CHAR(50) ,COLUMNFIFTEEM VARCHAR(50) )WITH(compresstype=2, compress_chunk_size=1024, compress_byte_convert=true, compress_diff_convert=true); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.nocompress_0039 values(generate_series(0,160000 * 1), 'ZAINSERT1','abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 2, 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx'); checkpoint; select count(*) from compress_normal_user.nocompress_0039; @@ -500,6 +534,7 @@ partition p1 values (1), partition p2 values (2), partition p3 values (3) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
alter table compress_normal_user.t_part_csf11 add partition p6 values(4); alter table compress_normal_user.t_part_csf11 add partition p7 values(5); insert into compress_normal_user.t_part_csf11 values(1, 'dscdscds'),(2, 'dscdscds'),(3, 'dscdscds'),(4, 'dscdscds'),(5, 'dscdscds'); @@ -526,6 +561,7 @@ partition p1 VALUES LESS THAN (1) , partition p2 VALUES LESS THAN (2), partition p3 VALUES LESS THAN (3) ) ; +WARNING: The compressed relation you are using is an unofficial supported extended feature. alter table compress_normal_user.t_part_csf22 add partition p6 VALUES LESS THAN(4) ; alter table compress_normal_user.t_part_csf22 add partition p7 VALUES LESS THAN(5); insert into compress_normal_user.t_part_csf22 values(1, 'dscdscds'),(2, 'dscdscds'),(3, 'dscdscds'); @@ -550,6 +586,7 @@ partition p1, partition p2, partition p3 ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.t_part_csf44 values(1, 'dscdscds'),(2, 'dscdscds'),(3, 'dscdscds'); checkpoint; select * from compress_normal_user.t_part_csf44 order by id asc; @@ -584,6 +621,7 @@ PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) SUBPARTITION p_201902_b VALUES ( '2' ) ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.list_list values('201902', '1', '1', 1),('201902', '2', '1', 1),('201902', '1', '1', 1),('201903', '2', '1', 1),('201903', '1', '1', 1),('201903', '2', '1', 1); checkpoint; select * from compress_normal_user.list_list order by month_code, dept_code; @@ -621,6 +659,7 @@ PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) SUBPARTITION p_201902_b ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.list_hash values('201902', '1', '1', 1),('201902', '2', '1', 1),('201902', '3', '1', 1),('201903', '4', '1', 1),('201903', '5', '1', 1),('201903', '6', '1', 1); checkpoint; select * from compress_normal_user.list_hash order by month_code, dept_code; @@ -658,6 +697,7 @@ PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) SUBPARTITION p_201902_b values less than ('6') ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.list_range values('201902', '1', '1', 1),('201902', '2', '1', 1),('201902', '3', '1', 1),('201903', '4', '1', 1),('201903', '5', '1', 1); checkpoint; select * from compress_normal_user.list_range order by month_code, dept_code; @@ -694,6 +734,7 @@ PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) SUBPARTITION p_201902_b values ('2') ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.range_list values('201902', '1', '1', 1),('201902', '2', '1', 1),('201902', '1', '1', 1),('201903', '2', '1', 1),('201903', '1', '1', 1),('201903', '2', '1', 1); checkpoint; select * from compress_normal_user.range_list order by month_code, dept_code; @@ -731,6 +772,7 @@ PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) SUBPARTITION p_201902_b ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.range_hash values('201902', '1', '1', 1),('201902', '2', '1', 1),('201902', '1', '1', 1),('201903', '2', '1', 1),('201903', '1', '1', 1),('201903', '2', '1', 1); checkpoint; select * from compress_normal_user.range_hash order by month_code, dept_code; @@ -768,6 +810,7 @@ PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.range_range values('201902', '1', '1', 1),('201902', '2', '1', 1),('201902', '1', '1', 1),('201903', '2', '1', 1),('201903', '1', '1', 1),('201903', '2', '1', 1); checkpoint; select * from compress_normal_user.range_range order by month_code, dept_code; @@ -805,6 +848,7 @@ PARTITION BY hash (month_code) SUBPARTITION BY LIST (dept_code) SUBPARTITION p_201902_b VALUES ( '2' ) ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.hash_list values('201901', '1', '1', 1),('201901', '2', '1', 1),('201901', '1', '1', 1),('201903', '2', '1', 1),('201903', '1', '1', 1),('201903', '2', '1', 1); checkpoint; select * from compress_normal_user.hash_list order by month_code, dept_code; @@ -842,6 +886,7 @@ PARTITION BY hash (month_code) SUBPARTITION BY hash (dept_code) SUBPARTITION p_201902_b ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
insert into compress_normal_user.hash_hash values('201901', '1', '1', 1),('201901', '2', '1', 1),('201901', '1', '1', 1),('201903', '2', '1', 1),('201903', '1', '1', 1),('201903', '2', '1', 1); checkpoint; select * from compress_normal_user.hash_hash order by month_code, dept_code; @@ -879,6 +924,7 @@ PARTITION BY hash (month_code) SUBPARTITION BY range (dept_code) SUBPARTITION p_201902_b VALUES LESS THAN ( '3' ) ) ); +WARNING: The compressed relation you are using is an unofficial supported extended feature. insert into compress_normal_user.hash_range values('201901', '1', '1', 1),('201901', '2', '1', 1),('201901', '1', '1', 1),('201903', '2', '1', 1),('201903', '1', '1', 1),('201903', '2', '1', 1); checkpoint; select * from compress_normal_user.hash_range order by month_code, dept_code; diff --git a/src/test/regress/expected/row_compression/unsupported_feature.out b/src/test/regress/expected/row_compression/unsupported_feature.out index 9658f08ef1..548bce2981 100644 --- a/src/test/regress/expected/row_compression/unsupported_feature.out +++ b/src/test/regress/expected/row_compression/unsupported_feature.out @@ -1,21 +1,26 @@ create schema unsupported_feature; -- unspport compressType: 4 CREATE TABLE unsupported_feature.compressed_table_1024(id int) WITH(compresstype=4, compress_chunk_size=1024); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: value 4 out of bounds for option "compresstype" DETAIL: Valid values are between "0" and "2". -- unspport compress_chunk_size: 2000 CREATE TABLE unsupported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=2000); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
ERROR: invalid compress_chunk_size 2000, must be one of 512, 1024, 2048 or 4096 for compressed_table_1024 -- unspport compress_prealloc_chunks: -1 CREATE TABLE unsupported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_prealloc_chunks=-1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: value -1 out of bounds for option "compress_prealloc_chunks" DETAIL: Valid values are between "0" and "7". -- unspport compress_prealloc_chunks: 8 CREATE TABLE unsupported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_prealloc_chunks=8); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: value 8 out of bounds for option "compress_prealloc_chunks" DETAIL: Valid values are between "0" and "7". -- unspport compress_level: 128 CREATE TABLE unsupported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_level=128); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: value 128 out of bounds for option "compress_level" DETAIL: Valid values are between "-31" and "31". -- compresstype cant be used with column table @@ -40,6 +45,7 @@ CREATE TABLE unsupported_feature.compressed_table_1024(id int) WITH(compress_lev ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype. -- unspport exchange CREATE TABLE unsupported_feature.exchange_table(id int) WITH(compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
CREATE TABLE unsupported_feature.alter_table(id int) partition by range(id) ( partition p0 values less than(5000), @@ -55,6 +61,7 @@ ALTER TABLE unsupported_feature.alter_table EXCHANGE PARTITION FOR(2500) WITH TA ERROR: tables in ALTER TABLE EXCHANGE PARTITION must have the same type of compress -- unspport alter compress_chunk_size create TABLE unsupported_feature.alter_table_option(id int) WITH(compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. \d+ unsupported_feature.alter_table_option Table "unsupported_feature.alter_table_option" Column | Type | Modifiers | Storage | Stats target | Description @@ -64,33 +71,48 @@ Has OIDs: no Options: orientation=row, compresstype=2 ALTER TABLE unsupported_feature.alter_table_option SET(compresstype=0); -- success +WARNING: The compressed relation you are using is an unofficial supported extended feature. ALTER TABLE unsupported_feature.alter_table_option SET(compress_chunk_size=2048); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_level=0, compress_chunk_size=4096, compress_prealloc_chunks=0, compress_byte_convert=false, compress_diff_convert=false should be set when compresstype=0 ALTER TABLE unsupported_feature.alter_table_option SET(compresstype=2, compress_level=2, compress_prealloc_chunks=0); -- success +WARNING: The compressed relation you are using is an unofficial supported extended feature. -- alter compress_byte_convert\compress_diff_convert create table unsupported_feature.rolcompress_table_001(a int) with (compresstype=2, compress_diff_convert=true); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_diff_convert should be used with compress_byte_convert. 
create table unsupported_feature.t_rowcompress_0007(cid int, name varchar2) with (compresstype=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. alter table unsupported_feature.t_rowcompress_0007 set (compress_diff_convert=true); --fail +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_diff_convert should be used with compress_byte_convert. alter table unsupported_feature.t_rowcompress_0007 set (compress_byte_convert=true, compress_diff_convert=true); --success +WARNING: The compressed relation you are using is an unofficial supported extended feature. alter table unsupported_feature.t_rowcompress_0007 set (compress_level=31); --failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_level should be used with ZSTD algorithm. create table unsupported_feature.t_rowcompress_pglz_compresslevel(id int) with (compresstype=1,compress_level=2); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_level should be used with ZSTD algorithm. create table unsupported_feature.t_rowcompress_pglz_compresslevel(id int) with (compresstype=2,compress_level=2); -- success +WARNING: The compressed relation you are using is an unofficial supported extended feature. CREATE TABLE unsupported_feature.index_test(id int, c1 text); -- segment CREATE TABLE unsupported_feature.segment_table(id int, c1 text) WITH(compresstype=2, segment=on); --failed ERROR: compresstype can not be used in segment table, column table, view, unlogged table or temp table. CREATE INDEX on unsupported_feature.index_test(c1) WITH(compresstype=2, segment=on); --failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: Can not use compress option in segment storage. 
-- set compress_diff_convert create table unsupported_feature.compress_byte_test(id int) with (compresstype=2, compress_byte_convert=false, compress_diff_convert = true); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_diff_convert should be used with compress_byte_convert. create table unsupported_feature.test(id int) with (compresstype=2); -- success +WARNING: The compressed relation you are using is an unofficial supported extended feature. alter table unsupported_feature.test set(Compresstype=1); -- success +WARNING: The compressed relation you are using is an unofficial supported extended feature. alter table unsupported_feature.test set(Compress_level=3); -- failed +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: compress_level should be used with ZSTD algorithm. create table lm_rcp_4 (c1 int,c2 varchar2,c3 number,c4 money,c5 CHAR(20),c6 CLOB,c7 blob,c8 DATE,c9 BOOLEAN,c10 TIMESTAMP,c11 point,columns12 cidr) with(Compresstype=2,Compress_chunk_size=512) partition by list(c1) subpartition by range(c3)( @@ -98,12 +120,16 @@ create table lm_rcp_4 (c1 int,c2 varchar2,c3 number,c4 money,c5 CHAR(20),c6 CLOB partition ts2 values(6,7,8,9,10), partition ts3 values(11,12,13,14,15)(subpartition ts31 values less than(5000),subpartition ts32 values less than(10000),subpartition ts33 values less than(MAXVALUE)), partition ts4 values(default)); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create unique index indexg_lm_rcp_4 on lm_rcp_4(c1 NULLS first,c2,c3) global with(FILLFACTOR=80,Compresstype=2,Compress_chunk_size=512,compress_byte_convert=1,compress_diff_convert=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. --s3. 
alter index indexg_lm_rcp_4 rename to indexg_lm_rcp_4_newname; --s4.修改压缩类型 alter index indexg_lm_rcp_4_newname set(Compresstype=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. ERROR: change compresstype OPTION is not supported --s5.修改Compress_level alter index indexg_lm_rcp_4_newname set(Compress_level=3); +WARNING: The compressed relation you are using is an unofficial supported extended feature. diff --git a/src/test/regress/output/row_compression/row_compression_basebackup.source b/src/test/regress/output/row_compression/row_compression_basebackup.source index ee90bb59bf..0ed8a1df3a 100644 --- a/src/test/regress/output/row_compression/row_compression_basebackup.source +++ b/src/test/regress/output/row_compression/row_compression_basebackup.source @@ -1,7 +1,9 @@ --?.* CREATE DATABASE --?.* +--?.* CREATE TABLE +--?.* CREATE INDEX INSERT 0 1000 CHECKPOINT diff --git a/src/test/regress/output/row_compression/twophase.source b/src/test/regress/output/row_compression/twophase.source index 79e5b1c5bf..06cf76307b 100644 --- a/src/test/regress/output/row_compression/twophase.source +++ b/src/test/regress/output/row_compression/twophase.source @@ -1,12 +1,18 @@ begin; create table test_abort1(a text,b integer) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table test_abort2(a text,b integer) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table test_abort3(a text,b integer) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. prepare transaction 'the first prepare transaction'; rollback prepared 'the first prepare transaction'; create table test_commit1(a text,b integer) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
create table test_commit2(a text,b integer) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table test_commit3(a text,b integer) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. begin; drop table test_commit1; drop table test_commit2; @@ -23,16 +29,24 @@ INSERT 0 1 start transaction; create table "compress_2PC".normal(a text,b integer); create table "compress_2PC".compress1(a text,b integer) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".compress2(a text,b integer) with (compresstype=2,compress_chunk_size=2048,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".compress3(a text,b integer) with (compresstype=2,compress_chunk_size=1024,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".compress4(a text,b integer) with (compresstype=2,compress_chunk_size=512,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. rollback; -- drop commit create table "compress_2PC".normal(id int); create table "compress_2PC".compress1(id int) with (compresstype=2); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".compress2(id int) with (compresstype=2,compress_chunk_size=2048,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".compress3(id int) with (compresstype=2,compress_chunk_size=1024,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
create table "compress_2PC".compress4(id int) with (compresstype=2,compress_chunk_size=512,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. start transaction; drop table "compress_2PC".normal; drop table "compress_2PC".compress1; @@ -43,13 +57,18 @@ commit; -- 2pc create rollback begin; create table "compress_2PC".test_abort2(b integer) with (compresstype=2,compress_chunk_size=2048,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. prepare transaction 'the first prepare transaction'; rollback prepared 'the first prepare transaction'; --2pc drop rollback create table "compress_2PC".test_commit1(a text,b integer) with (compresstype=2,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".test_commit2(a text,b integer) with (compresstype=2,compress_chunk_size=2048,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".test_commit3(a text,b integer) with (compresstype=2,compress_chunk_size=1024,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. create table "compress_2PC".test_commit4(a text,b integer) with (compresstype=2,compress_chunk_size=512,compress_prealloc_chunks=1); +WARNING: The compressed relation you are using is an unofficial supported extended feature. 
create table "compress_2PC".test_commit5(a text,b integer); begin; drop table "compress_2PC".test_commit1; -- Gitee From 495043de6021c007191fa0e2fb109c4e1a7c727e Mon Sep 17 00:00:00 2001 From: shenzheng4 Date: Mon, 23 Sep 2024 01:32:48 +0800 Subject: [PATCH 342/347] =?UTF-8?q?gs=5Fguc=20reload=20before=20read=20gau?= =?UTF-8?q?ssdb.state=20=EF=BC=88cherry=20picked=20commit=20from=209f5cbea?= =?UTF-8?q?=EF=BC=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/bin/gs_guc/pg_guc.cpp | 71 +++++++++++++++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 6 deletions(-) diff --git a/src/bin/gs_guc/pg_guc.cpp b/src/bin/gs_guc/pg_guc.cpp index eee52a2d18..635ddabbde 100644 --- a/src/bin/gs_guc/pg_guc.cpp +++ b/src/bin/gs_guc/pg_guc.cpp @@ -54,6 +54,7 @@ #include "bin/elog.h" #include "openssl/rand.h" +#include "replication/replicainternal.h" #include "common/config/cm_config.h" #if defined(__CYGWIN__) @@ -164,6 +165,7 @@ char pid_file[MAXPGPATH]; char gucconf_file[MAXPGPATH] = {0x00}; char tempguc_file[MAXPGPATH] = {0x00}; char gucconf_lock_file[MAXPGPATH] = {0x00}; +char gaussdb_state_file[MAXPGPATH] = {0}; const int MAX_PARAM_LEN = 1024; const int MAX_VALUE_LEN = 1024; @@ -1550,6 +1552,31 @@ static char* do_guccheck(const char* param) return guc_value; } +void ReadDBStateFile(GaussState* state) +{ + FILE* statef = NULL; + + if (state == NULL) { + write_stderr(_(" Could not get information from gaussdb.state\n")); + return; + } + + statef = fopen(gaussdb_state_file, "r"); + if (statef == NULL) { + if (errno == ENOENT) { + write_stderr(_(" file \"%s\" is not exist\n"), gaussdb_state_file); + } else { + write_stderr(_(" open file \"%s\" failed : %s\n"), gaussdb_state_file, strerror(errno)); + } + exit(1); + } + if (0 == fread(state, 1, sizeof(GaussState), statef)) { + write_stderr(_(" read file \"%s\" failed\n"), gaussdb_state_file); + } + (void)fclose(statef); + statef = NULL; +} + /* * @@GaussDB@@ * Brief : 
static void do_config_reload() @@ -1559,24 +1586,52 @@ static char* do_guccheck(const char* param) int do_config_reload() { pgpid_t pid; + GaussState state; + errno_t tnRet = 0; pid = get_pgpid(); - if (pid == 0) /* no pid file */ - { + if (pid == 0) { /* no pid file */ write_stderr(_("%s: PID file \"%s\" does not exist\n"), progname, pid_file); write_stderr(_("Is server running?\n")); return FAILURE; - } else if (pid < 0) /* standalone backend, not postmaster */ - { + } else if (pid < 0) { /* standalone backend, not postmaster */ pid = -pid; write_stderr(_("%s: cannot reload server; " "single-user server is running (PID: %ld)\n"), - progname, - pid); + progname, pid); write_stderr(_("Please terminate the single-user server and try again.\n")); return FAILURE; } + if (kill(pid, 0) != 0) { + write_stderr(_("gaussdb is not alive.\n")); + return FAILURE; + } + + tnRet = memset_s(&state, sizeof(state), 0, sizeof(state)); + securec_check_c(tnRet, "\0", "\0"); + ReadDBStateFile(&state); + switch (state.state) { + case NORMAL_STATE: + case NEEDREPAIR_STATE: + case WAITING_STATE: + case DEMOTING_STATE: + case PROMOTING_STATE: + case BUILDING_STATE: + case CATCHUP_STATE: + break; + case COREDUMP_STATE: + write_stderr(_("ERROR: gaussDB state is Coredump\n")); + return FAILURE; + case UNKNOWN_STATE: + case STARTING_STATE: + write_stderr(_("ERROR: gaussDB state is Unknown or Staring\n")); + return FAILURE; + default: + write_stderr(_("ERROR: gaussDB state is Invalid\n")); + return FAILURE; + } + if (kill((pid_t)pid, sig) != 0) { write_stderr( _("%s: could not send reload signal (PID: %ld sig=%d): %s\n"), progname, pid, sig, gs_strerror(errno)); @@ -3349,9 +3404,13 @@ void get_instance_configfile(const char* datadir) securec_check_ss_c(nRet, "\0", "\0"); nRet = snprintf_s(gucconf_lock_file, MAXPGPATH, MAXPGPATH - 1, "%s/pg_hba.conf.lock", datadir); securec_check_ss_c(nRet, "\0", "\0"); + nRet = snprintf_s(gaussdb_state_file, MAXPGPATH, MAXPGPATH - 1, "%s/gaussdb.state", 
datadir); + securec_check_ss_c(nRet, "\0", "\0"); } else { nRet = snprintf_s(pid_file, MAXPGPATH, MAXPGPATH - 1, "%s/postmaster.pid", datadir); securec_check_ss_c(nRet, "\0", "\0"); + nRet = snprintf_s(gaussdb_state_file, MAXPGPATH, MAXPGPATH - 1, "%s/gaussdb.state", datadir); + securec_check_ss_c(nRet, "\0", "\0"); nRet = snprintf_s(gucconf_file, MAXPGPATH, MAXPGPATH - 1, "%s/postgresql.conf", datadir); securec_check_ss_c(nRet, "\0", "\0"); nRet = snprintf_s(tempguc_file, MAXPGPATH, MAXPGPATH - 1, "%s/%s", datadir, TEMP_PGCONF_FILE); -- Gitee From 5a50138d4a7c1afcbb284c7e91c2d32a263227d3 Mon Sep 17 00:00:00 2001 From: laishenghao Date: Mon, 23 Sep 2024 15:35:24 +0800 Subject: [PATCH 343/347] =?UTF-8?q?=E8=A7=A3=E5=86=B3u=5Fsess=E6=9C=AA?= =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E6=97=B6elog=E8=B0=83=E7=94=A8?= =?UTF-8?q?=E5=AF=BC=E8=87=B4core=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/backend/utils/error/elog.cpp | 29 +++++++++++++++---------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/src/common/backend/utils/error/elog.cpp b/src/common/backend/utils/error/elog.cpp index 31ee6fdbb6..e14e25abe1 100644 --- a/src/common/backend/utils/error/elog.cpp +++ b/src/common/backend/utils/error/elog.cpp @@ -208,6 +208,11 @@ bool in_error_recursion_trouble(void) return (t_thrd.log_cxt.recursion_depth > 2); } +inline bool IsEnableTranslation() +{ + return u_sess && u_sess->attr.attr_common.enable_nls; +} + /* * One of those fallback steps is to stop trying to localize the error * message, since there's a significant probability that that's exactly @@ -216,7 +221,7 @@ bool in_error_recursion_trouble(void) static inline const char* err_gettext(const char* str) { #ifdef ENABLE_NLS - if ((!u_sess->attr.attr_common.enable_nls) || in_error_recursion_trouble()) + if ((!IsEnableTranslation()) || in_error_recursion_trouble()) return str; else return gettext(str); @@ -1039,7 
+1044,7 @@ int errcode_for_socket_access(void) char* fmtbuf = NULL; \ StringInfoData buf; \ /* Internationalize the error format string */ \ - if (u_sess->attr.attr_common.enable_nls \ + if (IsEnableTranslation() \ && (!in_error_recursion_trouble())) \ fmt = dngettext(edata->domain, fmt_singular, fmt_plural, n); \ else \ @@ -1089,7 +1094,7 @@ int errmsg(const char* fmt, ...) CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(message, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(message, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1155,7 +1160,7 @@ int errdetail(const char* fmt, ...) CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(detail, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(detail, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1199,7 +1204,7 @@ int errdetail_log(const char* fmt, ...) CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(detail_log, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(detail_log, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1235,7 +1240,7 @@ int errcause(const char* fmt, ...) CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(cause, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(cause, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1251,7 +1256,7 @@ int erraction(const char* fmt, ...) 
CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(action, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(action, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1269,7 +1274,7 @@ int errhint(const char* fmt, ...) CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(hint, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(hint, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1288,7 +1293,7 @@ int errquery(const char* fmt, ...) CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(internalquery, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(internalquery, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1311,7 +1316,7 @@ int errcontext(const char* fmt, ...) CHECK_STACK_DEPTH(); oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(context, true, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(context, true, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); t_thrd.log_cxt.recursion_depth--; @@ -1975,7 +1980,7 @@ char* format_elog_string(const char* fmt, ...) oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(message, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(message, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); @@ -3697,7 +3702,7 @@ void SimpleLogToServer(int elevel, bool silent, const char* fmt, ...) 
oldcontext = MemoryContextSwitchTo(ErrorContext); - EVALUATE_MESSAGE(message, false, u_sess->attr.attr_common.enable_nls); + EVALUATE_MESSAGE(message, false, IsEnableTranslation()); MemoryContextSwitchTo(oldcontext); -- Gitee From 5a6a8fca6debed31ef71fce6b1cd5ce3637919cd Mon Sep 17 00:00:00 2001 From: lyanna <1016943941@qq.com> Date: Mon, 23 Sep 2024 16:20:35 +0800 Subject: [PATCH 344/347] =?UTF-8?q?reinforce=20checkneedswitch=20=EF=BC=88?= =?UTF-8?q?cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp index 3de02ae2cc..53748c73ed 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp @@ -56,10 +56,8 @@ bool CheckNeedSwitch(UndoPersistence upersistence) if (uzone == NULL) { ereport(PANIC, (errmsg("CheckNeedSwitch: uzone is NULL"))); } - if ((uint64)UNDO_PTR_GET_OFFSET(uzone->GetInsertURecPtr()) + UNDO_LOG_SEGMENT_SIZE > UNDO_LOG_MAX_SIZE) { - return true; - } - return false; + + return uzone->CheckNeedSwitch(); } void RollbackIfUndoExceeds(TransactionId xid, uint64 size) -- Gitee From 5ec9f92edc35594b840c7519299a0603b777afeb Mon Sep 17 00:00:00 2001 From: lyanna <1016943941@qq.com> Date: Mon, 23 Sep 2024 16:16:57 +0800 Subject: [PATCH 345/347] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=AE=BF=E9=97=AE?= =?UTF-8?q?=E5=B7=B2=E8=A2=ABrelease=20buffer=E9=97=AE=E9=A2=98=20?= =?UTF-8?q?=EF=BC=88cherry=20picked=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/access/ubtree/ubtpage.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/gausskernel/storage/access/ubtree/ubtpage.cpp b/src/gausskernel/storage/access/ubtree/ubtpage.cpp index 35ee49897f..482190e298 100644 --- a/src/gausskernel/storage/access/ubtree/ubtpage.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtpage.cpp @@ -869,6 +869,7 @@ static bool UBTreeMarkPageHalfDead(Relation rel, Buffer leafbuf, BTStack stack) itemid = PageGetItemId(page, nextoffset); itup = (IndexTuple) PageGetItem(page, itemid); if (UBTreeTupleGetDownLink(itup) != rightsib) { + OffsetNumber topparentblkno = BufferGetBlockNumber(topparent); _bt_relbuf(rel, topparent); Buffer rbuf = _bt_getbuf(rel, rightsib, BT_READ); Page rpage = BufferGetPage(rbuf); @@ -880,7 +881,7 @@ static bool UBTreeMarkPageHalfDead(Relation rel, Buffer leafbuf, BTStack stack) } elog(ERROR, "right sibling %u of block %u is not next child %u of block %u in index \"%s\"", rightsib, target, UBTreeTupleGetDownLink(itup) != rightsib, - BufferGetBlockNumber(topparent), RelationGetRelationName(rel)); + topparentblkno, RelationGetRelationName(rel)); } /* -- Gitee From 0b8779bd92239a354132d7960cbf63b695a08286 Mon Sep 17 00:00:00 2001 From: liuheng Date: Fri, 28 Jun 2024 16:41:39 +0800 Subject: [PATCH 346/347] =?UTF-8?q?fix=20libpgtypes=E6=89=93=E5=8C=85?= =?UTF-8?q?=E7=89=88=E6=9C=AC=E9=97=AE=E9=A2=98=20=EF=BC=88cherry=20picked?= =?UTF-8?q?=20commit=20from=20?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- build/script/aarch64_opengauss_list | 4 +--- build/script/x86_64_opengauss_list | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/build/script/aarch64_opengauss_list b/build/script/aarch64_opengauss_list index 7b7e88d826..fb51463074 100644 --- a/build/script/aarch64_opengauss_list +++ b/build/script/aarch64_opengauss_list @@ -840,9 +840,7 @@ ./lib/libecpg.a ./lib/libecpg_compat.a ./lib/libpgtypes.a -./lib/libpgtypes.so -./lib/libpgtypes.so.3 -./lib/libpgtypes.so.3.4 +./lib/libpgtypes.so* ./lib/libgauss_cl_jni.so 
./lib/libnuma.so ./lib/libnuma.so.1 diff --git a/build/script/x86_64_opengauss_list b/build/script/x86_64_opengauss_list index 5d462ccd82..faa707768a 100644 --- a/build/script/x86_64_opengauss_list +++ b/build/script/x86_64_opengauss_list @@ -840,9 +840,7 @@ ./lib/libecpg.a ./lib/libecpg_compat.a ./lib/libpgtypes.a -./lib/libpgtypes.so -./lib/libpgtypes.so.3 -./lib/libpgtypes.so.3.4 +./lib/libpgtypes.so* ./lib/libgauss_cl_jni.so ./lib/libcgroup.so* ./lib/libcom_err_gauss.so* -- Gitee From 54aaf5192ec4d1362f2a201cf0c048c43f458d85 Mon Sep 17 00:00:00 2001 From: zjcui <15050584343@163.com> Date: Sat, 14 Dec 2024 17:42:57 +0800 Subject: [PATCH 347/347] =?UTF-8?q?MES=20RECV=E7=BA=BF=E7=A8=8B=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E5=88=9D=E5=A7=8B=E5=8C=96=E7=BA=BF=E7=A8=8B=E8=AE=A1?= =?UTF-8?q?=E7=AE=97=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/include/storage/proc.h | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 8a212b6d7b..d56829eed1 100755 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -29,7 +29,7 @@ #include "utils/atomic.h" #include "utils/snapshot.h" #include "access/multi_redo_settings.h" - +#include "c.h" /* * Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds @@ -475,9 +475,30 @@ const int MAX_COMPACTION_THREAD_NUM = 10; #define NUM_DMS_SMON_CALLBACK_PROC (2) // smon + smon_recycle #define NUM_DMS_PARALLEL_CALLBACK_PROC (g_instance.attr.attr_storage.dms_attr.parallel_thread_num <= 1 ? 
0 : \ g_instance.attr.attr_storage.dms_attr.parallel_thread_num) -#define NUM_DMS_PRIO_CNT 4 -#define NUM_DMS_RECV_THREAD_CNT (g_instance.attr.attr_storage.dms_attr.channel_count * \ - (g_instance.attr.attr_storage.dms_attr.inst_count - 1) * NUM_DMS_PRIO_CNT) + +#define NUM_DMS_CKPT_NOTIFY_TASK_RATIO (1.0f / 32) +#define NUM_DMS_CLEAN_EDP_TASK_RATIO (1.0f / 32) +#define NUM_DMS_DERIVED_TASK_RATIO (1.0f / 8) +#define NUM_DMS_RECV_WORK_THREAD_RATIO (1.0f / 4) + +#define NUM_DMS_WORK_THREAD_PRIO_0_2 (4) +#define NUM_DMS_WORK_THREAD_PRIO_3 Max(1, (uint32)(NUM_DMS_WORK_THREAD_PROCS * NUM_DMS_CKPT_NOTIFY_TASK_RATIO)) +#define NUM_DMS_WORK_THREAD_PRIO_4 Max(1, (uint32)(NUM_DMS_WORK_THREAD_PROCS * NUM_DMS_CLEAN_EDP_TASK_RATIO)) +#define NUM_DMS_WORK_THREAD_PRIO_5 Max(1, (uint32)(NUM_DMS_WORK_THREAD_PROCS * NUM_DMS_DERIVED_TASK_RATIO)) + +#define NUM_DMS_RECV_THREAD_PRIO_0_2 (3) +#define NUM_DMS_RECV_THREAD_PRIO_3 Max(1, (uint32)(NUM_DMS_WORK_THREAD_PRIO_3 * NUM_DMS_RECV_WORK_THREAD_RATIO)) +#define NUM_DMS_RECV_THREAD_PRIO_4 Max(1, (uint32)(NUM_DMS_WORK_THREAD_PRIO_4 * NUM_DMS_RECV_WORK_THREAD_RATIO)) +#define NUM_DMS_RECV_THREAD_PRIO_5 Max(1, (uint32)(NUM_DMS_WORK_THREAD_PRIO_5 * NUM_DMS_RECV_WORK_THREAD_RATIO)) +#define NUM_DMS_RECV_THREAD_PRIO_6 \ + Max(1, (uint32)((NUM_DMS_WORK_THREAD_PROCS - NUM_DMS_WORK_THREAD_PRIO_0_2 - \ + NUM_DMS_WORK_THREAD_PRIO_3 - NUM_DMS_WORK_THREAD_PRIO_4 - NUM_DMS_WORK_THREAD_PRIO_5) * \ + NUM_DMS_RECV_WORK_THREAD_RATIO)) +#define NUM_DMS_RECV_THREAD_PRIO_8 (1) +#define NUM_DMS_RECV_THREAD_CNT \ + (NUM_DMS_RECV_THREAD_PRIO_0_2 + NUM_DMS_RECV_THREAD_PRIO_3 + NUM_DMS_RECV_THREAD_PRIO_4 + \ + NUM_DMS_RECV_THREAD_PRIO_5 + NUM_DMS_RECV_THREAD_PRIO_6 + NUM_DMS_RECV_THREAD_PRIO_8) + #define NUM_DMS_MAX_WORK_THREAD_PROCS (g_instance.attr.attr_storage.dms_attr.work_thread_pool_max_cnt) #define NUM_DMS_WORK_SCHEDULER_PROC (1) #define NUM_DMS_RDMA_THREAD_PROCS (g_instance.attr.attr_storage.dms_attr.work_thread_pool_max_cnt != 0 ? \ -- Gitee