diff --git a/contrib/gs_filedump/CMakeLists.txt b/contrib/gs_filedump/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/contrib/gs_filedump/Makefile b/contrib/gs_filedump/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ca77887804cce993f6a76cf88eded95e62efeb86 --- /dev/null +++ b/contrib/gs_filedump/Makefile @@ -0,0 +1,21 @@ +# View README.gs_filedump.md first + +# note this must match version macros in gs_filedump.h +PGFILEDESC = "gs_filedump - decode and display datafile" +PGAPPICON = win32 + +# OBJS = gs_filedump.o decode.o decode_ustore.o stringinfo.o checksum.o \ + +PROGRAM = gs_filedump +OBJS = gs_filedump.o decode.o stringinfo.o checksum.o segment.o \ + $(top_builddir)/src/bin/pg_probackup/pg_lzcompress.o \ + +REGRESS = datatypes float numeric xml toast +TAP_TESTS = 1 +EXTRA_CLEAN = *.heap $(wildcard [1-9]???[0-9]) # testsuite leftovers + +subdir = contrib/gs_filedump +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk + diff --git a/contrib/gs_filedump/README.gs_filedump.md b/contrib/gs_filedump/README.gs_filedump.md new file mode 100644 index 0000000000000000000000000000000000000000..38dc777dbbfcd456a33610da966edebde755e131 --- /dev/null +++ b/contrib/gs_filedump/README.gs_filedump.md @@ -0,0 +1,99 @@ +# gs_filedump - Display formatted contents of a PostgreSQL heap, index, or control file + +Copyright (c) 2002-2010 Red Hat, Inc. + +Copyright (c) 2011-2023, PostgreSQL Global Development Group + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +Original Author: Patrick Macdonald + + +## Overview: + +gs_filedump is a utility to format PostgreSQL heap/index/control files +into a human-readable form. You can format/dump the files several ways, +as listed in the Invocation section, as well as dumping straight binary. + +The type of file (heap/index) can usually be determined automatically +by the content of the blocks within the file. However, to format a +pg_control file you must use the -c option. + +The default is to format the entire file using the block size listed in +block 0 and display block relative addresses. These defaults can be +modified using run-time options. + +Some options may seem strange but they're there for a reason. For +example, block size. It's there because if the header of block 0 is +corrupt, you need a method of forcing a block size. + + +## Compile/Installation: + +To compile gs_filedump, you will need to have a properly configured +PostgreSQL source tree or the devel packages (with include files) +of the appropriate PostgreSQL major version. 
+
+```
+make PG_CONFIG=/path/to/postgresql/bin/pg_config
+make install PG_CONFIG=/path/to/postgresql/bin/pg_config
+```
+
+
+## Invocation:
+
+```
+Usage: gs_filedump [-abcdfhikxy] [-R startblock [endblock]] [-D attrlist] [-S blocksize] [-s segsize] [-n segnumber] file
+
+Display formatted contents of a PostgreSQL heap/index/control file
+Defaults are: relative addressing, range of the entire file, block
+              size as listed on block 0 in the file
+
+The following options are valid for heap and index files:
+  -a  Display absolute addresses when formatting (Block header
+      information is always block relative)
+  -b  Display binary block images within a range (Option will turn
+      off all formatting options)
+  -d  Display formatted block content dump (Option will turn off
+      all other formatting options)
+  -D  Decode tuples using given comma separated list of types
+      Supported types:
+        bigint bigserial bool char charN date float float4 float8 int
+        json macaddr name numeric oid real serial smallint smallserial text
+        time timestamp timestamptz timetz uuid varchar varcharN xid xml
+      ~ ignores all attributes left in a tuple
+  -f  Display formatted block content dump along with interpretation
+  -h  Display this information
+  -i  Display interpreted item details
+  -k  Verify block checksums
+  -o  Do not dump old values.
+  -R  Display specific block ranges within the file (Blocks are
+      indexed from 0)
+        [startblock]: block to start at
+        [endblock]: block to end at
+      A startblock without an endblock will format the single block
+  -s  Force segment size to [segsize]
+  -u  Display content with storage type ustore
+  -t  Dump TOAST files
+  -v  Output additional information about TOAST relations
+  -n  Force segment number to [segnumber]
+  -S  Force block size to [blocksize]
+  -x  Force interpreted formatting of block items as index items
+  -y  Force interpreted formatting of block items as heap items
+
+The following options are valid for control files:
+  -c  Interpret the file listed as a control file
+  -f  Display formatted content dump along with interpretation
+  -S  Force block size to [blocksize]
+Additional functions:
+  -m  Interpret file as pg_filenode.map file and print contents (all
+      other options will be ignored)
+
+Report bugs to 
+```
+
+In most cases it's recommended to use the -i and -f options to get
+the most useful dump output.
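+
+For example, a typical invocation that decodes a three-column heap table
+(the data file path and the -D column list below are only illustrative;
+substitute the ones for your own relation) could look like:
+
+```
+gs_filedump -i -f -D int,name,xml $PGDATA/base/15743/237606
+```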
diff --git a/contrib/gs_filedump/README.md b/contrib/gs_filedump/README.md
new file mode 120000
index 0000000000000000000000000000000000000000..3e433b9df20bfca3bfe6cfdd7754f84f400063e7
--- /dev/null
+++ b/contrib/gs_filedump/README.md
@@ -0,0 +1 @@
+README.gs_filedump.md
\ No newline at end of file
diff --git a/contrib/gs_filedump/README_01.md b/contrib/gs_filedump/README_01.md
new file mode 100644
index 0000000000000000000000000000000000000000..aad54dd1400d075e120355b945614464c2eda26e
--- /dev/null
+++ b/contrib/gs_filedump/README_01.md
@@ -0,0 +1,490 @@
+## A help document
+
+### Help information
+
+```
+Usage: gs_filedump [-abcdfhikuxy] [-r relfilenode] [-T reltoastrelid] [-R startblock [endblock]] [-D attrlist] [-S blocksize] [-s segsize] [-n segnumber] file
+```
+
+`-h` Show this help
+
+`-abdfkxyv` Used mainly for debugging; rarely needed
+
+`-s` and `-S` Force the segment size and block size; also rarely needed
+
+
+
+`-i` Display interpreted item details
+
+`-o` Do not dump deleted (old) item data
+
+`-c` Interpret the file as a control file
+
+`-m` Interpret the file as a `pg_filenode.map` mapping file
+
+`-u` Decode tables stored with the `ustore` storage engine (without `-u`, tables are decoded as `astore` by default)
+
+`-t` Dump TOAST files (supported for both the `astore` and `ustore` storage engines)
+
+`-R` Specify the range of blocks to decode (blocks are indexed from 0)
+
+`-D` Comma-separated list of the column types of the table being decoded; supported types:
+
+```
+Supported types:
+    bigint bigserial bool char charN date float float4 float8 int
+    json macaddr name numeric oid real serial smallint smallserial text
+    time timestamp timestamptz timetz uuid varchar varcharN xid xml
+    ~        ignores all attributes left in a tuple
+```
+
+**Options for decoding segment-page storage tables:**
+
+When decoding a table stored in segment-page storage, the file argument must be given as **[file directory/1]**
+
+`-r` Specify the `[relfilenode]` of the table to decode
+
+`-T` Specify the `[relfilenode]` of the pg_toast relation that belongs to the table (the `-t` option is not supported when decoding segment-page tables)
+
+
+
+### A few usage examples
+
+#### astore storage engine
+
+##### Check the table structure, table type and table data
+
+```sql
+openGauss=# \d+ table_01;
+                        Table "public.table_01"
+ Column |  Type   | Modifiers | Storage  | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ id     | integer |           | plain    |              |
+ name   | name    |           | plain    |              |
+ xml    | xml     |           | extended |              |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+openGauss=# select * from table_01;
+ id | name |        xml
+----+------+--------------------
+  1 | AAA  |
+  2 | BBB  | Hello
+  3 | CCC  | World
+(3 rows)
+
+-- Delete one row
+
+openGauss=# delete from table_01 where id=2;
+DELETE 1
+openGauss=# select * from table_01;
+ id | name |        xml
+----+------+--------------------
+  1 | AAA  |
+  3 | CCC  | World
+(2 rows)
+
+openGauss=# vacuum table_01;
+VACUUM
+```
+
+
+
+##### Look up the table's `relfilenode`
+
+```sql
+openGauss=# select oid, relname, relfilenode, reltoastrelid from pg_class where relname='table_01';
+  oid   | relname  | relfilenode | reltoastrelid
+--------+----------+-------------+---------------
+ 237606 | table_01 |      237606 |        237609
+(1 row)
+
+openGauss=# select pg_relation_filepath('table_01');
+ pg_relation_filepath
+----------------------
+ base/15743/237606
+(1 row)
+
+-- $PGDATA/base/15743/237606 is the physical file that stores table table_01
+```
+
+##### Dump the table data with `gs_filedump`
+
+**Without the `-o` option, deleted items are shown by default**
+
+```bash
+[omm@cmnode2 ~]$ gs_filedump -D int,name,xml $PGDATA/base/15743/237606
+
+*******************************************************************
+* PostgreSQL File/Block Formatted Dump Utility
+*
+* File: /app/opengauss/cluster/dn1/base/15743/237606
+* Options used: -D int,name,xml
+*******************************************************************
+
+Block    0 ********************************************************
+ -----
+ Block Offset: 0x00000000         Offsets: Lower      52 (0x0034)
+ Block: Size 8192  Version    6            Upper    7872 (0x1ec0)
+ LSN:  logid      0 recoff 0x128e0b20      Special  8192 (0x2000)
+ Items:    3                      Free Space: 7820
+ Checksum: 0x29bf  Prune XID: 0x0006d855  Flags: 0x0040 (CHECKSUM_FNV1A)
+ Length (including item array): 36
+
+ -----
+ Item   1 -- Length:   92  Offset: 8096 (0x1fa0)  Flags: NORMAL
+COPY: 1  AAA  \N
+ Item   2 -- Length:  109  Offset: 7984 (0x1f30)  Flags: NORMAL
+COPY: 2  BBB  Hello
+ Item   3 -- Length:  111  Offset: 7872 (0x1ec0)  Flags: NORMAL
+COPY: 3  CCC  World
+
+
+*** End of File Encountered. Last Block Read: 0 ***
+```
+
+
+
+**With the `-o` option, deleted items are not shown**
+
+```bash
+[omm@cmnode2 ~]$ gs_filedump -o -D int,name,xml $PGDATA/base/15743/237606
+
+*******************************************************************
+* PostgreSQL File/Block Formatted Dump Utility
+*
+* File: /app/opengauss/cluster/dn1/base/15743/237606
+* Options used: -o -D int,name,xml
+*******************************************************************
+
+Block    0 ********************************************************
+ -----
+ Block Offset: 0x00000000         Offsets: Lower      52 (0x0034)
+ Block: Size 8192  Version    6            Upper    7872 (0x1ec0)
+ LSN:  logid      0 recoff 0x128e0b20      Special  8192 (0x2000)
+ Items:    3                      Free Space: 7820
+ Checksum: 0x29bf  Prune XID: 0x0006d855  Flags: 0x0040 (CHECKSUM_FNV1A)
+ Length (including item array): 36
+
+ -----
+ Item   1 -- Length:   92  Offset: 8096 (0x1fa0)  Flags: NORMAL
+COPY: 1  AAA  \N
+ Item   2 -- Length:  109  Offset: 7984 (0x1f30)  Flags: NORMAL
+tuple was removed by transaction #448597
+ Item   3 -- Length:  111  Offset: 7872 (0x1ec0)  Flags: NORMAL
+COPY: 3  CCC  World
+
+
+*** End of File Encountered. Last Block Read: 0 ***
+```
+
+**If only the first two columns need to be decoded, the `~` symbol can be used to ignore the remaining column types**
+
+```bash
+[omm@cmnode2 ~]$ gs_filedump -o -D int,name,~ $PGDATA/base/15743/237606 |grep -i copy
+COPY: 1  AAA
+COPY: 3  CCC
+```
+
+> Note: the ~ symbol may only appear at the end of the -D list, not in the middle; for example `-D int,~,xml` is not allowed
+
+##### Other options
+
+```bash
+# Add the -i option to display detailed item information
+gs_filedump -i -D int,name,xml $PGDATA/base/15743/237606
+# Add the -t option to dump the TOAST file
+gs_filedump -t -D int,name,xml $PGDATA/base/15743/237606
+
+```
+
+#### ustore storage engine
+
+```bash
+# Usage is the same as for astore; just add the -u option to specify the storage engine type
+
+# Without the `-o` option, deleted items are shown with Flags: REDIRECT
+[omm@cmnode2 ~]$ gs_filedump -u -D int,name,xml $PGDATA/base/15743/237612
+ -----
+ Item   1 -- Length:   80  Offset: 8112 (0x1fb0)  Flags: NORMAL
+COPY: 1  AAA  \N
+ Item   2 -- Length:    0  Offset:  257 (0x0101)  Flags: REDIRECT
+ Item   3 -- Length:   98  Offset: 8014 (0x1f4e)  Flags: NORMAL
+COPY: 3  CCC  World
+
+# With the `-o` option, deleted items are not shown
+gs_filedump -uo -D int,name,xml $PGDATA/base/15743/237612
+# If only the first two columns need to be decoded, use `~` to ignore the remaining column types
+gs_filedump -u -D int,name,~ $PGDATA/base/15743/237612
+# Add the -i option to display detailed item information
+gs_filedump -ui -D int,name,xml $PGDATA/base/15743/237612
+# Add the -t option to dump the TOAST file
+gs_filedump -ut -D int,name,xml $PGDATA/base/15743/237612
+```
+
+
+
+#### Segment-page storage engine
+
+```sql
+openGauss=# \d+ table_03;
+                        Table "public.table_03"
+ Column |  Type   | Modifiers | Storage  | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ id     | integer |           | plain    |              |
+ name   | name    |           | plain    |              |
+ xml    | xml     |           | extended |              |
+Has OIDs: no
+Options: orientation=row, segment=on, compression=no
+
+openGauss=# select oid, relname, relfilenode, reltoastrelid from pg_class where relname='table_03';
+  oid   | relname  | relfilenode | reltoastrelid
+--------+----------+-------------+---------------
+ 237618 | table_03 |        4187 |        237621
+(1 row)
+
+openGauss=# select oid, relname, relfilenode, reltoastrelid from pg_class where oid = 237621;
+  oid   |     relname     | relfilenode | reltoastrelid
+--------+-----------------+-------------+---------------
+ 237621 | pg_toast_237618 |        4188 |             0
+(1 row)
+
+openGauss=# select pg_relation_filepath('table_03');
+ pg_relation_filepath
+----------------------
+ base/15743/4187
+(1 row)
+```
+
+
+
+Because tables in segment-page storage all live in files 1-5 of the database directory rather than in files of their own, dumping a segment-page table requires the path of file 1 plus the `relfilenode` of the table to be dumped
+
+```bash
+# -r specifies the table's relfilenode
+[omm@cmnode2 ~]$ gs_filedump -r 4187 -D int,name,xml $PGDATA/base/15743/1 |grep -i copy
+COPY: 1  AAA  \N
+COPY: 2  BBB  Hello
+COPY: 3  CCC  World
+```
+
+
+
+With segment-page storage, dumping TOASTed columns requires the `-T` option to specify the `relfilenode` of the corresponding `pg_toast` relation
+
+```bash
+[omm@cmnode2 ~]$ gs_filedump -r 4187 -T 4188 -D text,text $PGDATA/base/15743/1
+```
+
+
+
+### Looking up table mapping information when the database cannot be reached
+
+Everything above assumes the database is up, so the table's data file, relfilenode and related information can be queried easily. How can this information be obtained when it is impossible to log in to the database?
+
+**Taking table table_01 as an example**
+
+#### 1. The oid of system catalog pg_class is 1259; first locate the physical file of pg_class through the mapping file.
+
+```bash
+[omm@cmnode2 dn1]$ cd $PGDATA
+[omm@cmnode2 dn1]$ find . -name pg_filenode.map
+./global/pg_filenode.map
+./base/1/pg_filenode.map
+./base/15738/pg_filenode.map
+./base/15743/pg_filenode.map
+
+[omm@cmnode2 dn1]$ gs_filedump -m ./base/1/pg_filenode.map |grep 1259
+OID: 1259 Filenode: 15339
+
+# This shows that the physical file of system catalog pg_class is 15339
+```
+
+#### 2. Query the table's metadata from the pg_class file
+
+```bash
+[omm@cmnode2 dn1]$ gs_filedump -io -D name,oid,oid,oid,oid,oid,oid,oid,float8,float8,int,oid,oid,oid,oid,oid,oid,bool,bool,char,char,smallint,smallint,bool,bool,bool,bool,bool,char,bool,bool,char,int,text,text,~ ./base/15743/15339 |grep -i -B 10 table_01
+tuple was removed by transaction #311309620
+ Item  34 -- Length:    0  Offset:   35 (0x0023)  Flags: REDIRECT
+tuple was removed by transaction #25177244
+ Item  35 -- Length:  280  Offset: 1232 (0x04d0)  Flags: NORMAL
+  XMIN: 437175  XMAX: 0  CID|XVAC: 7  OID: 237606
+  Block Id: 27  linp Index: 35   Attributes: 40   Size: 32
+  infomask: 0x290b (HASNULL|HASVARWIDTH|HASOID|XMAX_LOCK_ONLY|XMIN_COMMITTED|XMAX_INVALID|UPDATED|HEAP_ONLY)
+  t_bits: [0]: 0xff [1]: 0xff [2]: 0xff [3]: 0xff
+          [4]: 0x9d
+
+COPY: table_01  2200  237608  0  10  0  237606  0  0  0  0  237609  0  0  0  0  0  f  f  p  r  3  0448595  \N  \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0L\0\0\0orientation=row\0H\0\0\0compression=no\0\0
+```
+
+> From the output above, table_01's `oid` is 237606
+>
+> `relnamespace` is 2200, `relfilenode` is 237606, `reltoastrelid` is 237609
+>
+> `reloptions` is orientation=row,compression=no
+
+#### 3. In the same way, obtain the oids of system catalogs pg_attribute and pg_type, and the relfilenode of system catalog pg_namespace
+
+```
+[omm@cmnode2 dn1]$ /home/omm/gs_filedump -io -D name,oid,oid,oid,oid,oid,oid,oid,float8,float8,int,oid,oid,oid,oid,oid,oid,bool,bool,char,char,smallint,smallint,bool,bool,bool,bool,bool,char,bool,bool,char,int,text,text,~ ./base/15743/15339 |grep -iE -B 10 "pg_attribute|pg_type|pg_namespace"
+```
+
+> This gives pg_type's `oid` as 1247
+>
+> pg_attribute's `oid` is 1249, pg_namespace's `oid` is 2615 and its `relfilenode` is 15454
+>
+> Note: the `oid` values obtained here may differ between databases
+
+#### 4. Locate the physical files of pg_type and pg_attribute
+
+```bash
+[omm@cmnode2 dn1]$ gs_filedump -m ./base/1/pg_filenode.map |grep -E "1247|1249|2615"
+OID: 1249 Filenode: 15320
+OID: 1247 Filenode: 15124
+
+# pg_attribute:
+[omm@cmnode2 dn1]$ find . -name 15320
+./base/1/15320
+./base/15738/15320
+./base/15743/15320
+# pg_type
+[omm@cmnode2 dn1]$ find . -name 15124
+./base/1/15124
+./base/15738/15124
+./base/15743/15124
+```
+
+#### 5. Obtain table_01's column names and type oids from pg_attribute
+
+```bash
+[omm@cmnode2 dn1]$ gs_filedump -o -D oid,name,oid,int,smallint,smallint,~ ./base/15743/15320 |grep -i "copy: 237606"
+COPY: 237606  id  23  -1  4  1
+COPY: 237606  name  19  -1  64  2
+COPY: 237606  xml  142  -1  -1  3
+COPY: 237606  ctid  27  0  6  -1
+COPY: 237606  xmin  28  0  8  -3
+COPY: 237606  cmin  29  0  4  -4
+COPY: 237606  xmax  28  0  8  -5
+COPY: 237606  cmax  29  0  4  -6
+COPY: 237606  tableoid  26  0  4  -7
+COPY: 237606  xc_node_id  23  0  4  -8
+```
+
+After filtering out the rows whose last column is negative (system columns), what remains are the columns of table table_01
+
+```bash
+COPY: 237606  id  23  -1  4  1
+COPY: 237606  name  19  -1  64  2
+COPY: 237606  xml  142  -1  -1  3
+```
+
+
+
+6. Use system catalog pg_type to look up the concrete types for the oids [23, 19, 142]; note that id, name and xml here are the column names from the table definition, not type names.
+
+```bash
+[omm@cmnode2 dn1]$ gs_filedump -i -D name,~ ./base/15743/15124 |grep -EA 5 'OID: 23$|OID: 19$|OID: 142$' | grep -E 'OID|COPY' | grep -v infomask | awk '{print $NF}' |xargs -n2
+19 name
+23 int4
+142 xml
+```
+
+Combining this with the information from step 5, the table structure is:
+
+```bash
+table_01:
+id   int4
+name name
+xml  xml
+
+oid: 237606, relnamespace: 2200, relfilenode: 237606, reltoastrelid: 237609
+```
+
+
+
+### Script-based lookup
+
+Since looking up a table structure this way is fairly involved, the steps above have been packaged into a script, gs_desc, for ease of use
+
+#### Help information
+
+```bash
+[omm@cmnode2 dn1]$ gs_desc -h
+usage: gs_desc [-h] [-s SEARCHPATH] [-n NAMESPACE [NAMESPACE ...]] -t
+               TABLENAME [TABLENAME ...]
+
+Process some integers.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -s SEARCHPATH, --searchpath SEARCHPATH
+                        Specify the search path
+  -n NAMESPACE [NAMESPACE ...], --namespace NAMESPACE [NAMESPACE ...]
+                        Specify the namespace(s)
+  -t TABLENAME [TABLENAME ...], --tablename TABLENAME [TABLENAME ...]
+                        Specify the tablename(s)
+```
+
+
+
+#### Example
+
+```bash
+[omm@cmnode2 dn1]$ /home/omm/gs_desc -t table_01 table_02 seg_toast table_not_exists
+**************************************************
+*
+* Namespaces: None, Tables: ['table_01', 'table_02', 'seg_toast', 'table_not_exists']
+*
+**************************************************
+     Table "public.seg_toast"
+Column Name     | Type
+---------------+--------
+x               | text
+y               | text
+
+OID: 213033, Relname.Relfilenode: 4183, Toast.Relfilenode: 4184
+Suggest Query Type:
+    -r 4183 [-T 4184] -D text,text
+Location of Binary file:
+    /app/opengauss/cluster/dn1/base/15743/1
+
+Options: orientation=row, segment=on, compression=no
+
+     Table "public.table_01"
+Column Name     | Type
+---------------+--------
+id              | int4
+name            | name
+xml             | xml
+
+OID: 237606, Relname.Relfilenode: 237606, Toast.Relfilenode: 237609
+Suggest Query Type:
+    -D int,name,xml
+Location of Binary file :
+    /app/opengauss/cluster/dn1/base/15743/237606
+
+Options: orientation=row, compression=no
+
+     Table "public.table_02"
+Column Name     | Type
+---------------+--------
+id              | int4
+name            | name
+xml             | xml
+
+OID: 237612, Relname.Relfilenode: 237612, Toast.Relfilenode: 237615
+Suggest Query Type:
+    -u -D int,name,xml
+Location of Binary file :
+    /app/opengauss/cluster/dn1/base/15743/237612
+
+Options: orientation=row, type=ustore, compression=no
+
+@@@@@@@@@@
+Not found table(s): ['table_not_exists']
+@@@@@@@@@@
+```
+
+
+
+
diff --git a/contrib/gs_filedump/checksum.cpp b/contrib/gs_filedump/checksum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3b140df066b56fbb35fea415a6330b12ef7cad0c
--- /dev/null
+++ b/contrib/gs_filedump/checksum.cpp
@@ -0,0 +1,82 @@
+#include "postgres_fe.h"
+#include "storage/checksum_impl.h"
+
+static inline uint32 pg_checksum_init(uint32 seed, uint32 value)
+{
+    CHECKSUM_COMP(seed, 
value); + return seed; +} + +uint32 pg_checksum_block(char *data, uint32 size) +{ + uint32 sums[N_SUMS]; + uint32 *dataArr = (uint32 *)data; + uint32 result = 0; + uint32 i, j; + + /* ensure that the size is compatible with the algorithm */ + Assert((size % (sizeof(uint32) * N_SUMS)) == 0); + + uint32 CHECK_STEP = 2; + /* initialize partial checksums to their corresponding offsets */ + for (j = 0; j < N_SUMS; j += CHECK_STEP) { + sums[j] = pg_checksum_init(g_checksumBaseOffsets[j], dataArr[j]); + sums[j + 1] = pg_checksum_init(g_checksumBaseOffsets[j + 1], dataArr[j + 1]); + } + dataArr += N_SUMS; + + /* main checksum calculation */ + for (i = 1; i < size / (sizeof(uint32) * N_SUMS); i++) { + for (j = 0; j < N_SUMS; j += CHECK_STEP) { + CHECKSUM_COMP(sums[j], dataArr[j]); + CHECKSUM_COMP(sums[j + 1], dataArr[j + 1]); + } + dataArr += N_SUMS; + } + + /* finally add in two rounds of zeroes for additional mixing */ + for (j = 0; j < N_SUMS; j++) { + CHECKSUM_COMP(sums[j], 0); + CHECKSUM_COMP(sums[j], 0); + + /* xor fold partial checksums together */ + result ^= sums[j]; + } + + return result; +} + +/* + * Compute the checksum for an openGauss page. The page must be aligned on a + * 4-byte boundary. + * + * The checksum includes the block number (to detect the case where a page is + * somehow moved to a different location), the page header (excluding the + * checksum itself), and the page data. + */ +uint16 pg_checksum_page(char *page, BlockNumber blkno) +{ + PageHeader phdr = (PageHeader)page; + uint16 save_checksum; + uint32 checksum; + + /* + * Save pd_checksum and temporarily set it to zero, so that the checksum + * calculation isn't affected by the old checksum stored on the page. + * Restore it after, because actually updating the checksum is NOT part of + * the API of this function. + */ + save_checksum = phdr->pd_checksum; + phdr->pd_checksum = 0; + checksum = pg_checksum_block(page, BLCKSZ); + phdr->pd_checksum = save_checksum; + + /* Mix in the block number to detect transposed pages */ + checksum ^= blkno; + + /* + * Reduce to a uint16 (to fit in the pd_checksum field) with an offset of + * one. That avoids checksums of zero, which seems like a good idea. 
+ */ + return (checksum % UINT16_MAX) + 1; +} \ No newline at end of file diff --git a/contrib/gs_filedump/decode.cpp b/contrib/gs_filedump/decode.cpp new file mode 100644 index 0000000000000000000000000000000000000000..af2eda6c1728fb2a696564f34b16fa18cb07dda5 --- /dev/null +++ b/contrib/gs_filedump/decode.cpp @@ -0,0 +1,1307 @@ +#include "gs_filedump.h" +#include "decode.h" +#include +#include +#include +#include +#include +#include +#include + +static bool itemIsNull; +static unsigned int itemSize; +static int ignore_location = -1; + +static int ReadStringFromToast(const char *buffer, unsigned int buff_size, unsigned int *out_size, + int (*parse_value)(const char *, int)); + +static int decode_smallint(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_int(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_uint(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_uint64(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_bigint(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_time(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_timetz(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_date(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_timestamp_internal(const char *buffer, unsigned int buff_size, unsigned int *out_size, + bool with_timezone); + +static int decode_timestamp(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_timestamptz(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_float4(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_float8(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_bool(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_uuid(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_macaddr(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_string(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_char(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_name(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int decode_numeric(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int extract_data(const char *buffer, unsigned int buff_size, unsigned int *out_size, + int (*parse_value)(const char *, int)); + +static int decode_ignore(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +static int ncallbacks = 0; +static decode_callback_t callbacks[ATTRTYPES_STR_MAX_LEN / 2] = {NULL}; + +static ParseCallbackTableItem callback_table[] = { + {"smallserial", &decode_smallint}, + {"smallint", &decode_smallint}, + {"int", &decode_int}, + {"oid", &decode_uint}, + {"xid", &decode_uint64}, + {"serial", &decode_int}, + {"bigint", &decode_bigint}, + {"bigserial", &decode_bigint}, + {"time", &decode_time}, + {"timetz", &decode_timetz}, + {"date", &decode_date}, + {"timestamp", &decode_timestamp}, + {"timestamptz", &decode_timestamptz}, + {"real", &decode_float4}, + {"float4", &decode_float4}, + {"float8", &decode_float8}, + {"float", 
&decode_float8}, + {"bool", &decode_bool}, + {"uuid", &decode_uuid}, + {"macaddr", &decode_macaddr}, + {"name", &decode_name}, + {"numeric", &decode_numeric}, + {"char", &decode_char}, + {"~", &decode_ignore}, + + /* internally all string types are stored the same way */ + {"charn", &decode_string}, + {"varchar", &decode_string}, + {"varcharn", &decode_string}, + {"text", &decode_string}, + {"json", &decode_string}, + {"xml", &decode_string}, + {NULL, NULL}, +}; + +static StringInfoData copyString; +static bool copyStringInitDone = false; + +/* Used by some PostgreSQL macro definitions */ +void ExceptionalCondition(const char *conditionName, const char *errorType, const char *fileName, int lineNumber) +{ + printf("Exceptional condition: name = %s, type = %s, fname = %s, line = %d\n", + conditionName ? conditionName : "(NULL)", errorType ? errorType : "(NULL)", fileName ? fileName : "(NULL)", + lineNumber); + exit(1); +} + +/* Append given string to current COPY line */ +static void CopyAppend(const char *str) +{ + if (!copyStringInitDone) { + initStringInfo(©String); + copyStringInitDone = true; + } + + /* Caller probably wanted just to init copyString */ + if (str == NULL) { + return; + } + + if (copyString.data[0] != '\0') { + appendStringInfoString(©String, "\t"); + } + + appendStringInfoString(©String, str); + + return; +} + +/* + * Append given string to current COPY line and encode special symbols + * like \r, \n, \t and \\. + */ +static int CopyAppendEncode(const char *str, int orig_len) +{ + int curr_offset = 0; + int len = orig_len; + char *tmp_buff = (char *)malloc(2 * orig_len + 1); + + if (tmp_buff == NULL) { + perror("malloc failed."); + exit(EXIT_FAILURE); + } + while (len > 0) { + /* + * Since we are working with potentially corrupted data we can + * encounter \0 as well. + */ + if (*str == '\0') { + tmp_buff[curr_offset] = '\\'; + tmp_buff[curr_offset + 1] = '0'; + curr_offset += 2; + } else if (*str == '\r') { + tmp_buff[curr_offset] = '\\'; + tmp_buff[curr_offset + 1] = 'r'; + curr_offset += 2; + } else if (*str == '\n') { + tmp_buff[curr_offset] = '\\'; + tmp_buff[curr_offset + 1] = 'n'; + curr_offset += 2; + } else if (*str == '\t') { + tmp_buff[curr_offset] = '\\'; + tmp_buff[curr_offset + 1] = 't'; + curr_offset += 2; + } else if (*str == '\\') { + tmp_buff[curr_offset] = '\\'; + tmp_buff[curr_offset + 1] = '\\'; + curr_offset += 2; + } else { + /* It's a regular symbol. 
*/ + tmp_buff[curr_offset] = *str; + curr_offset++; + } + + str++; + len--; + } + + tmp_buff[curr_offset] = '\0'; + CopyAppend(tmp_buff); + free(tmp_buff); + + return 0; +} + +/* + * Decode a numeric type and append the result to current COPY line + */ +static int CopyAppendNumeric(const char *buffer, int num_size) +{ + struct NumericData *num = (struct NumericData *)malloc(num_size); + + if (num == NULL) { + return -2; + } + + errno_t rc = memcpy_s((char *)num, num_size, buffer, num_size); + securec_check(rc, "\0", "\0"); + + if (NUMERIC_IS_SPECIAL(num)) { + int result = -2; + + if (NUMERIC_IS_NINF(num)) { + CopyAppend("-Infinity"); + result = 0; + } + if (NUMERIC_IS_PINF(num)) { + CopyAppend("Infinity"); + result = 0; + } + if (NUMERIC_IS_NAN(num)) { + CopyAppend("NaN"); + result = 0; + } + + free(num); + + return result; + } else { + int sign; + int weight; + int dscale; + int ndigits; + int i; + char *str; + char *cp; + char *endcp; + int d; + bool putit; + NumericDigit d1; + NumericDigit dig; + NumericDigit *digits; + + sign = NUMERIC_SIGN(num); + weight = NUMERIC_WEIGHT(num); + dscale = NUMERIC_DSCALE(num); + + if (num_size == (int)NUMERIC_HEADER_SIZE(num)) { + /* No digits - compressed zero. */ + CopyAppendFmt("%d", 0); + free(num); + return 0; + } else { + ndigits = num_size / sizeof(NumericDigit); + digits = (NumericDigit *)((char *)num + NUMERIC_HEADER_SIZE(num)); + i = (weight + 1) * DEC_DIGITS; + if (i <= 0) { + i = 1; + } + + str = (char *)malloc(i + dscale + DEC_DIGITS + 2); + cp = str; + + /* + * Output a dash for negative values + */ + if (sign == NUMERIC_NEG) { + *cp++ = '-'; + } + + /* + * Output all digits before the decimal point + */ + if (weight < 0) { + d = weight + 1; + *cp++ = '0'; + } else { + for (d = 0; d <= weight; d++) { + dig = (d < ndigits) ? digits[d] : 0; + + /* + * In the first digit, suppress extra leading decimal + * zeroes + */ + putit = (d > 0); + d1 = dig / 1000; + dig -= d1 * 1000; + putit |= (d1 > 0); + if (putit) { + *cp++ = d1 + '0'; + } + d1 = dig / 100; + dig -= d1 * 100; + putit |= (d1 > 0); + if (putit) { + *cp++ = d1 + '0'; + } + d1 = dig / 10; + dig -= d1 * 10; + putit |= (d1 > 0); + if (putit) { + *cp++ = d1 + '0'; + } + *cp++ = dig + '0'; + } + } + + /* + * If requested, output a decimal point and all the digits that + * follow it. We initially put out a multiple of DEC_DIGITS + * digits, then truncate if needed. + */ + if (dscale > 0) { + *cp++ = '.'; + endcp = cp + dscale; + for (i = 0; i < dscale; d++, i += DEC_DIGITS) { + dig = (d >= 0 && d < ndigits) ? 
digits[d] : 0; + d1 = dig / 1000; + dig -= d1 * 1000; + *cp++ = d1 + '0'; + d1 = dig / 100; + dig -= d1 * 100; + *cp++ = d1 + '0'; + d1 = dig / 10; + dig -= d1 * 10; + *cp++ = d1 + '0'; + *cp++ = dig + '0'; + } + cp = endcp; + } + *cp = '\0'; + CopyAppend(str); + free(str); + free(num); + return 0; + } + } +} + +/* Discard accumulated COPY line */ +static void CopyClear(void) +{ + /* Make sure init is done */ + CopyAppend(NULL); + + resetStringInfo(©String); +} + +/* Output and then clear accumulated COPY line */ +static void CopyFlush(void) +{ + /* Make sure init is done */ + CopyAppend(NULL); + + printf("COPY: %s\n", copyString.data); + CopyClear(); +} + +/* + * Add a callback to `callbacks` table for given type name + * + * Arguments: + * type - name of a single type, always lowercase + * + * Return value is: + * == 0 - no error + * < 0 - invalid type name + */ +static int AddTypeCallback(const char *type) +{ + int idx = 0; + + if (*type == '\0') { /* ignore empty strings */ + return 0; + } + + while (callback_table[idx].name != NULL) { + if (strcmp(callback_table[idx].name, type) == 0) { + callbacks[ncallbacks] = callback_table[idx].callback; + if ((strcmp(type, "~") == 0) & (ignore_location == -1)) { + ignore_location = ncallbacks; + } + ncallbacks++; + return 0; + } + idx++; + } + + printf("Error: type <%s> doesn't exist or is not currently supported\n", type); + printf("Full list of known types: "); + idx = 0; + while (callback_table[idx].name != NULL) { + printf("%s ", callback_table[idx].name); + idx++; + } + printf("\n"); + return -1; +} + +/* + * Decode attribute types string like "int,timestamp,bool,uuid" + * + * Arguments: + * str - types string + * Return value is: + * == 0 - if string is valid + * < 0 - if string is invalid + */ +int ParseAttributeTypesString(const char *str) +{ + char *curr_type, *next_type; + char attrtypes[ATTRTYPES_STR_MAX_LEN + 1]; + int i, len = strlen(str); + + if (len > ATTRTYPES_STR_MAX_LEN) { + printf("Error: attribute types string is longer then %u characters!\n", ATTRTYPES_STR_MAX_LEN); + return -1; + } + + errno_t rc = strcpy_s(attrtypes, sizeof attrtypes, str); + securec_check(rc, "", ""); + for (i = 0; i < len; i++) + attrtypes[i] = tolower(attrtypes[i]); + + curr_type = attrtypes; + while (curr_type) { + next_type = strstr(curr_type, ","); + if (next_type) { + *next_type = '\0'; + next_type++; + } + + if (AddTypeCallback(curr_type) < 0) { + return -1; + } + + curr_type = next_type; + } + + return 0; +} + +/* + * Convert Julian day number (JDN) to a date. + * Copy-pasted from src/common/backend/utils/adt/datetime.cpp + */ + +void j2date(int jd, int *year, int *month, int *day) +{ + unsigned int julian; + unsigned int quad; + unsigned int extra; + int y; + + julian = jd; + julian += 32044; + quad = julian / 146097; + extra = (julian - quad * 146097) * 4 + 3; + julian += 60 + quad * 3 + extra / 146097; + quad = julian / 1461; + julian -= quad * 1461; + y = julian * 4 / 1461; + julian = ((y != 0) ? 
((julian + 305) % 365) : ((julian + 306) % 366)) + 123; + y += quad * 4; + *year = y - 4800; + quad = julian * 2141 / 65536; + *day = julian - 7834 * quad / 256; + *month = (quad + 10) % MONTHS_PER_YEAR + 1; +} + +/* 定义模板函数来处理整数类型的解码 */ +template +static int decode_integral(const char *buffer, unsigned int buff_size, unsigned int *out_size, const char *FORMAT) +{ + unsigned int delta = 0; + if (!isUHeap) { + const char *new_buffer = (const char *)LONGALIGN(buffer); + if (sizeof(T) <= 4) { + new_buffer = (const char *)INTALIGN(buffer); + } + if (sizeof(T) <= 2) { + new_buffer = (const char *)SHORTALIGN(buffer); + } + delta = (unsigned int)((uintptr_t)new_buffer - (uintptr_t)buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer = new_buffer; + } + CHECK_BUFFER_SIZE(buff_size, sizeof(T)); + if (!itemIsNull) { + CopyAppendFmt(FORMAT, (*(T *)buffer)); + } + *out_size = sizeof(T) + delta; + return DECODE_SUCCESS; +} +/* Decode a smallint type */ +static int decode_smallint(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_integral(buffer, buff_size, out_size, INT16_FORMAT); +} +/* Decode an int type */ +static int decode_int(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_integral(buffer, buff_size, out_size, INT32_FORMAT); +} +/* Decode an unsigned int type */ +static int decode_uint(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_integral(buffer, buff_size, out_size, UINT32_FORMAT); +} +/* Decode an unsigned int type */ +static int decode_uint64(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_integral(buffer, buff_size, out_size, UINT64_FORMAT); +} +/* Decode a bigint type */ +static int decode_bigint(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_integral(buffer, buff_size, out_size, INT64_FORMAT); +} + +/* 定义模板函数来处理时间类型的解码 */ +template +static int decode_time_template(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + if (WITH_TIMEZONE & itemIsNull) { + return DECODE_SUCCESS; + } + unsigned int delta = 0; + int64 timestamp, timestamp_sec; + int32 tz_sec = 0, tz_min = 0; + + if (!isUHeap) { + const char *new_buffer = (const char *)LONGALIGN(buffer); + delta = (unsigned int)((uintptr_t)new_buffer - (uintptr_t)buffer); + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + + buff_size -= delta; + buffer = new_buffer; + } + + if (WITH_TIMEZONE & isUHeap) { + unsigned int new_buffer_size = itemSize - (unsigned int)LONGALIGN(itemSize - buff_size); + delta = (unsigned int)(buff_size - new_buffer_size); + + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer += delta; + } + + if (WITH_TIMEZONE) { + CHECK_BUFFER_SIZE(buff_size, (sizeof(int64) + sizeof(int32))); + timestamp = *(int64 *)buffer; + tz_sec = *(int32 *)(buffer + sizeof(int64)); + timestamp_sec = timestamp / 1000000; + tz_min = -(tz_sec / 60); + *out_size = sizeof(int64) + sizeof(int32) + delta; + } else { + CHECK_BUFFER_SIZE(buff_size, sizeof(int64)); + timestamp = *(int64 *)buffer; + timestamp_sec = timestamp / 1000000; + *out_size = sizeof(int64) + delta; + } + + if (!itemIsNull) { + if (WITH_TIMEZONE) { + CopyAppendFmt("%02" INT64_MODIFIER "d:%02" INT64_MODIFIER "d:%02" INT64_MODIFIER "d.%06" INT64_MODIFIER + "d%c%02d:%02d", + timestamp_sec / 60 / 60, (timestamp_sec / 60) % 60, timestamp_sec % 60, timestamp % 1000000, + (tz_min > 0 ? 
'+' : '-'), abs(tz_min / 60), abs(tz_min % 60)); + } else { + CopyAppendFmt("%02" INT64_MODIFIER "d:%02" INT64_MODIFIER "d:%02" INT64_MODIFIER "d.%06" INT64_MODIFIER "d", + timestamp_sec / 60 / 60, (timestamp_sec / 60) % 60, timestamp_sec % 60, timestamp % 1000000); + } + } + return DECODE_SUCCESS; +} + +/* Decode a time type */ +static int decode_time(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_time_template(buffer, buff_size, out_size); +} + +/* Decode a timetz type */ +static int decode_timetz(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_time_template(buffer, buff_size, out_size); +} + +/* Decode a date type */ +static int decode_date(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + const char *new_buffer = (const char *)INTALIGN(buffer); + unsigned int delta = (unsigned int)((uintptr_t)new_buffer - (uintptr_t)buffer); + int32 d, jd, year, month, day; + + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer = new_buffer; + + CHECK_BUFFER_SIZE(buff_size, sizeof(int32)); + *out_size = sizeof(int32) + delta; + + d = *(int32 *)buffer; + if (d == PG_INT32_MIN) { + CopyAppend("-infinity"); + return DECODE_SUCCESS; + } + if (d == PG_INT32_MAX) { + CopyAppend("infinity"); + return DECODE_SUCCESS; + } + + jd = d + POSTGRES_EPOCH_JDATE; + j2date(jd, &year, &month, &day); + + CopyAppendFmt("%04d-%02d-%02d%s", (year <= 0) ? -year + 1 : year, month, day, (year <= 0) ? " BC" : ""); + + return DECODE_SUCCESS; +} + +/* Decode a timestamp type */ +static int decode_timestamp_internal(const char *buffer, unsigned int buff_size, unsigned int *out_size, + bool with_timezone) +{ + int64 timestamp, timestamp_sec; + int32 jd, year, month, day; + unsigned int delta = 0; + + if (!isUHeap) { + const char *new_buffer = (const char *)LONGALIGN(buffer); + delta = (unsigned int)((uintptr_t)new_buffer - (uintptr_t)buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer = new_buffer; + } + + *out_size = sizeof(int64) + delta; + timestamp = *(int64 *)buffer; + + if (timestamp == DT_NOBEGIN) { + CopyAppend("-infinity"); + return DECODE_SUCCESS; + } + if (timestamp == DT_NOEND) { + CopyAppend("infinity"); + return DECODE_SUCCESS; + } + + jd = timestamp / USECS_PER_DAY; + if (jd != 0) { + timestamp -= jd * USECS_PER_DAY; + } + + if (timestamp < INT64CONST(0)) { + timestamp += USECS_PER_DAY; + jd -= 1; + } + + /* add offset to go from J2000 back to standard Julian date */ + jd += POSTGRES_EPOCH_JDATE; + + j2date(jd, &year, &month, &day); + timestamp_sec = timestamp / 1000000; + + if (!itemIsNull) { + CopyAppendFmt("%04d-%02d-%02d %02" INT64_MODIFIER "d:%02" INT64_MODIFIER "d:%02" INT64_MODIFIER + "d.%06" INT64_MODIFIER "d%s%s", + (year <= 0) ? -year + 1 : year, month, day, timestamp_sec / 60 / 60, (timestamp_sec / 60) % 60, + timestamp_sec % 60, timestamp % 1000000, with_timezone ? "+00" : "", (year <= 0) ? 
" BC" : ""); + } + + return DECODE_SUCCESS; +} + +static int decode_timestamp(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_timestamp_internal(buffer, buff_size, out_size, false); +} + +static int decode_timestamptz(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_timestamp_internal(buffer, buff_size, out_size, true); +} + +/* 定义模板函数来处理浮点类型的解码 */ +template +static int decode_float(const char *buffer, unsigned int buff_size, unsigned int *out_size, const char *type_name) +{ + unsigned int delta = 0; + if (!isUHeap) { + const char *new_buffer = (const char *)DOUBLEALIGN(buffer); + if (strcmp(type_name, "float4") == 0) { + new_buffer = (const char *)INTALIGN(buffer); + } else if (strcmp(type_name, "float8") == 0) { + new_buffer = (const char *)LONGALIGN(buffer); + } + delta = (unsigned int)((uintptr_t)new_buffer - (uintptr_t)buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer = new_buffer; + } + CHECK_BUFFER_SIZE(buff_size, sizeof(T)); + /* 使用类型名称作为格式化字符串的一部分 */ + if (!itemIsNull) { + CopyAppendFmt("%.*g", (int)(sizeof(T) * 2), *(T *)buffer); + } + *out_size = sizeof(T) + delta; + return DECODE_SUCCESS; +} + +/* Decode a float4 type */ +static int decode_float4(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_float(buffer, buff_size, out_size, "float4"); +} + +/* Decode a float8 type */ +static int decode_float8(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + return decode_float(buffer, buff_size, out_size, "float8"); +} + +/* Decode an uuid type */ +static int decode_uuid(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + if (itemIsNull) { + return DECODE_SUCCESS; + } + unsigned char uuid[16]; + + CHECK_BUFFER_DELTA_SIZE(buff_size, sizeof(uuid)); + + errno_t rc = memcpy_s(uuid, sizeof(uuid), buffer, sizeof(uuid)); + securec_check(rc, "\0", "\0"); + CopyAppendFmt("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", uuid[0], uuid[1], uuid[2], + uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], + uuid[14], uuid[15]); + *out_size = sizeof(uuid); + return DECODE_SUCCESS; +} + +/* Decode a macaddr type */ +static int decode_macaddr(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + if (itemIsNull) { + return DECODE_SUCCESS; + } + unsigned int delta = 0; + unsigned char macaddr[6]; + if (!isUHeap) { + const char *new_buffer = (const char *)INTALIGN(buffer); + delta = (unsigned int)((uintptr_t)new_buffer - (uintptr_t)buffer); + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer = new_buffer; + } + + if (isUHeap) { + unsigned int new_buffer_size = itemSize - (unsigned int)INTALIGN(itemSize - buff_size); + delta = (unsigned int)(buff_size - new_buffer_size); + + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer += delta; + } + + CHECK_BUFFER_SIZE(buff_size, sizeof(macaddr)); + errno_t rc = memcpy_s(macaddr, sizeof(macaddr), buffer, sizeof(macaddr)); + securec_check(rc, "\0", "\0"); + CopyAppendFmt("%02x:%02x:%02x:%02x:%02x:%02x", macaddr[0], macaddr[1], macaddr[2], macaddr[3], macaddr[4], + macaddr[5]); + *out_size = sizeof(macaddr) + delta; + return DECODE_SUCCESS; +} + +/* Decode a bool type */ +static int decode_bool(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + CHECK_BUFFER_DELTA_SIZE(buff_size, sizeof(bool)); + + if (!itemIsNull) { + 
CopyAppend(*(bool *)buffer ? "t" : "f"); + } + + *out_size = sizeof(bool); + return DECODE_SUCCESS; +} + +/* Decode a name type (used mostly in catalog tables) */ +static int decode_name(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + if (itemIsNull) { + return DECODE_SUCCESS; + } + CHECK_BUFFER_DELTA_SIZE(buff_size, NAMEDATALEN); + CopyAppendEncode(buffer, strnlen(buffer, NAMEDATALEN)); + *out_size = NAMEDATALEN; + return DECODE_SUCCESS; +} + +/* + * Decode numeric type. + */ +static int decode_numeric(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + int result = extract_data(buffer, buff_size, out_size, &CopyAppendNumeric); + return result; +} + +/* Decode a char type */ +static int decode_char(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + CHECK_BUFFER_SIZE(buff_size, sizeof(char)); + + if (!itemIsNull) { + CopyAppendEncode(buffer, 1); + } + *out_size = 1; + return DECODE_SUCCESS; +} + +/* Ignore all data left */ +static int decode_ignore(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + *out_size = buff_size; + return DECODE_SUCCESS; +} + +/* Decode char(N), varchar(N), text, json or xml types */ +static int decode_string(const char *buffer, unsigned int buff_size, unsigned int *out_size) +{ + int result = extract_data(buffer, buff_size, out_size, &CopyAppendEncode); + return result; +} + +/* + * Align data, parse varlena header, detoast and decompress. + * Last parameters responds for actual parsing according to type. + */ +static int extract_data(const char *buffer, unsigned int buff_size, unsigned int *out_size, + int (*parse_value)(const char *, int)) +{ + if (itemIsNull) { + return DECODE_SUCCESS; + } + + int padding = 0; + int result = 0; + + /* Skip padding bytes. */ + while (*buffer == 0x00) { + if (buff_size == 0) { + return DECODE_BUFF_SIZE_IS_ZERO; + } + + buff_size--; + buffer++; + padding++; + } + + if (VARATT_IS_1B_E(buffer)) { + /* + * 00000001 1-byte length word, unaligned, TOAST pointer + */ + uint32 len = VARSIZE_EXTERNAL(buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, len); + + if (blockOptions & BLOCK_DECODE_TOAST) { + result = ReadStringFromToast(buffer, buff_size, out_size, parse_value); + } else if (VARATT_IS_EXTERNAL_ONDISK(buffer)) { + varatt_external toast_ptr; + VARATT_EXTERNAL_GET_POINTER(toast_ptr, buffer); + if (VARATT_EXTERNAL_IS_COMPRESSED(toast_ptr)) { + CopyAppend("(TOASTED,pglz)"); + } else + CopyAppend("(TOASTED,uncompressed)"); + } + /* If tag is indirect or expanded, it was stored in memory. 
*/ + else + CopyAppend("(TOASTED IN MEMORY)"); + + *out_size = padding + len; + return result; + } + + if (VARATT_IS_1B(buffer)) { + /* + * xxxxxxx1 1-byte length word, unaligned, uncompressed data (up to + * 126b) xxxxxxx is 1 + string length + */ + uint8 len = VARSIZE_1B(buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, len); + + result = parse_value(buffer + 1, len - 1); + *out_size = padding + len; + return result; + } + + if (VARATT_IS_4B_U(buffer) && buff_size >= 4) { + /* + * xxxxxx00 4-byte length word, aligned, uncompressed data (up to 1G) + */ + uint32 len = VARSIZE_4B(buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, len); + + result = parse_value(buffer + 4, len - 4); + *out_size = padding + len; + return result; + } + + if (VARATT_IS_4B_C(buffer) && buff_size >= 8) { + /* + * xxxxxx10 4-byte length word, aligned, *compressed* data (up to 1G) + */ + uint32 decompress_ret; + uint32 len = VARSIZE_4B(buffer); + uint32 decompressed_len = 0; + char *decompress_tmp_buff; + decompressed_len = VARRAWSIZE_4B_C(buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, len); + + if ((decompress_tmp_buff = (char *)malloc(decompressed_len)) == NULL) { + perror("malloc failed."); + exit(EXIT_FAILURE); + } + decompress_ret = pglz_decompress(VARDATA_4B_C(buffer), len - 2 * sizeof(uint32), decompress_tmp_buff, + decompressed_len, true); + if ((decompress_ret != decompressed_len) || (decompress_ret < 0)) { + printf("WARNING: Corrupted toast data, unable to decompress.\n"); + CopyAppend("(inline compressed, corrupted)"); + *out_size = padding + len; + free(decompress_tmp_buff); + return 0; + } + + result = parse_value(decompress_tmp_buff, decompressed_len); + *out_size = padding + len; + free(decompress_tmp_buff); + return result; + } + + return DECODE_FAILURE; +} + +/* + * Try to decode a tuple using a types string provided previously. + * + * Arguments: + * tupleData - pointer to the tuple data + * tupleSize - tuple size in bytes + */ +void FormatDecode(const char *tupleData, unsigned int tupleSize) +{ + UHeapDiskTuple uheader = (UHeapDiskTuple)tupleData; + HeapTupleHeader header = (HeapTupleHeader)tupleData; + const char *data = 0; + unsigned int size = 0; + itemSize = tupleSize; + if (isUHeap) { + data = tupleData + uheader->t_hoff; + size = tupleSize - uheader->t_hoff; + } else { + data = tupleData + header->t_hoff; + size = tupleSize - header->t_hoff; + } + + int curr_attr; + + CopyClear(); + + for (curr_attr = 0; curr_attr < ncallbacks; curr_attr++) { + int ret; + unsigned int processed_size = 0; + + if (isUHeap) { + if ((uheader->flag & UHEAP_HAS_NULL) && att_isnull(curr_attr, uheader->data)) { + if (curr_attr != ignore_location) { + CopyAppend("\\N"); + itemIsNull = true; + } + } else if (size < 0) { + printf("Error: unable to decode a tuple, no more bytes left. Partial data: %s\n", copyString.data); + return; + } else { + itemIsNull = false; + } + } else { + if ((header->t_infomask & HEAP_HASNULL) && att_isnull(curr_attr, header->t_bits)) { + if (curr_attr != ignore_location) { + CopyAppend("\\N"); + continue; + } + } + if (size < 0) { + printf("Error: unable to decode a tuple, no more bytes left. Partial data: %s\n", copyString.data); + return; + } + } + + ret = callbacks[curr_attr](data, size, &processed_size); + + if (ret < 0) { + switch (ret) { + case DECODE_BUFF_SIZE_LESS_THAN_DELTA: + printf("Error: unable to decode a tuple, callback #%d, returned: %s. 
Partial data: %s\n", + curr_attr + 1, "buff_size_LESS_THAN_delta", copyString.data); + break; + case DECODE_BUFF_SIZE_LESS_THAN_REQUIRED: + printf("Error: unable to decode a tuple, callback #%d, returned: %s. Partial data: %s\n", + curr_attr + 1, "buff_size_LESS_THAN_required", copyString.data); + break; + case DECODE_BUFF_SIZE_IS_ZERO: + printf("Error: unable to decode a tuple, callback #%d, returned: %s. Partial data: %s\n", + curr_attr + 1, "buff_size_IS_ZERO", copyString.data); + break; + case DECODE_FAILURE: + printf("Error: unable to decode a tuple, callback #%d, returned: %s. Partial data: %s\n", + curr_attr + 1, "FAILURE", copyString.data); + break; + default: + printf("Error: unable to decode a tuple, callback #%d returned %d. Partial data: %s\n", + curr_attr + 1, ret, copyString.data); + break; + } + return; + } + + size -= processed_size; + data += processed_size; + } + + if (size != 0) { + printf("Error: unable to decode a tuple, %d bytes left, 0 expected. Partial data: %s\n", size, copyString.data); + return; + } + + CopyFlush(); +} + +static int DumpCompressedString(const char *data, int32 compressed_size, int (*parse_value)(const char *, int)) +{ + uint32 decompress_ret; + char *decompress_tmp_buff = (char *)malloc(TOAST_COMPRESS_RAWSIZE(data)); + ToastCompressionId cmid; + + cmid = (ToastCompressionId)TOAST_COMPRESS_RAWMETHOD(data); + + switch (cmid) { + case TOAST_PGLZ_COMPRESSION_ID: + decompress_ret = pglz_decompress(TOAST_COMPRESS_RAWDATA(data), compressed_size - TOAST_COMPRESS_HEADER_SIZE, + decompress_tmp_buff, TOAST_COMPRESS_RAWSIZE(data), true); + break; + case TOAST_LZ4_COMPRESSION_ID: + printf("Error: compression method lz4 not supported.\n"); + printf("Try to rebuild gs_filedump for PostgreSQL server of version 14+ with --with-lz4 option.\n"); + free(decompress_tmp_buff); + return -2; + default: + decompress_ret = -1; + break; + } + + if ((decompress_ret != TOAST_COMPRESS_RAWSIZE(data)) || (decompress_ret < 0)) { + printf("WARNING: Unable to decompress a string. Data is corrupted.\n"); + printf("Returned %d while expected %d.\n", decompress_ret, TOAST_COMPRESS_RAWSIZE(data)); + } else { + CopyAppendEncode(decompress_tmp_buff, decompress_ret); + } + + free(decompress_tmp_buff); + + return decompress_ret; +} + +static int ReadStringFromToast(const char *buffer, unsigned int buff_size, unsigned int *out_size, + int (*parse_value)(const char *, int)) +{ + int result = 0; + + /* If toasted value is on disk, we'll try to restore it. */ + if (VARATT_IS_EXTERNAL_ONDISK(buffer)) { + varatt_external toast_ptr; + char *toast_data = NULL; + /* Number of chunks the TOAST data is divided into */ + int32 num_chunks; + /* Actual size of external TOASTed value */ + int32 toast_ext_size; + /* Path to directory with TOAST realtion file */ + char *toast_relation_path; + /* Filename of TOAST relation file */ + char toast_relation_filename[MAXPGPATH]; + FILE *toast_rel_fp; + unsigned int block_options = 0; + unsigned int control_options = 0; + errno_t rc; + + VARATT_EXTERNAL_GET_POINTER(toast_ptr, buffer); + + /* Extract TOASTed value */ + toast_ext_size = toast_ptr.va_extsize; + num_chunks = (toast_ext_size - 1) / TOAST_MAX_CHUNK_SIZE + 1; + printf(" TOAST value. 
Raw size: %8d, external size: %8d, " + "value id: %6d, toast relation id: %6d, chunks: %6d\n", + toast_ptr.va_rawsize, toast_ext_size, toast_ptr.va_valueid, toast_ptr.va_toastrelid, num_chunks); + + /* Open TOAST relation file */ + toast_relation_path = strdup(fileName); + get_parent_directory(toast_relation_path); + + if (isSegment) { + rc = sprintf_s(toast_relation_filename, MAXPGPATH, "%s/%d_%s", + *toast_relation_path ? toast_relation_path : ".", toastRelfilenode, SEGTOASTTAG); + } else { + rc = sprintf_s(toast_relation_filename, MAXPGPATH, "%s/%d", + *toast_relation_path ? toast_relation_path : ".", toast_ptr.va_toastrelid); + } + + securec_check(rc, "\0", "\0"); + toast_rel_fp = fopen(toast_relation_filename, "rb"); + if (!toast_rel_fp) { + printf("Cannot open TOAST relation %s\n", toast_relation_filename); + result = -1; + } else { + unsigned int toast_relation_block_size = 0; + toast_relation_block_size = GetBlockSize(toast_rel_fp); + fseek(toast_rel_fp, 0, SEEK_SET); + + toast_data = (char *)malloc(toast_ptr.va_rawsize); + if (toast_data == NULL) { + perror("malloc failed."); + exit(EXIT_FAILURE); + } + if (isUHeap) { + result = DumpUHeapFileContents(block_options, control_options, toast_rel_fp, toast_relation_block_size, + -1, /* no start block */ + -1, /* no end block */ + true, /* is toast relation */ + toast_ptr.va_valueid, toast_ext_size, toast_data); + } else { + result = DumpFileContents(block_options, control_options, toast_rel_fp, toast_relation_block_size, + -1, /* no start block */ + -1, /* no end block */ + true, /* is toast relation */ + toast_ptr.va_valueid, toast_ext_size, toast_data); + } + if (result != 0) { + printf("Error in TOAST file.\n"); + } else if (VARATT_EXTERNAL_IS_COMPRESSED(toast_ptr)) { + result = DumpCompressedString(toast_data, toast_ext_size, parse_value); + } else { + result = parse_value(toast_data, toast_ext_size); + } + + free(toast_data); + fclose(toast_rel_fp); + } + + free(toast_relation_path); + } + /* If tag is indirect or expanded, it was stored in memory. */ + else { + CopyAppend("(TOASTED IN MEMORY)"); + } + + return result; +} + +/* Decode an Oid as int type and pass value out. */ +static int DecodeOidBinary(const char *buffer, unsigned int buff_size, unsigned int *processed_size, Oid *result) +{ + unsigned int delta = 0; + if (!isUHeap) { + const char *new_buffer = (const char *)INTALIGN(buffer); + delta = (unsigned int)((uintptr_t)new_buffer - (uintptr_t)buffer); + + CHECK_BUFFER_DELTA_SIZE(buff_size, delta); + buff_size -= delta; + buffer = new_buffer; + } + + CHECK_BUFFER_SIZE(buff_size, sizeof(int32)); + *result = *(Oid *)buffer; + *processed_size = sizeof(Oid) + delta; + + return DECODE_SUCCESS; +} + +/* Decode char(N), varchar(N), text, json or xml types and pass data out. */ +static int DecodeBytesBinary(const char *buffer, unsigned int buff_size, unsigned int *processed_size, char *out_data, + unsigned int *out_length) +{ + if (!VARATT_IS_EXTENDED(buffer)) { + *out_length = VARSIZE(buffer) - VARHDRSZ; + + *processed_size = VARSIZE(buffer); + errno_t rc = memcpy_s(out_data, *out_length, VARDATA(buffer), *out_length); + securec_check(rc, "\0", "\0"); + } else { + printf("Error: unable read TOAST value.\n"); + } + + return DECODE_SUCCESS; +} + +/* + * Decode a TOAST chunk as a tuple (Oid toast_id, Oid chunk_id, text data). + * If decoded OID is equal toast_oid, copy data into chunk_data. 
+ * + * Parameters: + * tuple_data - data of the tuple + * tuple_size - length of the tuple + * toast_oid - [out] oid of the TOAST value + * chunk_id - [out] number of the TOAST chunk stored in the tuple + * chunk - [out] extracted chunk data + * chunk_size - [out] number of bytes extracted from the chunk + */ +void ToastChunkDecode(const char *tuple_data, unsigned int tuple_size, Oid toast_oid, uint32 *chunk_id, + char *chunk_data, unsigned int *chunk_data_size) +{ + UHeapDiskTuple uheader = (UHeapDiskTuple)tuple_data; + HeapTupleHeader header = (HeapTupleHeader)tuple_data; + const char *data = 0; + unsigned int size = 0; + if (isUHeap) { + data = tuple_data + uheader->t_hoff; + size = tuple_size - uheader->t_hoff; + } else { + data = tuple_data + header->t_hoff; + size = tuple_size - header->t_hoff; + } + + unsigned int processed_size = 0; + Oid read_toast_oid; + int ret; + + *chunk_data_size = 0; + *chunk_id = 0; + + /* decode toast_id */ + ret = DecodeOidBinary(data, size, &processed_size, &read_toast_oid); + if (ret < 0) { + printf("Error: unable to decode a TOAST tuple toast_id, " + "decode function returned %d. Partial data: %s\n", + ret, copyString.data); + return; + } + + size -= processed_size; + data += processed_size; + if (size <= 0) { + printf("Error: unable to decode a TOAST chunk tuple, no more bytes " + "left. Partial data: %s\n", + copyString.data); + return; + } + + /* It is not what we are looking for */ + if (toast_oid != read_toast_oid) { + return; + } + + /* decode chunk_id */ + ret = DecodeOidBinary(data, size, &processed_size, chunk_id); + if (ret < 0) { + printf("Error: unable to decode a TOAST tuple chunk_id, decode " + "function returned %d. Partial data: %s\n", + ret, copyString.data); + return; + } + + size -= processed_size; + data += processed_size; + if (isUHeap) { + size -= 1; + data += 1; + } + + if (size <= 0) { + printf("Error: unable to decode a TOAST chunk tuple, no more bytes " + "left. Partial data: %s\n", + copyString.data); + return; + } + + /* decode data */ + ret = DecodeBytesBinary(data, size, &processed_size, chunk_data, chunk_data_size); + if (ret < 0) { + printf("Error: unable to decode a TOAST chunk data, decode function " + "returned %d. Partial data: %s\n", + ret, copyString.data); + return; + } + + size -= processed_size; + if (size != 0) { + printf("Error: unable to decode a TOAST chunk tuple, %d bytes left. 
" + "Partial data: %s\n", + size, copyString.data); + return; + } +} \ No newline at end of file diff --git a/contrib/gs_filedump/decode.h b/contrib/gs_filedump/decode.h new file mode 100644 index 0000000000000000000000000000000000000000..75ffd25894713f7541e66bed910b4868401bae17 --- /dev/null +++ b/contrib/gs_filedump/decode.h @@ -0,0 +1,281 @@ +#ifndef _GS_FILEDUMP_DECODE_H_ +#define _GS_FILEDUMP_DECODE_H_ + +#define NBASE 10000 +#define HALF_NBASE 5000 +#define DEC_DIGITS 4 /* decimal digits per NBASE digit */ +#define MUL_GUARD_DIGITS 2 /* these are measured in NBASE digits */ +#define DIV_GUARD_DIGITS 4 + +typedef int16 NumericDigit; + +#define __PRI64_PREFIX "l" +#define INT64_MODIFIER __PRI64_PREFIX + +int ParseAttributeTypesString(const char *str); + +void FormatDecode(const char *tupleData, unsigned int tupleSize); + +void ToastChunkDecode(const char *tuple_data, unsigned int tuple_size, Oid toast_oid, uint32 *chunk_id, + char *chunk_data, unsigned int *chunk_data_size); + +struct NumericShort { + uint16 n_header; /* Sign + display scale + weight */ + NumericDigit n_data[FLEXIBLE_ARRAY_MEMBER]; /* Digits */ +}; + +struct NumericLong { + uint16 n_sign_dscale; /* Sign + display scale */ + int16 n_weight; /* Weight of 1st digit */ + NumericDigit n_data[FLEXIBLE_ARRAY_MEMBER]; /* Digits */ +}; + +union NumericChoice { + uint16 n_header; /* Header word */ + struct NumericLong n_long; /* Long form (4-byte header) */ + struct NumericShort n_short; /* Short form (2-byte header) */ +}; + +struct NumericData { + union NumericChoice choice; /* choice of format */ +}; + +/* + * Interpretation of high bits. + */ + +#define NUMERIC_SIGN_MASK 0xC000 +#define NUMERIC_POS 0x0000 +#define NUMERIC_NEG 0x4000 +#define NUMERIC_SHORT 0x8000 +#define NUMERIC_SPECIAL 0xC000 + +#define NUMERIC_FLAGBITS(n) ((n)->choice.n_header & NUMERIC_SIGN_MASK) +#define NUMERIC_IS_SHORT(n) (NUMERIC_FLAGBITS(n) == NUMERIC_SHORT) +#define NUMERIC_IS_SPECIAL(n) (NUMERIC_FLAGBITS(n) == NUMERIC_SPECIAL) + +#define NUMERIC_HDRSZ (VARHDRSZ + sizeof(uint16) + sizeof(int16)) +#define NUMERIC_HDRSZ_SHORT (VARHDRSZ + sizeof(uint16)) + +/* + * If the flag bits are NUMERIC_SHORT or NUMERIC_SPECIAL, we want the short + * header; otherwise, we want the long one. Instead of testing against each + * value, we can just look at the high bit, for a slight efficiency gain. + */ +#define NUMERIC_HEADER_IS_SHORT(n) (((n)->choice.n_header & 0x8000) != 0) +#define NUMERIC_HEADER_SIZE(n) (sizeof(uint16) + (NUMERIC_HEADER_IS_SHORT(n) ? 0 : sizeof(int16))) + +/* + * Definitions for special values (NaN, positive infinity, negative infinity). + * + * The two bits after the NUMERIC_SPECIAL bits are 00 for NaN, 01 for positive + * infinity, 11 for negative infinity. (This makes the sign bit match where + * it is in a short-format value, though we make no use of that at present.) + * We could mask off the remaining bits before testing the active bits, but + * currently those bits must be zeroes, so masking would just add cycles. 
+ */ +#define NUMERIC_EXT_SIGN_MASK 0xF000 /* high bits plus NaN/Inf flag bits */ +#define NUMERIC_NAN 0xC000 +#define NUMERIC_PINF 0xD000 +#define NUMERIC_NINF 0xF000 +#define NUMERIC_INF_SIGN_MASK 0x2000 + +#define NUMERIC_EXT_FLAGBITS(n) ((n)->choice.n_header & NUMERIC_EXT_SIGN_MASK) +#define NUMERIC_IS_NAN(n) ((n)->choice.n_header == NUMERIC_NAN) +#define NUMERIC_IS_PINF(n) ((n)->choice.n_header == NUMERIC_PINF) +#define NUMERIC_IS_NINF(n) ((n)->choice.n_header == NUMERIC_NINF) +#define NUMERIC_IS_INF(n) (((n)->choice.n_header & ~NUMERIC_INF_SIGN_MASK) == NUMERIC_PINF) + +/* + * Short format definitions. + */ + +#define NUMERIC_SHORT_SIGN_MASK 0x2000 +#define NUMERIC_SHORT_DSCALE_MASK 0x1F80 +#define NUMERIC_SHORT_DSCALE_SHIFT 7 +#define NUMERIC_SHORT_DSCALE_MAX (NUMERIC_SHORT_DSCALE_MASK >> NUMERIC_SHORT_DSCALE_SHIFT) +#define NUMERIC_SHORT_WEIGHT_SIGN_MASK 0x0040 +#define NUMERIC_SHORT_WEIGHT_MASK 0x003F +#define NUMERIC_SHORT_WEIGHT_MAX NUMERIC_SHORT_WEIGHT_MASK +#define NUMERIC_SHORT_WEIGHT_MIN (-(NUMERIC_SHORT_WEIGHT_MASK + 1)) + +/* + * Extract sign, display scale, weight. These macros extract field values + * suitable for the NumericVar format from the Numeric (on-disk) format. + * + * Note that we don't trouble to ensure that dscale and weight read as zero + * for an infinity; however, that doesn't matter since we never convert + * "special" numerics to NumericVar form. Only the constants defined below + * (const_nan, etc) ever represent a non-finite value as a NumericVar. + */ + +#define NUMERIC_DSCALE_MASK 0x3FFF +#define NUMERIC_DSCALE_MAX NUMERIC_DSCALE_MASK + +#define NUMERIC_SIGN(n) \ + (NUMERIC_IS_SHORT(n) ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_SIGN_MASK) ? NUMERIC_NEG : NUMERIC_POS) \ + : (NUMERIC_IS_SPECIAL(n) ? NUMERIC_EXT_FLAGBITS(n) : NUMERIC_FLAGBITS(n))) +#define NUMERIC_DSCALE(n) \ + (NUMERIC_HEADER_IS_SHORT((n)) \ + ? ((n)->choice.n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT \ + : ((n)->choice.n_long.n_sign_dscale & NUMERIC_DSCALE_MASK)) +#define NUMERIC_WEIGHT(n) \ + (NUMERIC_HEADER_IS_SHORT((n)) \ + ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_SIGN_MASK ? ~NUMERIC_SHORT_WEIGHT_MASK : 0) | \ + ((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) \ + : ((n)->choice.n_long.n_weight)) + +#undef TrapMacro +#define TrapMacro(true, condition) (true) + +#define ATTRTYPES_STR_MAX_LEN (1024 - 1) + +typedef enum ToastCompressionId { + TOAST_PGLZ_COMPRESSION_ID = 0, + TOAST_LZ4_COMPRESSION_ID = 1, + TOAST_INVALID_COMPRESSION_ID = 2 +} ToastCompressionId; + +// Enumeration describing the meaning of decode return values +typedef enum { + DECODE_SUCCESS = 0, // success + DECODE_BUFF_SIZE_LESS_THAN_DELTA = -1, // buff_size < delta + DECODE_BUFF_SIZE_LESS_THAN_REQUIRED = -2, // buff_size < required_size + DECODE_BUFF_SIZE_IS_ZERO = -3, // buffer_size = 0 + DECODE_FAILURE = -9 +} DecodeResult; + +/* + * Utilities for manipulation of header information for compressed + * toast entries. + */ +/* + * These macros define the "saved size" portion of va_extinfo. Its remaining + * two high-order bits identify the compression method. + * Before PostgreSQL 14 only the pglz compression method existed (with 00 bits).
+ */ +#define VARLENA_EXTSIZE_BITS 30 +#define VARLENA_EXTSIZE_MASK ((1U << VARLENA_EXTSIZE_BITS) - 1) +#define VARDATA_COMPRESSED_GET_COMPRESS_METHOD(ptr) ((*((uint32 *)ptr + 1)) >> VARLENA_EXTSIZE_BITS) + +#define TOAST_COMPRESS_RAWSIZE(ptr) ((*(uint32 *)ptr) & VARLENA_EXTSIZE_MASK) +#define TOAST_COMPRESS_RAWMETHOD(ptr) ((*(uint32 *)ptr) >> VARLENA_EXTSIZE_BITS) +#define TOAST_COMPRESS_RAWDATA(ptr) (ptr + sizeof(uint32)) +#define TOAST_COMPRESS_HEADER_SIZE (sizeof(uint32)) + +// Output format macros +#define INT16_FORMAT "%d" +#define INT32_FORMAT "%d" +#define UINT32_FORMAT "%u" + +#define CHECK_BUFFER_SIZE(buff_size, required_size) \ + do { \ + if (buff_size < required_size) { \ + return DECODE_BUFF_SIZE_LESS_THAN_REQUIRED; \ + } \ + } while (0) + +#define CHECK_BUFFER_DELTA_SIZE(buff_size, delta) \ + do { \ + if (buff_size < delta) { \ + return DECODE_BUFF_SIZE_LESS_THAN_DELTA; \ + } \ + } while (0) + +/* CopyAppend version with format string support */ +#define CopyAppendFmt(fmt, ...) \ + do { \ + char __copy_format_buff[512]; \ + snprintf_s(__copy_format_buff, sizeof(__copy_format_buff), sizeof(__copy_format_buff) - 1, fmt, \ + ##__VA_ARGS__); \ + CopyAppend(__copy_format_buff); \ + } while (0) + +typedef int (*decode_callback_t)(const char *buffer, unsigned int buff_size, unsigned int *out_size); + +typedef struct { + char *name; + decode_callback_t callback; +} ParseCallbackTableItem; + +/* for ustore begin */ + +int ParseUHeapAttributeTypesString(const char *str); + +void FormatUHeapDecode(const char *tupleData, unsigned int tupleSize); + +void ToastUHeapChunkDecode(const char *tuple_data, unsigned int tuple_size, Oid toast_oid, uint32 *chunk_id, + char *chunk_data, unsigned int *chunk_data_size); + +/* ---------- + * PGLZ_MAX_OUTPUT - + * + * Macro to compute the buffer size required by pglz_compress(). + * We allow 4 bytes for overrun before detecting compression failure. + * ---------- + */ +#define PGLZ_MAX_OUTPUT(_dlen) ((_dlen) + 4) + +/* ---------- + * PGLZ_Strategy - + * + * Some values that control the compression algorithm. + * + * min_input_size Minimum input data size to consider compression. + * + * max_input_size Maximum input data size to consider compression. + * + * min_comp_rate Minimum compression rate (0-99%) to require. + * Regardless of min_comp_rate, the output must be + * smaller than the input, else we don't store + * compressed. + * + * first_success_by Abandon compression if we find no compressible + * data within the first this-many bytes. + * + * match_size_good The initial GOOD match size when starting history + * lookup. When looking up the history to find a + * match that could be expressed as a tag, the + * algorithm does not always walk back entirely. + * A good match fast is usually better than the + * best possible one very late. For each iteration + * in the lookup, this value is lowered so the + * longer the lookup takes, the smaller matches + * are considered good. + * + * match_size_drop The percentage by which match_size_good is lowered + * after each history check. Allowed values are + * 0 (no change until end) to 100 (only check + * latest history entry at all). + * ---------- + */ +typedef struct PGLZ_Strategy { + int32 min_input_size; + int32 max_input_size; + int32 min_comp_rate; + int32 first_success_by; + int32 match_size_good; + int32 match_size_drop; +} PGLZ_Strategy; + +/* ---------- + * The standard strategies + * + * PGLZ_strategy_default Recommended default strategy for TOAST.
+ * + * PGLZ_strategy_always Try to compress inputs of any length. + * Fallback to uncompressed storage only if + * output would be larger than input. + * ---------- + */ +extern const PGLZ_Strategy *const PGLZ_strategy_default; +extern const PGLZ_Strategy *const PGLZ_strategy_always; + +/* ---------- + * Global function declarations + * ---------- + */ +extern int32 pglz_compress(const char *source, int32 slen, char *dest, const PGLZ_Strategy *strategy); +extern int32 pglz_decompress(const char *source, int32 slen, char *dest, int32 rawsize, bool check_complete); + +#endif diff --git a/contrib/gs_filedump/expected/datatypes.out b/contrib/gs_filedump/expected/datatypes.out new file mode 100644 index 0000000000000000000000000000000000000000..ff40415790a4e213461b6e8dc7e1d6d68aa0dd7e --- /dev/null +++ b/contrib/gs_filedump/expected/datatypes.out @@ -0,0 +1,988 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table "int,text" (i int, t text); +insert into "int,text" values (1, 'one'), (null, 'two'), (3, null), (4, 'four'); +\set relname int,text +\ir run_test.sql +\echo Testing :relname +Testing int,text +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: int,text.heap +* Options used: -D int,text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8056 (0x1f78) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8016 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 1 one + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: \N two + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 3 \N + Item 4 -- Length: 33 Offset: 8056 (0x1f78) Flags: NORMAL +COPY: 4 four + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +-- do one test without options +\! pg_filedump int,text.heap | sed -e 's/logid ./logid ./' -e 's/recoff 0x......../recoff 0x......../' + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: int,text.heap +* Options used: None +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8056 (0x1f78) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8016 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL + Item 4 -- Length: 33 Offset: 8056 (0x1f78) Flags: NORMAL + + +*** End of File Encountered. Last Block Read: 0 *** +---------------------------------------------------------------------------------------------- +create table bigint (x bigint); +insert into bigint values (-1), (0), (1), (null); +\set relname bigint +\ir run_test.sql +\echo Testing :relname +Testing bigint +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: bigint.heap +* Options used: -D bigint +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: -1 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table bool (x bool); +insert into bool values (true), (false), (null); +\set relname bool +\ir run_test.sql +\echo Testing :relname +Testing bool +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: bool.heap +* Options used: -D bool +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8104 (0x1fa8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8068 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 25 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: t + Item 2 -- Length: 25 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: f + Item 3 -- Length: 24 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table char (x "char"); +insert into char values ('x'), (null); +\set relname char +\ir run_test.sql +\echo Testing :relname +Testing char +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: char.heap +* Options used: -D char +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8136 (0x1fc8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8104 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 25 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: x + Item 2 -- Length: 24 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table "charN" (x char(5)); +insert into "charN" values ('x'), ('xxxxx'), (null); +\set relname charN +\ir run_test.sql +\echo Testing :relname +Testing charN +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: charN.heap +* Options used: -D charN +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8104 (0x1fa8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8068 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 30 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: x + Item 2 -- Length: 30 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: xxxxx + Item 3 -- Length: 24 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table date (x date); +insert into date values ('2000-01-01'), ('1900-02-02'), ('2100-12-31'), ('100-01-01 BC'), ('-infinity'), ('infinity'), (null); +\set relname date +\ir run_test.sql +\echo Testing :relname +Testing date +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: date.heap +* Options used: -D date +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7976 (0x1f28) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7924 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 28 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 2000-01-01 + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 1900-02-02 + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 2100-12-31 + Item 4 -- Length: 28 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: 0100-01-01 BC + Item 5 -- Length: 28 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: -infinity + Item 6 -- Length: 28 Offset: 8000 (0x1f40) Flags: NORMAL +COPY: infinity + Item 7 -- Length: 24 Offset: 7976 (0x1f28) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table int (x int); +insert into int values (-1), (0), (1), (null); +\set relname int +\ir run_test.sql +\echo Testing :relname +Testing int +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: int.heap +* Options used: -D int +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 28 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: -1 + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table json (x json); +insert into json values ('1'), ('"one"'), ('{"a":"b"}'), ('null'), (null); +\set relname json +\ir run_test.sql +\echo Testing :relname +Testing json +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: json.heap +* Options used: -D json +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8032 (0x1f60) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7988 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 26 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 1 + Item 2 -- Length: 30 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: "one" + Item 3 -- Length: 34 Offset: 8088 (0x1f98) Flags: NORMAL +COPY: {"a":"b"} + Item 4 -- Length: 29 Offset: 8056 (0x1f78) Flags: NORMAL +COPY: null + Item 5 -- Length: 24 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table macaddr (x macaddr); +insert into macaddr values ('00:10:20:30:40:50'), (null); +\set relname macaddr +\ir run_test.sql +\echo Testing :relname +Testing macaddr +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: macaddr.heap +* Options used: -D macaddr +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8136 (0x1fc8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8104 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 30 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 00:10:20:30:40:50 + Item 2 -- Length: 24 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table name (x name); +insert into name values ('name'), ('1234567890123456789012345678901234567890123456789012345678901234567890'), (null); +\set relname name +\ir run_test.sql +\echo Testing :relname +Testing name +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: name.heap +* Options used: -D name +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 7992 (0x1f38) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 7956 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 88 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: name + Item 2 -- Length: 88 Offset: 8016 (0x1f50) Flags: NORMAL +COPY: 123456789012345678901234567890123456789012345678901234567890123 + Item 3 -- Length: 24 Offset: 7992 (0x1f38) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table oid (x oid); +insert into oid values (-1), (0), (1), (null); +\set relname oid +\ir run_test.sql +\echo Testing :relname +Testing oid +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: oid.heap +* Options used: -D oid +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 28 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 4294967295 + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table smallint (x smallint); +insert into smallint values (-1), (0), (1), (null); +\set relname smallint +\ir run_test.sql +\echo Testing :relname +Testing smallint +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: smallint.heap +* Options used: -D smallint +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 26 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: -1 + Item 2 -- Length: 26 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 26 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table text (x text); +insert into text values ('hello world'), (null); +\set relname text +\ir run_test.sql +\echo Testing :relname +Testing text +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: text.heap +* Options used: -D text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8128 (0x1fc0) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8096 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 36 Offset: 8152 (0x1fd8) Flags: NORMAL +COPY: hello world + Item 2 -- Length: 24 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table time (x time); +insert into time values ('00:00'), ('23:59:59'), ('23:59:60'), (null); +\set relname time +\ir run_test.sql +\echo Testing :relname +Testing time +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: time.heap +* Options used: -D time +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 00:00:00.000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 23:59:59.000000 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 24:00:00.000000 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table timestamp (x timestamp); +insert into timestamp values ('2000-01-01 00:00'), ('100-01-01 BC 2:22'), ('infinity'), ('-infinity'), (null); +\set relname timestamp +\ir run_test.sql +\echo Testing :relname +Testing timestamp +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: timestamp.heap +* Options used: -D timestamp +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8040 (0x1f68) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7996 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 2000-01-01 00:00:00.000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0100-01-01 02:22:00.000000 BC + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: infinity + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: -infinity + Item 5 -- Length: 24 Offset: 8040 (0x1f68) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +set timezone = 'Etc/UTC'; +create table timestamptz (x timestamptz); +insert into timestamptz values ('2000-01-01 00:00'), ('100-01-01 BC 2:22'), ('infinity'), ('-infinity'), (null); +\set relname timestamptz +\ir run_test.sql +\echo Testing :relname +Testing timestamptz +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: timestamptz.heap +* Options used: -D timestamptz +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8040 (0x1f68) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7996 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 2000-01-01 00:00:00.000000+00 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0100-01-01 02:22:00.000000+00 BC + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: infinity + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: -infinity + Item 5 -- Length: 24 Offset: 8040 (0x1f68) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table timetz (x timetz); +insert into timetz values ('00:00 Etc/UTC'), ('23:59:59 Etc/UTC'), ('23:59:60 Etc/UTC'), ('1:23+4:56'), (null); +\set relname timetz +\ir run_test.sql +\echo Testing :relname +Testing timetz +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: timetz.heap +* Options used: -D timetz +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8008 (0x1f48) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7964 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 36 Offset: 8152 (0x1fd8) Flags: NORMAL +COPY: 00:00:00.000000-00:00 + Item 2 -- Length: 36 Offset: 8112 (0x1fb0) Flags: NORMAL +COPY: 23:59:59.000000-00:00 + Item 3 -- Length: 36 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: 24:00:00.000000-00:00 + Item 4 -- Length: 36 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: 01:23:00.000000+04:56 + Item 5 -- Length: 24 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table uuid (x uuid); +insert into uuid values ('b4f0e2d6-429b-48bd-af06-6578829dd980'), ('00000000-0000-0000-0000-000000000000'), (null); +\set relname uuid +\ir run_test.sql +\echo Testing :relname +Testing uuid +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: uuid.heap +* Options used: -D uuid +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8088 (0x1f98) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8052 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 40 Offset: 8152 (0x1fd8) Flags: NORMAL +COPY: b4f0e2d6-429b-48bd-af06-6578829dd980 + Item 2 -- Length: 40 Offset: 8112 (0x1fb0) Flags: NORMAL +COPY: 00000000-0000-0000-0000-000000000000 + Item 3 -- Length: 24 Offset: 8088 (0x1f98) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table varchar (x varchar); +insert into varchar values ('Hello World'), (''), (null); +\set relname varchar +\ir run_test.sql +\echo Testing :relname +Testing varchar +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: varchar.heap +* Options used: -D varchar +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8096 (0x1fa0) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8060 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 36 Offset: 8152 (0x1fd8) Flags: NORMAL +COPY: Hello World + Item 2 -- Length: 25 Offset: 8120 (0x1fb8) Flags: NORMAL +COPY: + Item 3 -- Length: 24 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table "varcharN" (x varchar(11)); +insert into "varcharN" values ('Hello World'), (''), (null); +\set relname varcharN +\ir run_test.sql +\echo Testing :relname +Testing varcharN +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: varcharN.heap +* Options used: -D varcharN +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8096 (0x1fa0) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8060 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 36 Offset: 8152 (0x1fd8) Flags: NORMAL +COPY: Hello World + Item 2 -- Length: 25 Offset: 8120 (0x1fb8) Flags: NORMAL +COPY: + Item 3 -- Length: 24 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table xid (x xid); +insert into xid values ('-1'), ('0'), ('1'), (null); +\set relname xid +\ir run_test.sql +\echo Testing :relname +Testing xid +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: xid.heap +* Options used: -D xid +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 28 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 4294967295 + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/datatypes_3.out b/contrib/gs_filedump/expected/datatypes_3.out new file mode 100644 index 0000000000000000000000000000000000000000..f7ea25f2fef1894b74abf78aa9ff690bba81e9ac --- /dev/null +++ b/contrib/gs_filedump/expected/datatypes_3.out @@ -0,0 +1,988 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table "int,text" (i int, t text); +insert into "int,text" values (1, 'one'), (null, 'two'), (3, null), (4, 'four'); +\set relname int,text +\ir run_test.sql +\echo Testing :relname +Testing int,text +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: int,text.heap +* Options used: -D int,text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8068 (0x1f84) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8028 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 1 one + Item 2 -- Length: 28 Offset: 8132 (0x1fc4) Flags: NORMAL +COPY: \N two + Item 3 -- Length: 28 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: 3 \N + Item 4 -- Length: 33 Offset: 8068 (0x1f84) Flags: NORMAL +COPY: 4 four + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +-- do one test without options +\! pg_filedump int,text.heap | sed -e 's/logid ./logid ./' -e 's/recoff 0x......../recoff 0x......../' + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: int,text.heap +* Options used: None +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8068 (0x1f84) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8028 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL + Item 2 -- Length: 28 Offset: 8132 (0x1fc4) Flags: NORMAL + Item 3 -- Length: 28 Offset: 8104 (0x1fa8) Flags: NORMAL + Item 4 -- Length: 33 Offset: 8068 (0x1f84) Flags: NORMAL + + +*** End of File Encountered. Last Block Read: 0 *** +---------------------------------------------------------------------------------------------- +create table bigint (x bigint); +insert into bigint values (-1), (0), (1), (null); +\set relname bigint +\ir run_test.sql +\echo Testing :relname +Testing bigint +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: bigint.heap +* Options used: -D bigint +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: -1 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table bool (x bool); +insert into bool values (true), (false), (null); +\set relname bool +\ir run_test.sql +\echo Testing :relname +Testing bool +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: bool.heap +* Options used: -D bool +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8112 (0x1fb0) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8076 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 25 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: t + Item 2 -- Length: 25 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: f + Item 3 -- Length: 24 Offset: 8112 (0x1fb0) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table char (x "char"); +insert into char values ('x'), (null); +\set relname char +\ir run_test.sql +\echo Testing :relname +Testing char +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: char.heap +* Options used: -D char +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8140 (0x1fcc) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8108 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 25 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: x + Item 2 -- Length: 24 Offset: 8140 (0x1fcc) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table "charN" (x char(5)); +insert into "charN" values ('x'), ('xxxxx'), (null); +\set relname charN +\ir run_test.sql +\echo Testing :relname +Testing charN +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: charN.heap +* Options used: -D charN +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8104 (0x1fa8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8068 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 30 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: x + Item 2 -- Length: 30 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: xxxxx + Item 3 -- Length: 24 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table date (x date); +insert into date values ('2000-01-01'), ('1900-02-02'), ('2100-12-31'), ('100-01-01 BC'), ('-infinity'), ('infinity'), (null); +\set relname date +\ir run_test.sql +\echo Testing :relname +Testing date +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: date.heap +* Options used: -D date +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 8000 (0x1f40) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7948 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 28 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 2000-01-01 + Item 2 -- Length: 28 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: 1900-02-02 + Item 3 -- Length: 28 Offset: 8108 (0x1fac) Flags: NORMAL +COPY: 2100-12-31 + Item 4 -- Length: 28 Offset: 8080 (0x1f90) Flags: NORMAL +COPY: 0100-01-01 BC + Item 5 -- Length: 28 Offset: 8052 (0x1f74) Flags: NORMAL +COPY: -infinity + Item 6 -- Length: 28 Offset: 8024 (0x1f58) Flags: NORMAL +COPY: infinity + Item 7 -- Length: 24 Offset: 8000 (0x1f40) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table int (x int); +insert into int values (-1), (0), (1), (null); +\set relname int +\ir run_test.sql +\echo Testing :relname +Testing int +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: int.heap +* Options used: -D int +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8084 (0x1f94) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8044 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 28 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: -1 + Item 2 -- Length: 28 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 28 Offset: 8108 (0x1fac) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8084 (0x1f94) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table json (x json); +insert into json values ('1'), ('"one"'), ('{"a":"b"}'), ('null'), (null); +\set relname json +\ir run_test.sql +\echo Testing :relname +Testing json +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: json.heap +* Options used: -D json +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8040 (0x1f68) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7996 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 26 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 1 + Item 2 -- Length: 30 Offset: 8132 (0x1fc4) Flags: NORMAL +COPY: "one" + Item 3 -- Length: 34 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: {"a":"b"} + Item 4 -- Length: 29 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: null + Item 5 -- Length: 24 Offset: 8040 (0x1f68) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table macaddr (x macaddr); +insert into macaddr values ('00:10:20:30:40:50'), (null); +\set relname macaddr +\ir run_test.sql +\echo Testing :relname +Testing macaddr +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: macaddr.heap +* Options used: -D macaddr +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8136 (0x1fc8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8104 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 30 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 00:10:20:30:40:50 + Item 2 -- Length: 24 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table name (x name); +insert into name values ('name'), ('1234567890123456789012345678901234567890123456789012345678901234567890'), (null); +\set relname name +\ir run_test.sql +\echo Testing :relname +Testing name +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: name.heap +* Options used: -D name +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 7992 (0x1f38) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 7956 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 88 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: name + Item 2 -- Length: 88 Offset: 8016 (0x1f50) Flags: NORMAL +COPY: 123456789012345678901234567890123456789012345678901234567890123 + Item 3 -- Length: 24 Offset: 7992 (0x1f38) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table oid (x oid); +insert into oid values (-1), (0), (1), (null); +\set relname oid +\ir run_test.sql +\echo Testing :relname +Testing oid +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: oid.heap +* Options used: -D oid +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8084 (0x1f94) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8044 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 28 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 4294967295 + Item 2 -- Length: 28 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 28 Offset: 8108 (0x1fac) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8084 (0x1f94) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table smallint (x smallint); +insert into smallint values (-1), (0), (1), (null); +\set relname smallint +\ir run_test.sql +\echo Testing :relname +Testing smallint +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: smallint.heap +* Options used: -D smallint +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8084 (0x1f94) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8044 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 26 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: -1 + Item 2 -- Length: 26 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 26 Offset: 8108 (0x1fac) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8084 (0x1f94) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table text (x text); +insert into text values ('hello world'), (null); +\set relname text +\ir run_test.sql +\echo Testing :relname +Testing text +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: text.heap +* Options used: -D text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8132 (0x1fc4) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8100 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 36 Offset: 8156 (0x1fdc) Flags: NORMAL +COPY: hello world + Item 2 -- Length: 24 Offset: 8132 (0x1fc4) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table time (x time); +insert into time values ('00:00'), ('23:59:59'), ('23:59:60'), (null); +\set relname time +\ir run_test.sql +\echo Testing :relname +Testing time +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: time.heap +* Options used: -D time +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8072 (0x1f88) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8032 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 00:00:00.000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 23:59:59.000000 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: 24:00:00.000000 + Item 4 -- Length: 24 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table timestamp (x timestamp); +insert into timestamp values ('2000-01-01 00:00'), ('100-01-01 BC 2:22'), ('infinity'), ('-infinity'), (null); +\set relname timestamp +\ir run_test.sql +\echo Testing :relname +Testing timestamp +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: timestamp.heap +* Options used: -D timestamp +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8040 (0x1f68) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7996 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 2000-01-01 00:00:00.000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0100-01-01 02:22:00.000000 BC + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: infinity + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: -infinity + Item 5 -- Length: 24 Offset: 8040 (0x1f68) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +set timezone = 'Etc/UTC'; +create table timestamptz (x timestamptz); +insert into timestamptz values ('2000-01-01 00:00'), ('100-01-01 BC 2:22'), ('infinity'), ('-infinity'), (null); +\set relname timestamptz +\ir run_test.sql +\echo Testing :relname +Testing timestamptz +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: timestamptz.heap +* Options used: -D timestamptz +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8040 (0x1f68) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7996 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 2000-01-01 00:00:00.000000+00 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 0100-01-01 02:22:00.000000+00 BC + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: infinity + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: -infinity + Item 5 -- Length: 24 Offset: 8040 (0x1f68) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table timetz (x timetz); +insert into timetz values ('00:00 Etc/UTC'), ('23:59:59 Etc/UTC'), ('23:59:60 Etc/UTC'), ('1:23+4:56'), (null); +\set relname timetz +\ir run_test.sql +\echo Testing :relname +Testing timetz +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: timetz.heap +* Options used: -D timetz +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8024 (0x1f58) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7980 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 36 Offset: 8156 (0x1fdc) Flags: NORMAL +COPY: 00:00:00.000000-00:00 + Item 2 -- Length: 36 Offset: 8120 (0x1fb8) Flags: NORMAL +COPY: 23:59:59.000000-00:00 + Item 3 -- Length: 36 Offset: 8084 (0x1f94) Flags: NORMAL +COPY: 24:00:00.000000-00:00 + Item 4 -- Length: 36 Offset: 8048 (0x1f70) Flags: NORMAL +COPY: 01:23:00.000000+04:56 + Item 5 -- Length: 24 Offset: 8024 (0x1f58) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table uuid (x uuid); +insert into uuid values ('b4f0e2d6-429b-48bd-af06-6578829dd980'), ('00000000-0000-0000-0000-000000000000'), (null); +\set relname uuid +\ir run_test.sql +\echo Testing :relname +Testing uuid +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: uuid.heap +* Options used: -D uuid +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8088 (0x1f98) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8052 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 40 Offset: 8152 (0x1fd8) Flags: NORMAL +COPY: b4f0e2d6-429b-48bd-af06-6578829dd980 + Item 2 -- Length: 40 Offset: 8112 (0x1fb0) Flags: NORMAL +COPY: 00000000-0000-0000-0000-000000000000 + Item 3 -- Length: 24 Offset: 8088 (0x1f98) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table varchar (x varchar); +insert into varchar values ('Hello World'), (''), (null); +\set relname varchar +\ir run_test.sql +\echo Testing :relname +Testing varchar +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: varchar.heap +* Options used: -D varchar +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8104 (0x1fa8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8068 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 36 Offset: 8156 (0x1fdc) Flags: NORMAL +COPY: Hello World + Item 2 -- Length: 25 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: + Item 3 -- Length: 24 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table "varcharN" (x varchar(11)); +insert into "varcharN" values ('Hello World'), (''), (null); +\set relname varcharN +\ir run_test.sql +\echo Testing :relname +Testing varcharN +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: varcharN.heap +* Options used: -D varcharN +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 36 (0x0024) + Block: Size 8192 Version 4 Upper 8104 (0x1fa8) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 3 Free Space: 8068 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 36 + + ----- + Item 1 -- Length: 36 Offset: 8156 (0x1fdc) Flags: NORMAL +COPY: Hello World + Item 2 -- Length: 25 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: + Item 3 -- Length: 24 Offset: 8104 (0x1fa8) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table xid (x xid); +insert into xid values ('-1'), ('0'), ('1'), (null); +\set relname xid +\ir run_test.sql +\echo Testing :relname +Testing xid +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: xid.heap +* Options used: -D xid +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 40 (0x0028) + Block: Size 8192 Version 4 Upper 8084 (0x1f94) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 4 Free Space: 8044 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 40 + + ----- + Item 1 -- Length: 28 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 4294967295 + Item 2 -- Length: 28 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: 0 + Item 3 -- Length: 28 Offset: 8108 (0x1fac) Flags: NORMAL +COPY: 1 + Item 4 -- Length: 24 Offset: 8084 (0x1f94) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/float.out b/contrib/gs_filedump/expected/float.out new file mode 100644 index 0000000000000000000000000000000000000000..625abb2067b0df9767516d03778faca001ac19c1 --- /dev/null +++ b/contrib/gs_filedump/expected/float.out @@ -0,0 +1,104 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG12+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table float4 (x float4); +insert into float4 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float4 +\ir run_test.sql +\echo Testing :relname +Testing float4 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float4.heap +* Options used: -D float4 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8008 (0x1f48) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7960 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 28 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -Infinity + Item 4 -- Length: 28 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: Infinity + Item 5 -- Length: 28 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: NaN + Item 6 -- Length: 24 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table float8 (x float8); +insert into float8 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float8 +\ir run_test.sql +\echo Testing :relname +Testing float8 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float8.heap +* Options used: -D float8 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8008 (0x1f48) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7960 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -Infinity + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: Infinity + Item 5 -- Length: 32 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: NaN + Item 6 -- Length: 24 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/float_1.out b/contrib/gs_filedump/expected/float_1.out new file mode 100644 index 0000000000000000000000000000000000000000..f64a0581e1b685623959aea3d73e9fae68d45e70 --- /dev/null +++ b/contrib/gs_filedump/expected/float_1.out @@ -0,0 +1,104 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG12+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table float4 (x float4); +insert into float4 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float4 +\ir run_test.sql +\echo Testing :relname +Testing float4 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float4.heap +* Options used: -D float4 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8008 (0x1f48) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7960 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 28 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 28 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 28 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -inf + Item 4 -- Length: 28 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: inf + Item 5 -- Length: 28 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: nan + Item 6 -- Length: 24 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table float8 (x float8); +insert into float8 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float8 +\ir run_test.sql +\echo Testing :relname +Testing float8 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float8.heap +* Options used: -D float8 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8008 (0x1f48) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7960 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -inf + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: inf + Item 5 -- Length: 32 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: nan + Item 6 -- Length: 24 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/float_3.out b/contrib/gs_filedump/expected/float_3.out new file mode 100644 index 0000000000000000000000000000000000000000..441ebe8f6389cd202e04e664a6680c5c48ba1509 --- /dev/null +++ b/contrib/gs_filedump/expected/float_3.out @@ -0,0 +1,104 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG12+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table float4 (x float4); +insert into float4 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float4 +\ir run_test.sql +\echo Testing :relname +Testing float4 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float4.heap +* Options used: -D float4 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8028 (0x1f5c) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7980 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 28 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 28 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 28 Offset: 8108 (0x1fac) Flags: NORMAL +COPY: -Infinity + Item 4 -- Length: 28 Offset: 8080 (0x1f90) Flags: NORMAL +COPY: Infinity + Item 5 -- Length: 28 Offset: 8052 (0x1f74) Flags: NORMAL +COPY: NaN + Item 6 -- Length: 24 Offset: 8028 (0x1f5c) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table float8 (x float8); +insert into float8 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float8 +\ir run_test.sql +\echo Testing :relname +Testing float8 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float8.heap +* Options used: -D float8 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8008 (0x1f48) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7960 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -Infinity + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: Infinity + Item 5 -- Length: 32 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: NaN + Item 6 -- Length: 24 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/float_4.out b/contrib/gs_filedump/expected/float_4.out new file mode 100644 index 0000000000000000000000000000000000000000..731487a26781f4b120655a4b289f3fa65602d151 --- /dev/null +++ b/contrib/gs_filedump/expected/float_4.out @@ -0,0 +1,104 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG12+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table float4 (x float4); +insert into float4 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float4 +\ir run_test.sql +\echo Testing :relname +Testing float4 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float4.heap +* Options used: -D float4 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8028 (0x1f5c) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7980 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 28 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 28 Offset: 8136 (0x1fc8) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 28 Offset: 8108 (0x1fac) Flags: NORMAL +COPY: -inf + Item 4 -- Length: 28 Offset: 8080 (0x1f90) Flags: NORMAL +COPY: inf + Item 5 -- Length: 28 Offset: 8052 (0x1f74) Flags: NORMAL +COPY: nan + Item 6 -- Length: 24 Offset: 8028 (0x1f5c) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- +create table float8 (x float8); +insert into float8 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float8 +\ir run_test.sql +\echo Testing :relname +Testing float8 +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: float8.heap +* Options used: -D float8 +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 48 (0x0030) + Block: Size 8192 Version 4 Upper 8008 (0x1f48) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 6 Free Space: 7960 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 48 + + ----- + Item 1 -- Length: 32 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0.000000000000 + Item 2 -- Length: 32 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: -0.000000000000 + Item 3 -- Length: 32 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -inf + Item 4 -- Length: 32 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: inf + Item 5 -- Length: 32 Offset: 8032 (0x1f60) Flags: NORMAL +COPY: nan + Item 6 -- Length: 24 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/numeric.out b/contrib/gs_filedump/expected/numeric.out new file mode 100644 index 0000000000000000000000000000000000000000..5b24dcbd716632282cf8877fbdcdaedffdc80fb9 --- /dev/null +++ b/contrib/gs_filedump/expected/numeric.out @@ -0,0 +1,57 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG14+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table numeric (x numeric); +insert into numeric values (0), ('12341234'), ('-567890'), ('NaN'), (null); +insert into numeric values ('-Infinity'), ('Infinity'); -- needs PG 14 +\set relname numeric +\ir run_test.sql +\echo Testing :relname +Testing numeric +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: numeric.heap +* Options used: -D numeric +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7976 (0x1f28) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7924 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 27 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0 + Item 2 -- Length: 31 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 12341234 + Item 3 -- Length: 31 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -567890 + Item 4 -- Length: 27 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: NaN + Item 5 -- Length: 24 Offset: 8040 (0x1f68) Flags: NORMAL +COPY: \N + Item 6 -- Length: 27 Offset: 8008 (0x1f48) Flags: NORMAL +COPY: -Infinity + Item 7 -- Length: 27 Offset: 7976 (0x1f28) Flags: NORMAL +COPY: Infinity + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/numeric_1.out b/contrib/gs_filedump/expected/numeric_1.out new file mode 100644 index 0000000000000000000000000000000000000000..1ba9896287cbe5ecd61919c9abf936bd23597b73 --- /dev/null +++ b/contrib/gs_filedump/expected/numeric_1.out @@ -0,0 +1,56 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG14+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table numeric (x numeric); +insert into numeric values (0), ('12341234'), ('-567890'), ('NaN'), (null); +insert into numeric values ('-Infinity'), ('Infinity'); -- needs PG 14 +ERROR: invalid input syntax for type numeric: "-Infinity" +LINE 1: insert into numeric values ('-Infinity'), ('Infinity'); + ^ +\set relname numeric +\ir run_test.sql +\echo Testing :relname +Testing numeric +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: numeric.heap +* Options used: -D numeric +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8040 (0x1f68) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 7996 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 27 Offset: 8160 (0x1fe0) Flags: NORMAL +COPY: 0 + Item 2 -- Length: 31 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: 12341234 + Item 3 -- Length: 31 Offset: 8096 (0x1fa0) Flags: NORMAL +COPY: -567890 + Item 4 -- Length: 27 Offset: 8064 (0x1f80) Flags: NORMAL +COPY: NaN + Item 5 -- Length: 24 Offset: 8040 (0x1f68) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/numeric_3.out b/contrib/gs_filedump/expected/numeric_3.out new file mode 100644 index 0000000000000000000000000000000000000000..8a29621f75abbb499b3a0bc840bf0a0f17713888 --- /dev/null +++ b/contrib/gs_filedump/expected/numeric_3.out @@ -0,0 +1,57 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG14+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table numeric (x numeric); +insert into numeric values (0), ('12341234'), ('-567890'), ('NaN'), (null); +insert into numeric values ('-Infinity'), ('Infinity'); -- needs PG 14 +\set relname numeric +\ir run_test.sql +\echo Testing :relname +Testing numeric +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: numeric.heap +* Options used: -D numeric +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7992 (0x1f38) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7940 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 27 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 0 + Item 2 -- Length: 31 Offset: 8132 (0x1fc4) Flags: NORMAL +COPY: 12341234 + Item 3 -- Length: 31 Offset: 8100 (0x1fa4) Flags: NORMAL +COPY: -567890 + Item 4 -- Length: 27 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: NaN + Item 5 -- Length: 24 Offset: 8048 (0x1f70) Flags: NORMAL +COPY: \N + Item 6 -- Length: 27 Offset: 8020 (0x1f54) Flags: NORMAL +COPY: -Infinity + Item 7 -- Length: 27 Offset: 7992 (0x1f38) Flags: NORMAL +COPY: Infinity + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/numeric_4.out b/contrib/gs_filedump/expected/numeric_4.out new file mode 100644 index 0000000000000000000000000000000000000000..2065fe0b93dce80355bf3d4e351cbcc6d85315b3 --- /dev/null +++ b/contrib/gs_filedump/expected/numeric_4.out @@ -0,0 +1,56 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG14+ output in *.out/*_3.out, earlier in *_1.out/*_4.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table numeric (x numeric); +insert into numeric values (0), ('12341234'), ('-567890'), ('NaN'), (null); +insert into numeric values ('-Infinity'), ('Infinity'); -- needs PG 14 +ERROR: invalid input syntax for type numeric: "-Infinity" +LINE 1: insert into numeric values ('-Infinity'), ('Infinity'); + ^ +\set relname numeric +\ir run_test.sql +\echo Testing :relname +Testing numeric +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: numeric.heap +* Options used: -D numeric +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 44 (0x002c) + Block: Size 8192 Version 4 Upper 8048 (0x1f70) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 5 Free Space: 8004 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 44 + + ----- + Item 1 -- Length: 27 Offset: 8164 (0x1fe4) Flags: NORMAL +COPY: 0 + Item 2 -- Length: 31 Offset: 8132 (0x1fc4) Flags: NORMAL +COPY: 12341234 + Item 3 -- Length: 31 Offset: 8100 (0x1fa4) Flags: NORMAL +COPY: -567890 + Item 4 -- Length: 27 Offset: 8072 (0x1f88) Flags: NORMAL +COPY: NaN + Item 5 -- Length: 24 Offset: 8048 (0x1f70) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/toast.out b/contrib/gs_filedump/expected/toast.out new file mode 100644 index 0000000000000000000000000000000000000000..a52a29d2b0686ec57a2380a15b7d70c5f607dd57 --- /dev/null +++ b/contrib/gs_filedump/expected/toast.out @@ -0,0 +1,103 @@ +-- PG14+ output in toast.out/_3.out (32-bit); PG13- output in toast_1.out/_4.out +create table toast ( + description text, + data text +); +insert into toast values ('short inline', 'xxx'); +insert into toast values ('long inline uncompressed', repeat('x', 200)); +alter table toast alter column data set storage external; +insert into toast values ('external uncompressed', repeat('0123456789 8< ', 200)); +alter table toast alter column data set storage extended; +insert into toast values ('inline compressed pglz', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed pglz', repeat('0123456789 8< ', 20000)); +alter table toast alter column data set compression lz4; +insert into toast values ('inline compressed lz4', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed lz4', repeat('0123456789 8< ', 50000)); +vacuum toast; +checkpoint; +-- copy tables where client can read it +\set relname 'toast' +select oid as datoid from pg_database where datname = current_database() \gset +select relfilenode, reltoastrelid from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as loid \gset +\set output :relname '.heap' +\lo_export :loid :output +select lo_import(format('base/%s/%s', :'datoid', :'reltoastrelid')) as toast_loid \gset +\set output :reltoastrelid +\lo_export :toast_loid :output +\setenv relname :relname +\! pg_filedump -D text,text $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7472 (0x1d30) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7420 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8144 (0x1fd0) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7888 (0x1ed0) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7824 (0x1e90) Flags: NORMAL +COPY: external uncompressed (TOASTED,uncompressed) + Item 4 -- Length: 107 Offset: 7712 (0x1e20) Flags: NORMAL +COPY: inline compressed pglz 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 
8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 5 -- Length: 67 Offset: 7640 (0x1dd8) Flags: NORMAL +COPY: extended compressed pglz (TOASTED,pglz) + Item 6 -- Length: 90 Offset: 7544 (0x1d78) Flags: NORMAL +COPY: inline compressed lz4 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 7 -- Length: 66 Offset: 7472 (0x1d30) Flags: NORMAL +COPY: extended compressed lz4 (TOASTED,lz4) + + +*** End of File Encountered. Last Block Read: 0 *** +\! 
pg_filedump -D text,text -t $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" -e 's/id: ...../id: ...../g' -e 's/ 8< .*//' + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text -t +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7472 (0x1d30) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7420 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8144 (0x1fd0) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7888 (0x1ed0) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7824 (0x1e90) Flags: NORMAL + TOAST value. Raw size: 2804, external size: 2800, value id: ....., toast relation id: ....., chunks: 2 +COPY: external uncompressed 0123456789 + Item 4 -- Length: 107 Offset: 7712 (0x1e20) Flags: NORMAL +COPY: inline compressed pglz 0123456789 + Item 5 -- Length: 67 Offset: 7640 (0x1dd8) Flags: NORMAL + TOAST value. Raw size: 280004, external size: 3226, value id: ....., toast relation id: ....., chunks: 2 +COPY: extended compressed pglz 0123456789 + Item 6 -- Length: 90 Offset: 7544 (0x1d78) Flags: NORMAL +COPY: inline compressed lz4 0123456789 + Item 7 -- Length: 66 Offset: 7472 (0x1d30) Flags: NORMAL + TOAST value. Raw size: 700004, external size: 2772, value id: ....., toast relation id: ....., chunks: 2 +COPY: extended compressed lz4 0123456789 + + +*** End of File Encountered. Last Block Read: 0 *** diff --git a/contrib/gs_filedump/expected/toast_1.out b/contrib/gs_filedump/expected/toast_1.out new file mode 100644 index 0000000000000000000000000000000000000000..146739504794dc6b31a3ba2c72be6db117f904be --- /dev/null +++ b/contrib/gs_filedump/expected/toast_1.out @@ -0,0 +1,106 @@ +-- PG14+ output in toast.out/_3.out (32-bit); PG13- output in toast_1.out/_4.out +create table toast ( + description text, + data text +); +insert into toast values ('short inline', 'xxx'); +insert into toast values ('long inline uncompressed', repeat('x', 200)); +alter table toast alter column data set storage external; +insert into toast values ('external uncompressed', repeat('0123456789 8< ', 200)); +alter table toast alter column data set storage extended; +insert into toast values ('inline compressed pglz', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed pglz', repeat('0123456789 8< ', 20000)); +alter table toast alter column data set compression lz4; +ERROR: syntax error at or near "compression" +LINE 1: alter table toast alter column data set compression lz4; + ^ +insert into toast values ('inline compressed lz4', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed lz4', repeat('0123456789 8< ', 50000)); +vacuum toast; +checkpoint; +-- copy tables where client can read it +\set relname 'toast' +select oid as datoid from pg_database where datname = current_database() \gset +select relfilenode, reltoastrelid from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as loid \gset +\set output :relname '.heap' +\lo_export :loid :output +select lo_import(format('base/%s/%s', :'datoid', :'reltoastrelid')) as toast_loid \gset +\set output :reltoastrelid +\lo_export :toast_loid :output +\setenv relname :relname +\! 
pg_filedump -D text,text $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7456 (0x1d20) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7404 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8144 (0x1fd0) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7888 (0x1ed0) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7824 (0x1e90) Flags: NORMAL +COPY: external uncompressed (TOASTED,uncompressed) + Item 4 -- Length: 107 Offset: 7712 (0x1e20) Flags: NORMAL +COPY: inline compressed pglz 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 
8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 5 -- Length: 67 Offset: 7640 (0x1dd8) Flags: NORMAL +COPY: extended compressed pglz (TOASTED,pglz) + Item 6 -- Length: 107 Offset: 7528 (0x1d68) Flags: NORMAL +COPY: inline compressed lz4 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 7 -- Length: 66 Offset: 7456 (0x1d20) Flags: NORMAL +COPY: extended compressed lz4 (TOASTED,pglz) + + +*** End of File Encountered. Last Block Read: 0 *** +\! 
pg_filedump -D text,text -t $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" -e 's/id: ...../id: ...../g' -e 's/ 8< .*//' + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text -t +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7456 (0x1d20) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7404 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8144 (0x1fd0) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7888 (0x1ed0) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7824 (0x1e90) Flags: NORMAL + TOAST value. Raw size: 2804, external size: 2800, value id: ....., toast relation id: ....., chunks: 2 +COPY: external uncompressed 0123456789 + Item 4 -- Length: 107 Offset: 7712 (0x1e20) Flags: NORMAL +COPY: inline compressed pglz 0123456789 + Item 5 -- Length: 67 Offset: 7640 (0x1dd8) Flags: NORMAL + TOAST value. Raw size: 280004, external size: 3226, value id: ....., toast relation id: ....., chunks: 2 +COPY: extended compressed pglz 0123456789 + Item 6 -- Length: 107 Offset: 7528 (0x1d68) Flags: NORMAL +COPY: inline compressed lz4 0123456789 + Item 7 -- Length: 66 Offset: 7456 (0x1d20) Flags: NORMAL + TOAST value. Raw size: 700004, external size: 8035, value id: ....., toast relation id: ....., chunks: 5 +COPY: extended compressed lz4 0123456789 + + +*** End of File Encountered. Last Block Read: 0 *** diff --git a/contrib/gs_filedump/expected/toast_3.out b/contrib/gs_filedump/expected/toast_3.out new file mode 100644 index 0000000000000000000000000000000000000000..5d9b29a3e2c1d8565b3c082b805e231c5c270ffc --- /dev/null +++ b/contrib/gs_filedump/expected/toast_3.out @@ -0,0 +1,103 @@ +-- PG14+ output in toast.out/_3.out (32-bit); PG13- output in toast_1.out/_4.out +create table toast ( + description text, + data text +); +insert into toast values ('short inline', 'xxx'); +insert into toast values ('long inline uncompressed', repeat('x', 200)); +alter table toast alter column data set storage external; +insert into toast values ('external uncompressed', repeat('0123456789 8< ', 200)); +alter table toast alter column data set storage extended; +insert into toast values ('inline compressed pglz', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed pglz', repeat('0123456789 8< ', 20000)); +alter table toast alter column data set compression lz4; +insert into toast values ('inline compressed lz4', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed lz4', repeat('0123456789 8< ', 50000)); +vacuum toast; +checkpoint; +-- copy tables where client can read it +\set relname 'toast' +select oid as datoid from pg_database where datname = current_database() \gset +select relfilenode, reltoastrelid from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as loid \gset +\set output :relname '.heap' +\lo_export :loid :output +select lo_import(format('base/%s/%s', :'datoid', :'reltoastrelid')) as toast_loid \gset +\set output :reltoastrelid +\lo_export :toast_loid :output +\setenv relname :relname +\! 
pg_filedump -D text,text $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7492 (0x1d44) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7440 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8148 (0x1fd4) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7892 (0x1ed4) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7828 (0x1e94) Flags: NORMAL +COPY: external uncompressed (TOASTED,uncompressed) + Item 4 -- Length: 107 Offset: 7720 (0x1e28) Flags: NORMAL +COPY: inline compressed pglz 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 
8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 5 -- Length: 67 Offset: 7652 (0x1de4) Flags: NORMAL +COPY: extended compressed pglz (TOASTED,pglz) + Item 6 -- Length: 90 Offset: 7560 (0x1d88) Flags: NORMAL +COPY: inline compressed lz4 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 7 -- Length: 66 Offset: 7492 (0x1d44) Flags: NORMAL +COPY: extended compressed lz4 (TOASTED,lz4) + + +*** End of File Encountered. Last Block Read: 0 *** +\! 
pg_filedump -D text,text -t $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" -e 's/id: ...../id: ...../g' -e 's/ 8< .*//' + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text -t +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7492 (0x1d44) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7440 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8148 (0x1fd4) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7892 (0x1ed4) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7828 (0x1e94) Flags: NORMAL + TOAST value. Raw size: 2804, external size: 2800, value id: ....., toast relation id: ....., chunks: 2 +COPY: external uncompressed 0123456789 + Item 4 -- Length: 107 Offset: 7720 (0x1e28) Flags: NORMAL +COPY: inline compressed pglz 0123456789 + Item 5 -- Length: 67 Offset: 7652 (0x1de4) Flags: NORMAL + TOAST value. Raw size: 280004, external size: 3226, value id: ....., toast relation id: ....., chunks: 2 +COPY: extended compressed pglz 0123456789 + Item 6 -- Length: 90 Offset: 7560 (0x1d88) Flags: NORMAL +COPY: inline compressed lz4 0123456789 + Item 7 -- Length: 66 Offset: 7492 (0x1d44) Flags: NORMAL + TOAST value. Raw size: 700004, external size: 2772, value id: ....., toast relation id: ....., chunks: 2 +COPY: extended compressed lz4 0123456789 + + +*** End of File Encountered. Last Block Read: 0 *** diff --git a/contrib/gs_filedump/expected/toast_4.out b/contrib/gs_filedump/expected/toast_4.out new file mode 100644 index 0000000000000000000000000000000000000000..805e23d013ca70cfc694b13d819df6824a55c524 --- /dev/null +++ b/contrib/gs_filedump/expected/toast_4.out @@ -0,0 +1,106 @@ +-- PG14+ output in toast.out/_3.out (32-bit); PG13- output in toast_1.out/_4.out +create table toast ( + description text, + data text +); +insert into toast values ('short inline', 'xxx'); +insert into toast values ('long inline uncompressed', repeat('x', 200)); +alter table toast alter column data set storage external; +insert into toast values ('external uncompressed', repeat('0123456789 8< ', 200)); +alter table toast alter column data set storage extended; +insert into toast values ('inline compressed pglz', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed pglz', repeat('0123456789 8< ', 20000)); +alter table toast alter column data set compression lz4; +ERROR: syntax error at or near "compression" +LINE 1: alter table toast alter column data set compression lz4; + ^ +insert into toast values ('inline compressed lz4', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed lz4', repeat('0123456789 8< ', 50000)); +vacuum toast; +checkpoint; +-- copy tables where client can read it +\set relname 'toast' +select oid as datoid from pg_database where datname = current_database() \gset +select relfilenode, reltoastrelid from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as loid \gset +\set output :relname '.heap' +\lo_export :loid :output +select lo_import(format('base/%s/%s', :'datoid', :'reltoastrelid')) as toast_loid \gset +\set output :reltoastrelid +\lo_export :toast_loid :output +\setenv relname :relname +\! 
pg_filedump -D text,text $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7476 (0x1d34) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7424 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8148 (0x1fd4) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7892 (0x1ed4) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7828 (0x1e94) Flags: NORMAL +COPY: external uncompressed (TOASTED,uncompressed) + Item 4 -- Length: 107 Offset: 7720 (0x1e28) Flags: NORMAL +COPY: inline compressed pglz 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 
8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 5 -- Length: 67 Offset: 7652 (0x1de4) Flags: NORMAL +COPY: extended compressed pglz (TOASTED,pglz) + Item 6 -- Length: 107 Offset: 7544 (0x1d78) Flags: NORMAL +COPY: inline compressed lz4 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< 0123456789 8< + Item 7 -- Length: 66 Offset: 7476 (0x1d34) Flags: NORMAL +COPY: extended compressed lz4 (TOASTED,pglz) + + +*** End of File Encountered. Last Block Read: 0 *** +\! 
pg_filedump -D text,text -t $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" -e 's/id: ...../id: ...../g' -e 's/ 8< .*//' + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: toast.heap +* Options used: -D text,text -t +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 52 (0x0034) + Block: Size 8192 Version 4 Upper 7476 (0x1d34) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 7 Free Space: 7424 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 52 + + ----- + Item 1 -- Length: 41 Offset: 8148 (0x1fd4) Flags: NORMAL +COPY: short inline xxx + Item 2 -- Length: 256 Offset: 7892 (0x1ed4) Flags: NORMAL +COPY: long inline uncompressed xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + Item 3 -- Length: 64 Offset: 7828 (0x1e94) Flags: NORMAL + TOAST value. Raw size: 2804, external size: 2800, value id: ....., toast relation id: ....., chunks: 2 +COPY: external uncompressed 0123456789 + Item 4 -- Length: 107 Offset: 7720 (0x1e28) Flags: NORMAL +COPY: inline compressed pglz 0123456789 + Item 5 -- Length: 67 Offset: 7652 (0x1de4) Flags: NORMAL + TOAST value. Raw size: 280004, external size: 3226, value id: ....., toast relation id: ....., chunks: 2 +COPY: extended compressed pglz 0123456789 + Item 6 -- Length: 107 Offset: 7544 (0x1d78) Flags: NORMAL +COPY: inline compressed lz4 0123456789 + Item 7 -- Length: 66 Offset: 7476 (0x1d34) Flags: NORMAL + TOAST value. Raw size: 700004, external size: 8035, value id: ....., toast relation id: ....., chunks: 5 +COPY: extended compressed lz4 0123456789 + + +*** End of File Encountered. Last Block Read: 0 *** diff --git a/contrib/gs_filedump/expected/xml.out b/contrib/gs_filedump/expected/xml.out new file mode 100644 index 0000000000000000000000000000000000000000..c7c10400ed268af4ab04a95eae3649d5ec695b47 --- /dev/null +++ b/contrib/gs_filedump/expected/xml.out @@ -0,0 +1,46 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- server without --with-libxml support output in *_1.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table xml (x xml); +insert into xml values (''), (null); +\set relname xml +\ir run_test.sql +\echo Testing :relname +Testing xml +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: xml.heap +* Options used: -D xml +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8128 (0x1fc0) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8096 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 36 Offset: 8152 (0x1fd8) Flags: NORMAL +COPY: + Item 2 -- Length: 24 Offset: 8128 (0x1fc0) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/xml_1.out b/contrib/gs_filedump/expected/xml_1.out new file mode 100644 index 0000000000000000000000000000000000000000..2022565dcbca72563451ae7175502195b8664be5 --- /dev/null +++ b/contrib/gs_filedump/expected/xml_1.out @@ -0,0 +1,38 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- server without --with-libxml support output in *_1.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table xml (x xml); +insert into xml values (''), (null); +ERROR: unsupported XML feature +LINE 1: insert into xml values (''), (null); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +HINT: You need to rebuild PostgreSQL using --with-libxml. +\set relname xml +\ir run_test.sql +\echo Testing :relname +Testing xml +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: xml.heap +* Options used: -D xml +******************************************************************* +Error: Unable to read full page header from block 0. + ===> Read 0 bytes +Notice: Block size determined from reading block 0 is zero, using default 8192 instead. +Hint: Use -S to specify the size manually. +Error: Premature end of file encountered. +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/expected/xml_3.out b/contrib/gs_filedump/expected/xml_3.out new file mode 100644 index 0000000000000000000000000000000000000000..fffda67e39a296c6a60f0610f8a703aa3e35a9f9 --- /dev/null +++ b/contrib/gs_filedump/expected/xml_3.out @@ -0,0 +1,46 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- server without --with-libxml support output in *_1.out +select oid as datoid from pg_database where datname = current_database() \gset +---------------------------------------------------------------------------------------------- +create table xml (x xml); +insert into xml values (''), (null); +\set relname xml +\ir run_test.sql +\echo Testing :relname +Testing xml +vacuum :"relname"; +checkpoint; +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output +\setenv relname :relname +\! 
pg_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +******************************************************************* +* PostgreSQL File/Block Formatted Dump Utility +* +* File: xml.heap +* Options used: -D xml +******************************************************************* + +Block 0 ******************************************************** +
----- + Block Offset: 0x00000000 Offsets: Lower 32 (0x0020) + Block: Size 8192 Version 4 Upper 8132 (0x1fc4) + LSN: logid . recoff 0x........ Special 8192 (0x2000) + Items: 2 Free Space: 8100 + Checksum: 0x0000 Prune XID: 0x00000000 Flags: 0x0004 (ALL_VISIBLE) + Length (including item array): 32 + + ----- + Item 1 -- Length: 36 Offset: 8156 (0x1fdc) Flags: NORMAL +COPY: + Item 2 -- Length: 24 Offset: 8132 (0x1fc4) Flags: NORMAL +COPY: \N + + +*** End of File Encountered. Last Block Read: 0 *** +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/gs_desc b/contrib/gs_filedump/gs_desc new file mode 100755 index 0000000000000000000000000000000000000000..902b63390ea077aafcf0481fa20d97f9446cc182 --- /dev/null +++ b/contrib/gs_filedump/gs_desc @@ -0,0 +1,347 @@ +#!/usr/bin/python3 +import os +import re +import sys +import shutil +import subprocess +import argparse +import hashlib + +# Suggest types for gs_filedump input +def init_dist_types(): + # Initialize the data type mapping dictionary + dist_types = {} + dist_types['int2'] = 'smallint' + dist_types['int4'] = 'int' + dist_types['int8'] = 'bigint' + dist_types['bpchar'] = 'text' + dist_types['varchar'] = 'text' + dist_types['text'] = 'text' + return dist_types + +def ConsumeOptions(): + # Parse command-line arguments + parser = argparse.ArgumentParser(description="Suggest gs_filedump decode options for the given table(s).") + parser.add_argument('-s', '--searchpath', type=str, help='Specify the search path', required=False) # add the -s argument + parser.add_argument('-n', '--namespace', type=str, nargs='+', help='Specify the namespace(s)', required=False) + parser.add_argument('-t', '--tablename', type=str, nargs='+', help='Specify the tablename(s)', required=True) # changed: support multiple table names + + args = parser.parse_args() + return args.searchpath, args.namespace, args.tablename # return the searchpath argument + +# Dict init +def dict_init(dist_name, key, fmt): + # Add the key/value pair if the key does not exist in the dict + if key not in dist_name: + dist_name[key] = fmt + +# List append if not exists +def list_append(lst, value): + # Append the value to the list if it is not already there + if value not in lst: + lst.append(value) + +def calculate_md5(file_path): + # Calculate the MD5 hash of the file + hash_md5 = hashlib.md5() + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(4096), b""): + hash_md5.update(chunk) + return hash_md5.hexdigest() + + +# Search path for all files named filename and add them to filepath +def search_files(path, filename, filepath): + # Search all files under the given path and append matching file paths to the filepath list + # Check whether the path exists + if not os.path.exists(path): + print(f"Error: The path '{path}' does not exist.") + sys.exit(-1) + + # Check whether the path is accessible + if not os.access(path, os.R_OK): + print(f"Error: Permission denied to access the path '{path}'.") + sys.exit(-1) + + for file in os.listdir(path): + fp = os.path.join(path, file) + if os.path.isfile(fp) and fp.endswith("/" + filename): + if fp not in filepath: + md5_value = calculate_md5(fp) + if md5_value not in [calculate_md5(p) for p in filepath]: + list_append(filepath, fp) + elif os.path.isdir(fp): + search_files(fp, filename, filepath) + +# Execute command and capture output +def execute_command(cmd): + # Execute the command and capture its output + try: + res = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output = res.stdout.readlines() + res.stdout.close() + return output + except Exception as e: + print(f"Error executing command: {cmd}") + print(e) + sys.exit(-1) + +def get_oid_mapping(searchpath): + # Get the OID mapping dictionary + map_paths = [] + oid_mapping = {} + search_files(searchpath, 'pg_filenode.map', map_paths) + for path in map_paths: + cmd = f"{gs_filedump} 
-m {path} | grep 'OID:'" + try: + for line in execute_command(cmd): + line_arr = line.decode('utf-8').strip().split('\t') + oid = line_arr[0].split(' ')[1] + filenode = line_arr[1].split(' ')[1] + oid_mapping[oid] = filenode + except Exception as e: + print(f"Error processing filenode map at {path}") + print(e) + return oid_mapping + +def get_pg_class_info(searchpath, user_tables, oid_mapping): + # Get pg_class table information + pg_tables = ["pg_database", "pg_namespace", "pg_class", "pg_attribute", "pg_type", "pg_toast_"] + tables = user_tables + pg_tables + table_regex = "|".join(tables) + pattern = r'([a-zA-Z]*)=([a-zA-Z]*)' + + pg_class_paths = [] + table_info_list = [] + + pg_class_filenode = oid_mapping.get("1259") # get pg_class filenode + if pg_class_filenode is None: + print("Error: pg_class OID not found in filenode map.") + sys.exit(-1) + search_files(searchpath, pg_class_filenode, pg_class_paths) + grep_regex = f" |grep -iE 'oid:|copy:' | grep -B1 -iE '{table_regex}' | grep -v '-' | awk '{{print $2,$3,$4,$8,$13,$NF}}' |xargs -n11 | awk '{{if($8 != 0){{print $5,$6,$7,$9,$10,$11}}}}'" + for path in pg_class_paths: + cmd = f"{gs_filedump} -io -D name,oid,oid,oid,oid,oid,oid,oid,float8,float8,int,oid,oid,oid,oid,oid,oid,bool,bool,char,char,smallint,smallint,bool,bool,bool,bool,bool,char,bool,bool,char,int,text,text,~ {path} {grep_regex}" + try: + for line in execute_command(cmd): + line_arr = line.decode('utf-8').strip().split(' ') + if (line_arr[5] != 'N'): + matches = re.findall(pattern, line_arr[5]) + reloptions = ', '.join(f'{key}={value}' for key, value in matches) + else: + reloptions = 'N' + + # print(line_arr[1], reloptions) + table_info = {'oid': line_arr[0], 'relname': line_arr[1], 'relnamespace': line_arr[2], 'relfilenode': line_arr[3], 'reltoastrelid': line_arr[4], 'reloptions': reloptions} + list_append(table_info_list, table_info) + except Exception as e: + print(f"Error processing pg_class at {path}") + print(e) + return table_info_list + +def get_namespace_mapping(searchpath, table_info_list, oid_mapping): + # Get the namespace mapping dictionary + pg_namespace_paths = [] + namespace_mapping = {} + for item in table_info_list: + if item['relname'] == 'pg_namespace': + search_files(searchpath, item['relfilenode'], pg_namespace_paths) + for path in pg_namespace_paths: + cmd = f"{gs_filedump} -i -D name,~ {path} | grep -iE 'oid:|copy:' | awk '{{print $NF}}' | xargs -n2 " + try: + for line in execute_command(cmd): + line_arr = line.decode('utf-8').strip().split('\t') + oid = int(line_arr[0].split()[0]) + nsp = line_arr[0].split()[1] + if oid not in namespace_mapping: + namespace_mapping[oid] = nsp + except Exception as e: + print(f"Error processing namespace at {path}") + print(e) + return namespace_mapping + +def get_attribute_info(searchpath, table_info_list, oid_mapping, result_table_dict): + # Get attribute information + pg_attribute_paths = [] + column_info_dict = {} + dropped_column_dict = {} + type_oids = [] + max_column_name_length = 0 + + if not result_table_dict: + return column_info_dict, dropped_column_dict, type_oids, 0 + + table_oids_regex = '|'.join(str(i) for i in result_table_dict.keys()) + + for item in table_info_list: + if item['relname'] == 'pg_attribute': + search_files(searchpath, oid_mapping.get(item['oid']), pg_attribute_paths) + for path in pg_attribute_paths: + cmd = f"{gs_filedump} -o -D oid,name,oid,int,smallint,smallint,~ {path} | grep -i COPY | grep -E '{table_oids_regex}' | uniq" + try: + for line in execute_command(cmd): + line_arr = line.decode('utf-8').strip().split('\t') + attrelid, attname, 
atttypid, attlen, attnum = int(line_arr[0].split()[1]), line_arr[1], int(line_arr[2]), int(line_arr[4]), int(line_arr[5]) + dict_init(column_info_dict, attrelid, {}) + dict_init(dropped_column_dict, attrelid, {}) + if attnum > 0: + if atttypid > 0: + column_info_dict[attrelid][attnum] = [attname, atttypid, attlen] + list_append(type_oids, atttypid) + max_column_name_length = max(max_column_name_length, len(attname)) + else: + dropped_column_dict[attrelid][attnum] = [attname, atttypid, attlen] + except Exception as e: + print(f"Error processing attributes at {path}") + print(e) + + # Remove over-matched tables + for oid in list(column_info_dict.keys()): + if str(oid) not in result_table_dict.keys(): + del column_info_dict[oid] + del dropped_column_dict[oid] + + return column_info_dict, dropped_column_dict, type_oids, max_column_name_length + +def get_type_info(searchpath, table_info_list, oid_mapping, type_oids): + # Get type information + type_info_dict = {} + pg_type_paths = [] + oid_regex = '|'.join(f'OID: {oid}$' for oid in type_oids) + for item in table_info_list: + if item['relname'] == 'pg_type': + search_files(searchpath, oid_mapping.get(item['oid']), pg_type_paths) + + for path in pg_type_paths: + cmd = f"{gs_filedump} -i -D name,~ {path} | grep -EA 5 '{oid_regex}' | grep -E 'OID|COPY' | grep -v infomask | awk '{{print $NF}}' | xargs -n 2" + try: + for line in execute_command(cmd): + line_arr = line.decode('utf-8').strip().split(' ') + type_info_dict[int(line_arr[0])] = line_arr[1] + except Exception as e: + print(f"Error processing types at {path}") + print(e) + return type_info_dict + +def format_table(column_info_dict, dropped_column_dict, type_info_dict, max_column_name_length, namespace_mapping, table_info_list, oid_mapping): + # Format and print the table information + dist_suggest_type = init_dist_types() + max_column_name_length = max_column_name_length if max_column_name_length > 10 else 10 + format_string = f"{{:<{max_column_name_length + 4}}} | {{}}" + for oid, columns in column_info_dict.items(): + types = [] + table_info = {} + for table in table_info_list: + if int(table['oid']) == int(oid): + table_info = table.copy() + for table in table_info_list: + if (int(table['oid']) == int(table_info['reltoastrelid'])): + table_info['reltoastrelid'] = table['relfilenode'] + break + if not table_info: + continue + print(f"\tTable \"{namespace_mapping[int(table_info['relnamespace'])]}.{table_info['relname']}\"") + print(format_string.format("Column Name", "Type")) + print('-' * (max_column_name_length + 5) + '+' + '-' * 8) + for i in range(1, max(columns.keys()) + 1): + if i in columns: + print(format_string.format(columns[i][0], type_info_dict[columns[i][1]])) + types.append(dist_suggest_type.get(type_info_dict[columns[i][1]], type_info_dict[columns[i][1]])) + elif i in dropped_column_dict[oid]: + print(dropped_column_dict[oid][i][0]) + else: + print(format_string.format("-", "-")) + + print(f"\nOID: {oid}, Relname.Relfilenode: {table_info['relfilenode']}, Toast.Relfilenode: {table_info['reltoastrelid']}") + table_paths = [] + + search_files(searchpath, table_info['relfilenode'], table_paths) + table_id = oid_mapping.get(str(oid), 0) + search_files(searchpath, str(table_id), table_paths) + + ustore_on = 'type=ustore' in table_info['reloptions'].lower() + segment_on = 'segment=on' in table_info['reloptions'].lower() + + if segment_on: + search_files(searchpath, "1", table_paths) + print("Suggest Query Type: \n -r {} [-T {}] -D {}".format(table_info['relfilenode'], table_info['reltoastrelid'], ','.join(types))) + print("Location of Binary 
file: \n {}\n".format('\n '.join(p for p in table_paths))) + else: + print("Suggest Query Type: \n {} -D {}".format('-u' if ustore_on else '', ','.join(types))) + print("Location of Binary file: \n {}\n".format('\n '.join(p for p in table_paths))) + print("Options: {}\n".format(table_info['reloptions'])) + + + +def main(searchpath, namespaces, table_names): + # Main function: run the main logic + oid_mapping = get_oid_mapping(searchpath) # get dict of mapping + if not oid_mapping: + print(f"Error: No mapping found in path `{searchpath}`.") + sys.exit(-1) + + table_info_list = get_pg_class_info(searchpath, table_names, oid_mapping) # get list of pg_class + namespace_mapping = get_namespace_mapping(searchpath, table_info_list, oid_mapping) # get list of namespace + + namespace_oids = [] + if namespaces: + for ns in namespaces: + if ns in namespace_mapping.values(): + for k, v in namespace_mapping.items(): + if v == ns: + namespace_oids.append(k) + + result_table_dict = {} + for item in table_info_list: + if namespace_oids: + if item['relname'] in table_names and (int(item['relnamespace']) in namespace_oids): + result_table_dict[item['oid']] = item + elif item['relname'] in table_names: + result_table_dict[item['oid']] = item + + if not result_table_dict: + print(f"Error: table(s) `{table_names}` not found in namespace `{namespaces}`.") + sys.exit(-1) + + column_info_dict, dropped_column_dict, type_oids, max_column_name_length = get_attribute_info(searchpath, table_info_list, oid_mapping, result_table_dict) + type_info_dict = get_type_info(searchpath, table_info_list, oid_mapping, type_oids) + format_table(column_info_dict, dropped_column_dict, type_info_dict, max_column_name_length, namespace_mapping, table_info_list, oid_mapping) + + # print tables Not Found + tables_not_found = table_names + for t in result_table_dict.values(): + if t['relname'] in tables_not_found: + tables_not_found.remove(t['relname']) + + print("@"*10) + if tables_not_found: + print(f"Not found table(s): {tables_not_found}") + else: + print("All table(s) found.") + print("@"*10) + + +if __name__ == "__main__": + # Program entry point + exename = "gs_filedump" + gs_filedump = shutil.which(exename) + if gs_filedump is None: + print(f"Command {exename} not found in $PATH.") + sys.exit(-1) + + try: + searchpath, namespaces, tablenames = ConsumeOptions() # get the searchpath argument + if not tablenames: + print("Error: -t (tablename) is required.") + sys.exit(-1) + + if not searchpath: + searchpath = os.environ.get('PGDATA') + + print("*" * 50 + "\n*") + print(f"* \t Namespaces: {namespaces}, Tables: {tablenames}") + print("*\n" + "*" * 50) + main(searchpath, namespaces, tablenames) + + except SystemExit: + pass diff --git a/contrib/gs_filedump/gs_filedump.cpp b/contrib/gs_filedump/gs_filedump.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6720b5d49df55a8c48ffb72ae619061a0217bbb8 --- /dev/null +++ b/contrib/gs_filedump/gs_filedump.cpp @@ -0,0 +1,3273 @@ +/* + * gs_filedump.c - PostgreSQL file dump utility for dumping and + * formatting heap (data), index and control files. + * + * Copyright (c) 2002-2010 Red Hat, Inc. + * Copyright (c) 2011-2023, PostgreSQL Global Development Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Original Author: Patrick Macdonald + */ + +#include "gs_filedump.h" + +#include + +#include "storage/checksum.h" +#include "storage/checksum_impl.h" +#include "storage/smgr/segment_internal.h" +#include "storage/smgr/segment.h" +#include "decode.h" +#include "segment.h" + +/* + * Global variables for ease of use mostly + */ +/* Options for Block formatting operations */ +unsigned int blockOptions = 0; + +/* Segment-related options */ +unsigned int segmentOptions = 0; + +/* -R[start]:Block range start */ +int blockStart = -1; + +/* -R[end]:Block range end */ +int blockEnd = -1; + +/* table relfilenode -r */ +int tableRelfilenode = -1; + +/* table toast relfilenode -T */ +int toastRelfilenode = -1; + +/* Options for Item formatting operations */ +unsigned int itemOptions = 0; + +/* Options for Control File formatting operations */ +unsigned int controlOptions = 0; + +unsigned int specialType = SPEC_SECT_NONE; + +static bool verbose = false; + +/* File to dump or format */ +FILE *fp = NULL; +FILE *fp_toast = NULL; + +/* File name for display */ +char *fileName = NULL; + +/* Current block size */ +static unsigned int blockSize = 0; + +/* Segment size in bytes */ +static unsigned int segmentSize = RELSEG_SIZE * BLCKSZ; + +/* Number of current segment */ +static unsigned int segmentNumber = 0; + +/* Offset of current block */ +static unsigned int pageOffset = 0; + +/* Number of bytes to format */ +static unsigned int bytesToFormat = 0; + +/* Block version number */ +static unsigned int blockVersion = 0; + +/* Flag to indicate pg_filenode.map file */ +static bool isRelMapFile = false; + +/* Program exit code */ +static int exitCode = 0; + +bool isUHeap; + +bool isSegment; + +/* + * Function Prototypes + */ + +static void DisplayOptions(unsigned int validOptions); +static unsigned int ConsumeOptions(int numOptions, char **options); +static int GetOptionValue(char *optionString); +static void FormatBlock(unsigned int blockOptions, unsigned int controlOptions, char *buffer, BlockNumber currentBlock, + unsigned int blockSize, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue, unsigned int *toastRead); +static unsigned int GetSpecialSectionType(char *buffer, Page page); +static bool IsBtreeMetaPage(Page page); +static void CreateDumpFileHeader(int numOptions, char **options); +static int FormatHeader(char *buffer, Page page, BlockNumber blkno, bool isToast); +static int FormatUHeapHeader(char *buffer, Page page, BlockNumber blkno, bool isToast); +static void FormatItemBlock(char *buffer, Page page, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue, unsigned int *toastRead); +static void FormatUHeapItemBlock(char *buffer, Page page, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue, unsigned int *toastRead); +static void FormatItem(char *buffer, unsigned int numBytes, unsigned int startIndex, unsigned int formatAs); +static void FormatUHeapItem(char *buffer, unsigned int numBytes, unsigned int startIndex, unsigned int formatAs); +static void 
FormatSpecial(char *buffer); +static void FormatUHeapSpecial(char *buffer); +static void FormatControl(char *buffer); +static void FormatBinary(char *buffer, unsigned int numBytes, unsigned int startIndex); +static void DumpBinaryBlock(char *buffer); +static int PrintRelMappings(void); + +static const char *wal_level_str(WalLevel wal_level) +{ + switch (wal_level) { + case WAL_LEVEL_MINIMAL: + return "minimal"; + case WAL_LEVEL_ARCHIVE: + return "archive"; + case WAL_LEVEL_HOT_STANDBY: + return "hot_standby"; + case WAL_LEVEL_LOGICAL: + return "logical"; + default: + break; + } + return _("unrecognized wal_level"); +} + +/* Send properly formed usage information to the user. */ +static void DisplayOptions(unsigned int validOptions) +{ + if (validOptions == OPT_RC_COPYRIGHT) { + printf("\nVersion %s (for %s)" + "\nCopyright (c) 2002-2010 Red Hat, Inc." + "\nCopyright (c) 2011-2023, PostgreSQL Global Development Group\n", + FD_VERSION, FD_PG_VERSION); + } + + printf("\nUsage: gs_filedump [-abcdfhikuxy] [-r relfilenode] [-T reltoastrelid] [-R startblock [endblock]] [-D " + "attrlist] [-S blocksize] [-s segsize] " + "[-n segnumber] file\n\n" + "Display formatted contents of a PostgreSQL heap/index/control file\n" + "Defaults are: relative addressing, range of the entire file, block\n" + " size as listed on block 0 in the file\n\n" + "The following options are valid for heap and index files:\n" + " -a Display absolute addresses when formatting (Block header\n" + " information is always block relative)\n" + " -b Display binary block images within a range (Option will turn\n" + " off all formatting options)\n" + " -d Display formatted block content dump (Option will turn off\n" + " all other formatting options)\n" + " -D Decode tuples using given comma separated list of types\n" + " Supported types:\n" + " bigint bigserial bool char charN date float float4 float8 int\n" + " json macaddr name numeric oid real serial smallint smallserial text\n" + " time timestamp timestamptz timetz uuid varchar varcharN xid xml\n" + " ~ ignores all attributes left in a tuple\n" + " -f Display formatted block content dump along with interpretation\n" + " -h Display this information\n" + " -i Display interpreted item details\n" + " -k Verify block checksums\n" + " -o Do not dump old values.\n" + " -R Display specific block ranges within the file (Blocks are\n" + " indexed from 0)\n" + " [startblock]: block to start at\n" + " [endblock]: block to end at\n" + " A startblock without an endblock will format the single block\n" + " -s Force segment size to [segsize]\n" + " -u Decode block which storage type is ustore\n" + " -t Dump TOAST files\n" + " -v Ouput additional information about TOAST relations\n" + " -n Force segment number to [segnumber]\n" + " -S Force block size to [blocksize]\n" + " -x Force interpreted formatting of block items as index items\n" + " -y Force interpreted formatting of block items as heap items\n\n" + "The following options are valid for segment storage table:\n" + " When specifying a segmented storage table, the file path must be specified as '{filedir}/1'\n" + " -r Specify the relfilenode [relfilenode] of the table \n" + " -T Specify the relfilenode [reltoastrelid] of the pg_toast of the table\n" + " Parameter '-t' will not support\n" + "The following options are valid for control files:\n" + " -c Interpret the file listed as a control file\n" + " -f Display formatted content dump along with interpretation\n" + " -S Force block size to [blocksize]\n" + "Additional functions:\n" + " -m 
Interpret file as pg_filenode.map file and print contents (all\n" + " other options will be ignored)\n" + "\nReport bugs to \n"); +} + +/* + * Determine segment number by segment file name. For instance, if file + * name is /path/to/xxxx.7 procedure returns 7. Default return value is 0. + */ +static unsigned int GetSegmentNumberFromFileName(const char *fileName) +{ + int segnumOffset = strlen(fileName) - 1; + + if (segnumOffset < 0) { + return 0; + } + + while (isdigit(fileName[segnumOffset])) { + segnumOffset--; + if (segnumOffset < 0) { + return 0; + } + } + + if (fileName[segnumOffset] != '.') { + return 0; + } + + return atoi(&fileName[segnumOffset + 1]); +} + +static char *get_filename(char *path) +{ + char *filename = path; + for (char *p = path; *p != '\0'; p++) { + if IS_DIR_SEP (*p) { + filename = p + 1; + } + } + return (*filename != '\0') ? filename : NULL; +} + +/* Iterate through the provided options and set the option flags. + * An error will result in a positive rc and will force a display + * of the usage information. This routine returns enum + * optionReturnCode values. */ +static unsigned int ConsumeOptions(int numOptions, char **options) +{ + unsigned int rc = OPT_RC_VALID; + int x; + unsigned int optionStringLength; + char *optionString; + char duplicateSwitch = 0x00; + + for (x = 1; x < numOptions; x++) { + optionString = options[x]; + optionStringLength = strlen(optionString); + + /* Range is a special case where we have to consume the next 1 or 2 + * parameters to mark the range start and end */ + if ((optionStringLength == 2) && (strcmp(optionString, "-R") == 0)) { + int range = 0; + + SET_OPTION(blockOptions, BLOCK_RANGE, 'R'); + /* Only accept the range option once */ + if (rc == OPT_RC_DUPLICATE) { + break; + } + + /* Make sure there are options after the range identifier */ + if (x >= (numOptions - 2)) { + rc = OPT_RC_INVALID; + printf("Error: Missing range start identifier.\n"); + exitCode = 1; + break; + } + + /* + * Mark that we have the range and advance the option to what + * should be the range start. Check the value of the next + * parameter */ + optionString = options[++x]; + if ((range = GetOptionValue(optionString)) < 0) { + rc = OPT_RC_INVALID; + printf("Error: Invalid range start identifier <%s>.\n", optionString); + exitCode = 1; + break; + } + + /* The default is to dump only one block */ + blockStart = blockEnd = (unsigned int)range; + + /* We have our range start marker, check if there is an end + * marker on the option line. Assume that the last option + * is the file we are dumping, so check if there are options + * range start marker and the file */ + + if (x <= (numOptions - 3)) { + if ((range = GetOptionValue(options[x + 1])) >= 0) { + /* End range must be => start range */ + if (blockStart <= range) { + blockEnd = (unsigned int)range; + x++; + } else { + rc = OPT_RC_INVALID; + printf("Error: Requested block range start <%d> is " + "greater than end <%d>.\n", + blockStart, range); + exitCode = 1; + break; + } + } + } + } + + /* Check for the special case where the user forces a block size + * instead of having the tool determine it. 
This is useful if + * the header of block 0 is corrupt and gives a garbage block size */ + else if ((optionStringLength == 2) && (strcmp(optionString, "-S") == 0)) { + int localBlockSize; + + SET_OPTION(blockOptions, BLOCK_FORCED, 'S'); + /* Only accept the forced size option once */ + if (rc == OPT_RC_DUPLICATE) { + break; + } + + /* The token immediately following -S is the block size */ + if (x >= (numOptions - 2)) { + rc = OPT_RC_INVALID; + printf("Error: Missing block size identifier.\n"); + break; + } + + /* Next option encountered must be forced block size */ + optionString = options[++x]; + if ((localBlockSize = GetOptionValue(optionString)) > 0) { + blockSize = (unsigned int)localBlockSize; + } else { + rc = OPT_RC_INVALID; + printf("Error: Invalid block size requested <%s>.\n", optionString); + exitCode = 1; + break; + } + } + /* Check for the special case where the user forces a segment size. */ + else if ((optionStringLength == 2) && (strcmp(optionString, "-s") == 0)) { + int localSegmentSize; + + SET_OPTION(segmentOptions, SEGMENT_SIZE_FORCED, 's'); + /* Only accept the forced size option once */ + if (rc == OPT_RC_DUPLICATE) { + break; + } + + /* The token immediately following -s is the segment size */ + if (x >= (numOptions - 2)) { + rc = OPT_RC_INVALID; + printf("Error: Missing segment size identifier.\n"); + exitCode = 1; + break; + } + + /* Next option encountered must be forced segment size */ + optionString = options[++x]; + if ((localSegmentSize = GetOptionValue(optionString)) > 0) { + segmentSize = (unsigned int)localSegmentSize; + } else { + rc = OPT_RC_INVALID; + printf("Error: Invalid segment size requested <%s>.\n", optionString); + exitCode = 1; + break; + } + } + + else if ((optionStringLength == 2) && (strcmp(optionString, "-r") == 0)) { + int localTableRelfilenode; + /* The token immediately following -r is the table relfilenode */ + if (x >= (numOptions - 2)) { + rc = OPT_RC_INVALID; + printf("Error: Missing table relfilenode identifier.\n"); + exitCode = 1; + break; + } + + /* Next option encountered must be the table relfilenode */ + optionString = options[++x]; + if ((localTableRelfilenode = GetOptionValue(optionString)) > 0) { + tableRelfilenode = (int)localTableRelfilenode; + } else { + rc = OPT_RC_INVALID; + printf("Error: Invalid table relfilenode requested <%s>.\n", optionString); + exitCode = 1; + break; + } + + } + + else if ((optionStringLength == 2) && (strcmp(optionString, "-T") == 0)) { + int localToastRelfilenode; + SET_OPTION(blockOptions, BLOCK_DECODE_TOAST, 't'); + /* The token immediately following -T is the TOAST relfilenode */ + if (x >= (numOptions - 2)) { + rc = OPT_RC_INVALID; + printf("Error: Missing toast relfilenode identifier.\n"); + exitCode = 1; + break; + } + + /* Next option encountered must be the TOAST relfilenode */ + optionString = options[++x]; + if ((localToastRelfilenode = GetOptionValue(optionString)) > 0) { + toastRelfilenode = (int)localToastRelfilenode; + } else { + rc = OPT_RC_INVALID; + printf("Error: Invalid toast relfilenode requested <%s>.\n", optionString); + exitCode = 1; + break; + } + + } + + /* Check for the special case where the user forces tuples decoding. 
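The list given to -D is positional: one type name per column, in the order the columns appear, and `~` may be used to skip all remaining attributes. 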
*/ + else if ((optionStringLength == 2) && (strcmp(optionString, "-D") == 0)) { + SET_OPTION(blockOptions, BLOCK_DECODE, 'D'); + /* Only accept the decode option once */ + if (rc == OPT_RC_DUPLICATE) { + break; + } + + /* The token immediately following -D is the attribute types string */ + if (x >= (numOptions - 2)) { + rc = OPT_RC_INVALID; + printf("Error: Missing attribute types string.\n"); + exitCode = 1; + break; + } + + /* Next option encountered must be attribute types string */ + optionString = options[++x]; + if (ParseAttributeTypesString(optionString) < 0) { + rc = OPT_RC_INVALID; + printf("Error: Invalid attribute types string <%s>.\n", optionString); + exitCode = 1; + break; + } + } + /* Check for the special case where the user forces a segment number + * instead of having the tool determine it by file name. */ + else if ((optionStringLength == 2) && (strcmp(optionString, "-n") == 0)) { + int localSegmentNumber; + + SET_OPTION(segmentOptions, SEGMENT_NUMBER_FORCED, 'n'); + /* Only accept the forced segment number option once */ + if (rc == OPT_RC_DUPLICATE) { + break; + } + + /* The token immediately following -n is the segment number */ + if (x >= (numOptions - 2)) { + rc = OPT_RC_INVALID; + printf("Error: Missing segment number identifier.\n"); + exitCode = 1; + break; + } + + /* Next option encountered must be forced segment number */ + optionString = options[++x]; + if ((localSegmentNumber = GetOptionValue(optionString)) > 0) { + segmentNumber = (unsigned int)localSegmentNumber; + } else { + rc = OPT_RC_INVALID; + printf("Error: Invalid segment number requested <%s>.\n", optionString); + exitCode = 1; + break; + } + } + /* The last option MUST be the file name */ + else if (x == (numOptions - 1)) { + /* Check to see if this looks like an option string before opening */ + if (optionString[0] != '-') { + char *seg_meta_file = NULL; + char *seg_toast_meta_file = NULL; + if (strcmp(get_filename(optionString), "1") == 0) { + isSegment = true; + + if (!(blockOptions & BLOCK_RANGE)) { + SET_OPTION(blockOptions, BLOCK_RANGE, 'R'); + } + if (tableRelfilenode < 0) { + rc = OPT_RC_INVALID; + printf("Error: `-r [relfilenode]` is required for segment table file <%s>.\n", optionString); + exitCode = 1; + break; + } else if ((blockOptions & BLOCK_DECODE_TOAST) && (toastRelfilenode < 0)) { + rc = OPT_RC_INVALID; + printf("Error: `-t` requires `-T [toastRelfilenode]` for segment table file <%s>.\n", optionString); + exitCode = 1; + break; + } + + if (toastRelfilenode > 0) { + seg_toast_meta_file = slice_filename(optionString, (toastRelfilenode / DF_FILE_SLICE_BLOCKS)); + fp_toast = fopen(seg_toast_meta_file, "rb"); + if (!fp_toast) { + rc = OPT_RC_FILE; + printf("Error: Could not open file <%s>.\n", seg_toast_meta_file); + exitCode = 1; + break; + } + } + } + + if (isSegment) { + seg_meta_file = slice_filename(optionString, (tableRelfilenode / DF_FILE_SLICE_BLOCKS)); + fp = fopen(seg_meta_file, "rb"); + } else { + fp = fopen(optionString, "rb"); + } + if (fp) { + fileName = options[x]; + if (!(segmentOptions & SEGMENT_NUMBER_FORCED)) { + segmentNumber = GetSegmentNumberFromFileName(fileName); + } + } else { + rc = OPT_RC_FILE; + printf("Error: Could not open file <%s>.\n", optionString); + exitCode = 1; + break; + } + } else { + /* Could be the case where the help flag is used without a + * filename. 
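In that case the usage screen is displayed instead of an error. 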
Otherwise, the last option isn't a file */ + if (strcmp(optionString, "-h") == 0) { + rc = OPT_RC_COPYRIGHT; + } else { + rc = OPT_RC_FILE; + printf("Error: Missing file name to dump.\n"); + exitCode = 1; + } + break; + } + } else { + unsigned int y; + + /* Option strings must start with '-' and contain switches */ + if (optionString[0] != '-') { + rc = OPT_RC_INVALID; + printf("Error: Invalid option string <%s>.\n", optionString); + exitCode = 1; + break; + } + + /* Iterate through the singular option string, throw out + * garbage, duplicates and set flags to be used in formatting */ + for (y = 1; y < optionStringLength; y++) { + switch (optionString[y]) { + /* Use absolute addressing */ + case 'a': + SET_OPTION(blockOptions, BLOCK_ABSOLUTE, 'a'); + break; + + /* Dump the binary contents of the page */ + case 'b': + SET_OPTION(blockOptions, BLOCK_BINARY, 'b'); + break; + + /* Dump the listed file as a control file */ + case 'c': + SET_OPTION(controlOptions, CONTROL_DUMP, 'c'); + break; + + /* Do not interpret the data. Format to hex and ascii. */ + case 'd': + SET_OPTION(blockOptions, BLOCK_NO_INTR, 'd'); + break; + + case 'u': + SET_OPTION(blockOptions, BLOCK_USTORE, 'u'); + break; + + /* + * Format the contents of the block with + * interpretation of the headers */ + case 'f': + SET_OPTION(blockOptions, BLOCK_FORMAT, 'f'); + break; + + /* Display the usage screen */ + case 'h': + rc = OPT_RC_COPYRIGHT; + break; + + /* Format the items in detail */ + case 'i': + SET_OPTION(itemOptions, ITEM_DETAIL, 'i'); + break; + + /* Verify block checksums */ + case 'k': + SET_OPTION(blockOptions, BLOCK_CHECKSUMS, 'k'); + break; + + /* Treat file as pg_filenode.map file */ + case 'm': + isRelMapFile = true; + break; + + /* Do not dump old values (tuples already removed by Xmax) */ + case 'o': + SET_OPTION(blockOptions, BLOCK_IGNORE_OLD, 'o'); + break; + + case 't': + SET_OPTION(blockOptions, BLOCK_DECODE_TOAST, 't'); + break; + + case 'v': + verbose = true; + break; + + /* Interpret items as standard index values */ + case 'x': + SET_OPTION(itemOptions, ITEM_INDEX, 'x'); + if (itemOptions & ITEM_HEAP) { + rc = OPT_RC_INVALID; + printf("Error: Options <x> and <y> are " + "mutually exclusive.\n"); + exitCode = 1; + } + break; + + /* Interpret items as heap values */ + case 'y': + SET_OPTION(itemOptions, ITEM_HEAP, 'y'); + if (itemOptions & ITEM_INDEX) { + rc = OPT_RC_INVALID; + printf("Error: Options <y> and <x> are " + "mutually exclusive.\n"); + exitCode = 1; + } + break; + + default: + rc = OPT_RC_INVALID; + printf("Error: Unknown option <%c>.\n", optionString[y]); + exitCode = 1; + break; + } + + if (rc) { + break; + } + } + } + } + + isUHeap = blockOptions & BLOCK_USTORE; + + if (isSegment && isUHeap) { + rc = OPT_RC_INVALID; + printf("Error: `-u` is not supported for segment storage tables.\n"); + exitCode = 1; + } + + if (rc == OPT_RC_DUPLICATE) { + printf("Error: Duplicate option listed <%c>.\n", duplicateSwitch); + exitCode = 1; + } + + /* If the user requested a control file dump, a pure binary + * block dump or a non-interpreted formatted dump, mask off + * all other block level options (with a few exceptions) */ + if (rc == OPT_RC_VALID) { + /* The user has requested a control file dump, only -f and */ + /* -S are valid... 
turn off all other formatting */ + if (controlOptions & CONTROL_DUMP) { + if ((blockOptions & ~(BLOCK_FORMAT | BLOCK_FORCED)) || (itemOptions)) { + rc = OPT_RC_INVALID; + printf("Error: Invalid options used for Control File dump.\n" + " Only options <Sf> may be used with <c>.\n"); + exitCode = 1; + } else { + controlOptions |= (blockOptions & (BLOCK_FORMAT | BLOCK_FORCED)); + blockOptions = itemOptions = 0; + } + } + /* The user has requested a binary block dump... only -R and -f + * are honoured */ + else if (blockOptions & BLOCK_BINARY) { + blockOptions &= (BLOCK_BINARY | BLOCK_RANGE | BLOCK_FORCED); + itemOptions = 0; + } + /* The user has requested a non-interpreted dump... only -a, -R + * and -f are honoured */ + else if (blockOptions & BLOCK_NO_INTR) { + blockOptions &= (BLOCK_NO_INTR | BLOCK_ABSOLUTE | BLOCK_RANGE | BLOCK_FORCED); + itemOptions = 0; + } + } + + return (rc); +} + +/* Given the index into the parameter list, convert and return the + * current string to a number if possible */ +static int GetOptionValue(char *optionString) +{ + int x; + int value = -1; + int optionStringLength = strlen(optionString); + + /* Verify the next option looks like a number */ + for (x = 0; x < optionStringLength; x++) { + if (!isdigit((int)optionString[x])) { + break; + } + } + + /* Convert the string to a number if it looks good */ + if (x == optionStringLength) { + value = atoi(optionString); + } + + return (value); +} + +/* Read the page header off of block 0 to determine the block size + * used in this file. Can be overridden using the -S option. The + * returned value is the block size of block 0 on disk */ +unsigned int GetBlockSize(FILE *fp) +{ + unsigned int localSize = 0; + int bytesRead = 0; + size_t headerSize = isUHeap ? sizeof(UHeapPageHeaderData) : sizeof(PageHeaderData); + char localCache[headerSize]; + + /* Read the first header off of block 0 to determine the block size */ + bytesRead = fread(&localCache, 1, headerSize, fp); + rewind(fp); + + if (static_cast<size_t>(bytesRead) == headerSize) { + if (isUHeap) { + localSize = (unsigned int)((Size)(((UHeapPageHeader)(localCache))->pd_pagesize_version & (uint16)0xFF00)); + } else { + localSize = (unsigned int)PageGetPageSize(localCache); + } + } else { + printf("Error: Unable to read full page header from block 0.\n" + " ===> Read %u bytes\n", + bytesRead); + exitCode = 1; + } + + if (localSize == 0) { + printf("Notice: Block size determined from reading block 0 is zero, using default %d instead.\n", BLCKSZ); + printf("Hint: Use -S to specify the size manually.\n"); + localSize = BLCKSZ; + } + + return (localSize); +} + +/* Determine the contents of the special section on the block and + * return this enum value */ +static unsigned int GetSpecialSectionType(char *buffer, Page page) +{ + unsigned int rc; + unsigned int specialOffset; + unsigned int specialSize; + unsigned int specialValue; + void *pageHeader = isUHeap ? (void *)((UHeapPageHeader)page) : (void *)((PageHeader)page); + + /* If this is not a partial header, check the validity of the + * special section offset and contents */ + if (bytesToFormat > (isUHeap ? sizeof(UHeapPageHeaderData) : sizeof(PageHeaderData))) { + specialOffset = + (unsigned int)(isUHeap ? 
((UHeapPageHeader)pageHeader)->pd_special : ((PageHeader)pageHeader)->pd_special); + + /* Check that the special offset can remain on the block or + * the partial block */ + if ((specialOffset == 0) || (specialOffset > blockSize) || (specialOffset > bytesToFormat)) { + rc = SPEC_SECT_ERROR_BOUNDARY; + } else { + /* we may need to examine last 2 bytes of page to identify index */ + uint16 *ptype = (uint16 *)(buffer + blockSize - sizeof(uint16)); + + specialSize = blockSize - specialOffset; + + /* If there is a special section, use its size to guess its + * contents, checking the last 2 bytes of the page in cases + * that are ambiguous. Note we don't attempt to dereference + * the pointers without checking bytesToFormat == blockSize. */ + if (specialSize == 0) { + rc = SPEC_SECT_NONE; + } else if (specialSize == MAXALIGN(sizeof(uint32))) { + /* If MAXALIGN is 8, this could be either a sequence or + * SP-GiST or GIN. */ + if (bytesToFormat == blockSize) { + specialValue = *((int *)(buffer + specialOffset)); + if (specialValue == SEQUENCE_MAGIC) { + rc = SPEC_SECT_SEQUENCE; + } else if (specialSize == MAXALIGN(sizeof(SpGistPageOpaqueData)) && *ptype == SPGIST_PAGE_ID) { + rc = SPEC_SECT_INDEX_SPGIST; + } else if (specialSize == MAXALIGN(sizeof(GinPageOpaqueData))) { + rc = SPEC_SECT_INDEX_GIN; + } else { + rc = SPEC_SECT_ERROR_UNKNOWN; + } + } else { + rc = SPEC_SECT_ERROR_UNKNOWN; + } + } + /* SP-GiST and GIN have same size special section, so check + * the page ID bytes first. */ + else if (specialSize == MAXALIGN(sizeof(SpGistPageOpaqueData)) && bytesToFormat == blockSize && + *ptype == SPGIST_PAGE_ID) { + rc = SPEC_SECT_INDEX_SPGIST; + } else if (specialSize == MAXALIGN(sizeof(GinPageOpaqueData))) { + rc = SPEC_SECT_INDEX_GIN; + } else if (specialSize > 2 && bytesToFormat == blockSize) { + /* As of 8.3, BTree, Hash, and GIST all have the same size + * special section, but the last two bytes of the section + * can be checked to determine what's what. */ + if (*ptype <= MAX_BT_CYCLE_ID && specialSize == MAXALIGN(sizeof(BTPageOpaqueData))) { + rc = SPEC_SECT_INDEX_BTREE; + } else if (*ptype == HASHO_PAGE_ID && specialSize == MAXALIGN(sizeof(HashPageOpaqueData))) { + rc = SPEC_SECT_INDEX_HASH; + } else if (*ptype == GIST_PAGE_ID && specialSize == MAXALIGN(sizeof(GISTPageOpaqueData))) { + rc = SPEC_SECT_INDEX_GIST; + } else { + rc = SPEC_SECT_ERROR_UNKNOWN; + } + } else { + rc = SPEC_SECT_ERROR_UNKNOWN; + } + } + } else { + rc = SPEC_SECT_ERROR_UNKNOWN; + } + + return (rc); +} + +/* Check whether page is a btree meta page */ +static bool IsBtreeMetaPage(Page page) +{ + if (isUHeap) { + UHeapPageHeader upageHeader = (UHeapPageHeader)page; + + if ((((int)PageGetPageSize(page) - upageHeader->pd_special) == (sizeof(UBTPageOpaqueData))) && + (bytesToFormat == blockSize)) { + UBTPageOpaque btpo = (UBTPageOpaque)((char *)page + upageHeader->pd_special); + + /* Must check the cycleid to be sure it's really btree. */ + if ((btpo->bt_internal.btpo_cycleid <= MAX_BT_CYCLE_ID) && (btpo->bt_internal.btpo_flags & BTP_META)) { + return true; + } + } + return false; + } else { + PageHeader pageHeader = (PageHeader)page; + + if ((PageGetSpecialSize(page) == (MAXALIGN(sizeof(BTPageOpaqueData)))) && (bytesToFormat == blockSize)) { + BTPageOpaque btpo = (BTPageOpaque)((char *)page + pageHeader->pd_special); + + /* Must check the cycleid to be sure it's really btree. 
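A genuine meta page also has BTP_META set in btpo_flags, which is verified below. 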
*/ + if ((btpo->bt_internal.btpo_cycleid <= MAX_BT_CYCLE_ID) && (btpo->bt_internal.btpo_flags & BTP_META)) { + return true; + } + } + return false; + } +} + +/* Check whether page is a gin meta page */ +static bool IsGinMetaPage(Page page) +{ + if ((PageGetSpecialSize(page) == (MAXALIGN(sizeof(GinPageOpaqueData)))) && (bytesToFormat == blockSize)) { + GinPageOpaque gpo = GinPageGetOpaque(page); + + if (gpo->flags & GIN_META) { + return true; + } + } + + return false; +} + +/* Check whether page is a gin leaf page */ +static bool IsGinLeafPage(Page page) +{ + if ((PageGetSpecialSize(page) == (MAXALIGN(sizeof(GinPageOpaqueData)))) && (bytesToFormat == blockSize)) { + GinPageOpaque gpo = GinPageGetOpaque(page); + + if (gpo->flags & GIN_LEAF) { + return true; + } + } + + return false; +} + +/* Check whether page is a gin leaf page */ +static bool IsUHeapGinLeafPage(Page page) +{ + if ((PageGetSpecialSize(page) == ((sizeof(GinPageOpaqueData)))) && (bytesToFormat == blockSize)) { + GinPageOpaque gpo = GinPageGetOpaque(page); + + if (gpo->flags & GIN_LEAF) { + return true; + } + } + + return false; +} + +/* Check whether page is a SpGist meta page */ +static bool IsSpGistMetaPage(Page page) +{ + if ((PageGetSpecialSize(page) == ((sizeof(SpGistPageOpaqueData)))) && (bytesToFormat == blockSize)) { + SpGistPageOpaque spgpo = SpGistPageGetOpaque(page); + + if ((spgpo->spgist_page_id == SPGIST_PAGE_ID) && (spgpo->flags & SPGIST_META)) { + return true; + } + } + + return false; +} + +/* Display a header for the dump so we know the file name, the options + * used and the time the dump was taken */ +static void CreateDumpFileHeader(int numOptions, char **options) +{ + int x; + char optionBuffer[52] = "\0"; + + /* Iterate through the options and cache them. + * The maximum we can display is 50 option characters + spaces. */ + for (x = 1; x < (numOptions - 1); x++) { + if ((strlen(optionBuffer) + strlen(options[x])) > 50) { + break; + } + strcat_s(optionBuffer, MAXOUTPUTLEN, options[x]); + if (x < numOptions - 2) { + strcat_s(optionBuffer, MAXOUTPUTLEN, " "); + } + } + + printf("\n*******************************************************************\n" + "* PostgreSQL File/Block Formatted Dump Utility\n" + "*\n" + "* File: %s\n" + "* Options used: %s\n" + "*******************************************************************\n", + fileName, (strlen(optionBuffer)) ? optionBuffer : "None"); +} + +/* Dump out a formatted block header for the requested block */ +static int FormatHeader(char *buffer, Page page, BlockNumber blkno, bool isToast) +{ + int rc = 0; + unsigned int headerBytes; + PageHeader pageHeader = (PageHeader)page; + const char *indent = isToast ? "\t" : ""; + + if (!isToast || verbose) { + printf("%s
-----\n", indent); + } + + /* Only attempt to format the header if the entire header (minus the item + * array) is available */ + if (bytesToFormat < offsetof(PageHeaderData, pd_linp[0])) { + headerBytes = bytesToFormat; + rc = EOF_ENCOUNTERED; + } else { + XLogRecPtr pageLSN = PageGetLSN(page); + unsigned int maxOffset = PageGetMaxOffsetNumber(page); + char flagString[100]; + + headerBytes = offsetof(PageHeaderData, pd_linp[0]); + blockVersion = (unsigned int)PageGetPageLayoutVersion(page); + + /* The full header exists but we have to check that the item array + * is available or how far we can index into it */ + if (maxOffset > 0) { + unsigned int itemsLength = maxOffset * sizeof(ItemIdData); + + if (bytesToFormat < (headerBytes + itemsLength)) { + headerBytes = bytesToFormat; + rc = EOF_ENCOUNTERED; + } else { + headerBytes += itemsLength; + } + } + + flagString[0] = '\0'; + if (pageHeader->pd_flags & PD_HAS_FREE_LINES) { + strcat_s(flagString, MAXOUTPUTLEN, "HAS_FREE_LINES|"); + } + if (pageHeader->pd_flags & PD_PAGE_FULL) { + strcat_s(flagString, MAXOUTPUTLEN, "PAGE_FULL|"); + } + if (pageHeader->pd_flags & PD_ALL_VISIBLE) { + strcat_s(flagString, MAXOUTPUTLEN, "ALL_VISIBLE|"); + } + if (PageIsCompressed(page)) { + strcat_s(flagString, MAXOUTPUTLEN, "COMPRESSED_PAGE|"); + } + if (PageIsLogical(page)) { + strcat_s(flagString, MAXOUTPUTLEN, "LOGICAL_PAGE|"); + } + if (PageIsEncrypt(page)) { + strcat_s(flagString, MAXOUTPUTLEN, "ENCRYPT_PAGE|"); + } + if (pageHeader->pd_flags & PD_CHECKSUM_FNV1A) { + strcat_s(flagString, MAXOUTPUTLEN, "CHECKSUM_FNV1A|"); + } + if (pageHeader->pd_flags & PD_JUST_AFTER_FPW) { + strcat_s(flagString, MAXOUTPUTLEN, "JUST_AFTER_FPW|"); + } + if (pageHeader->pd_flags & PD_EXRTO_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "EXRTO_PAGE|"); + } + if (pageHeader->pd_flags & PD_TDE_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "TDE_PAGE|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + + /* Interpret the content of the header */ + if (!isToast || verbose) { + printf("%s Block Offset: 0x%08x Offsets: Lower %4u (0x%04hx)\n", indent, pageOffset, + pageHeader->pd_lower, pageHeader->pd_lower); + printf("%s Block: Size %4d Version %4u Upper %4u (0x%04hx)\n", indent, + (int)PageGetPageSize(page), blockVersion, pageHeader->pd_upper, pageHeader->pd_upper); + printf("%s LSN: logid %6d recoff 0x%08x Special %4u (0x%04hx)\n", indent, (uint32)(pageLSN >> 32), + (uint32)pageLSN, pageHeader->pd_special, pageHeader->pd_special); + printf("%s Items: %4d Free Space: %4u\n", indent, maxOffset, + pageHeader->pd_upper - pageHeader->pd_lower); + printf("%s Checksum: 0x%04x Prune XID: 0x%08lx Flags: 0x%04x (%s)\n", indent, pageHeader->pd_checksum, + ((HeapPageHeader)(page))->pd_prune_xid + ((HeapPageHeader)(page))->pd_xid_base, pageHeader->pd_flags, + flagString); + printf("%s Length (including item array): %u\n\n", indent, headerBytes); + } + + /* If it's a btree meta page, print the contents of the meta block. 
*/ + if (IsBtreeMetaPage(page)) { + BTMetaPageData *btpMeta = BTPageGetMeta(buffer); + + if (!isToast || verbose) { + printf("%s BTree Meta Data: Magic (0x%08x) Version (%u)\n", indent, btpMeta->btm_magic, + btpMeta->btm_version); + printf("%s Root: Block (%u) Level (%u)\n", indent, btpMeta->btm_root, + btpMeta->btm_level); + printf("%s FastRoot: Block (%u) Level (%u)\n\n", indent, btpMeta->btm_fastroot, + btpMeta->btm_fastlevel); + } + headerBytes += sizeof(BTMetaPageData); + } + + /* Eye the contents of the header and alert the user to possible + * problems. */ + if ((maxOffset < 0) || (maxOffset > blockSize) || (pageHeader->pd_upper > blockSize) || + (pageHeader->pd_upper > pageHeader->pd_special) || + (pageHeader->pd_lower < (sizeof(PageHeaderData) - sizeof(ItemIdData))) || + (pageHeader->pd_lower > blockSize) || (pageHeader->pd_upper < pageHeader->pd_lower) || + (pageHeader->pd_special > blockSize)) { + printf(" Error: Invalid header information.\n\n"); + exitCode = 1; + } + + if (blockOptions & BLOCK_CHECKSUMS) { + uint32 delta = (segmentSize / blockSize) * segmentNumber; + uint16 calc_checksum = pg_checksum_page(page, delta + blkno); + + if (calc_checksum != pageHeader->pd_checksum) { + printf(" Error: checksum failure: calculated 0x%04x.\n\n", calc_checksum); + exitCode = 1; + } + } + } + + /* If we have reached the end of file while interpreting the header, let + * the user know about it */ + if (rc == EOF_ENCOUNTERED) { + if (!isToast || verbose) { + printf("%s Error: End of block encountered within the header." + " Bytes read: %4u.\n\n", + indent, bytesToFormat); + } + exitCode = 1; + } + + /* A request to dump the formatted binary of the block (header, + * items and special section). It's best to dump even on an error + * so the user can see the raw image. */ + if (blockOptions & BLOCK_FORMAT) { + FormatBinary(buffer, headerBytes, 0); + } + + return (rc); +} + +/* Dump out a formatted block header for the requested block */ +static int FormatUHeapHeader(char *buffer, Page page, BlockNumber blkno, bool isToast) +{ + int rc = 0; + unsigned int headerBytes; + UHeapPageHeader upageHeader = (UHeapPageHeader)page; + const char *indent = isToast ? "\t" : ""; + + if (!isToast || verbose) { + printf("%s
-----\n", indent); + } + + /* Only attempt to format the header if the entire header (minus the item + * array) is available */ + if (bytesToFormat < offsetof(UHeapPageHeaderData, td_count)) { + headerBytes = bytesToFormat; + rc = EOF_ENCOUNTERED; + } else { + unsigned int maxOffset = UHeapPageGetMaxOffsetNumber(page); + char flagString[100]; + + headerBytes = SizeOfUHeapPageHeaderData; + blockVersion = (unsigned int)(upageHeader->pd_pagesize_version); + + /* The full header exists but we have to check that the item array + * is available or how far we can index into it */ + if (maxOffset > 0) { + unsigned int itemsLength = maxOffset * sizeof(ItemIdData); + + if (bytesToFormat < (headerBytes + itemsLength)) { + headerBytes = bytesToFormat; + rc = EOF_ENCOUNTERED; + } else { + headerBytes += itemsLength; + } + } + + flagString[0] = '\0'; + if (upageHeader->pd_flags & UHEAP_HAS_FREE_LINES) { + strcat_s(flagString, MAXOUTPUTLEN, "HAS_FREE_LINES|"); + } + if (upageHeader->pd_flags & UHEAP_PAGE_FULL) { + strcat_s(flagString, MAXOUTPUTLEN, "PAGE_FULL|"); + } + if (upageHeader->pd_flags & UHP_ALL_VISIBLE) { + strcat_s(flagString, MAXOUTPUTLEN, "ALL_VISIBLE|"); + } + if (UPageIsFull(page)) { + strcat_s(flagString, MAXOUTPUTLEN, "UPAGE_IS_FULL|"); + } + if (UPageHasFreeLinePointers(page)) { + strcat_s(flagString, MAXOUTPUTLEN, "UPAGE_HAS_FREE_LINE_POINTERS|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + + /* Interpret the content of the header */ + if (!isToast || verbose) { + printf("%s Block Offset: 0x%08x Offsets: Lower %4u (0x%04hx)\n", indent, pageOffset, + upageHeader->pd_lower, upageHeader->pd_lower); + printf("%s Block: Size %4d Version %4u Upper %4u (0x%04hx)\n", indent, + (int)PageGetPageSize(page), blockVersion, upageHeader->pd_upper, upageHeader->pd_upper); + printf("%s PD_LSN: %X/0x%08lx, Special %4u (0x%04hx)\n", indent, upageHeader->pd_lsn.xlogid, + ((uint64)upageHeader->pd_lsn.xlogid << XLOG_LSN_HIGH_OFF) + upageHeader->pd_lsn.xrecoff, + upageHeader->pd_special, upageHeader->pd_special); + printf("%s Items: %4d Free Space: %d\n", indent, maxOffset, + upageHeader->pd_upper - upageHeader->pd_lower); + printf("%s Checksum: 0x%04x Prune XID: 0x%08lx Flags: 0x%04x (%s)\n", indent, upageHeader->pd_checksum, + ((HeapPageHeader)(page))->pd_prune_xid + ((HeapPageHeader)(page))->pd_xid_base, + upageHeader->pd_flags, flagString); + printf("%s Length (including item array): %u\n\n", indent, headerBytes); + } + + /* If it's a btree meta page, print the contents of the meta block. */ + if (IsBtreeMetaPage(page)) { + BTMetaPageData *btpMeta = BTPageGetMeta(buffer); + + if (!isToast || verbose) { + printf("%s BTree Meta Data: Magic (0x%08x) Version (%u)\n", indent, btpMeta->btm_magic, + btpMeta->btm_version); + printf("%s Root: Block (%u) Level (%u)\n", indent, btpMeta->btm_root, + btpMeta->btm_level); + printf("%s FastRoot: Block (%u) Level (%u)\n\n", indent, btpMeta->btm_fastroot, + btpMeta->btm_fastlevel); + } + headerBytes += sizeof(BTMetaPageData); + } + + /* Eye the contents of the header and alert the user to possible + * problems. 
*/ + if ((maxOffset < 0) || (maxOffset > blockSize) || (upageHeader->pd_upper > blockSize) || + (upageHeader->pd_upper > upageHeader->pd_special) || + (upageHeader->pd_lower < (sizeof(PageHeaderData) - sizeof(ItemIdData))) || + (upageHeader->pd_lower > blockSize) || (upageHeader->pd_upper < upageHeader->pd_lower) || + (upageHeader->pd_special > blockSize)) { + printf(" Error: Invalid header information.\n\n"); + exitCode = 1; + } + + if (blockOptions & BLOCK_CHECKSUMS) { + uint32 delta = (segmentSize / blockSize) * segmentNumber; + uint16 calc_checksum = pg_checksum_page(page, delta + blkno); + + if (calc_checksum != upageHeader->pd_checksum) { + printf(" Error: checksum failure: calculated 0x%04x.\n\n", calc_checksum); + exitCode = 1; + } + } + } + + /* If we have reached the end of file while interpreting the header, let + * the user know about it */ + if (rc == EOF_ENCOUNTERED) { + if (!isToast || verbose) { + printf("%s Error: End of block encountered within the header." + " Bytes read: %4u.\n\n", + indent, bytesToFormat); + } + exitCode = 1; + } + + /* A request to dump the formatted binary of the block (header, + * items and special section). It's best to dump even on an error + * so the user can see the raw image. */ + if (blockOptions & BLOCK_FORMAT) { + FormatBinary(buffer, headerBytes, 0); + } + + return (rc); +} + +/* Copied from ginpostinglist.c */ +#define MaxHeapTuplesPerPageBits 11 +static uint64 itemptr_to_uint64(const ItemPointer iptr) +{ + uint64 val; + + val = GinItemPointerGetBlockNumber(iptr); + val <<= MaxHeapTuplesPerPageBits; + val |= GinItemPointerGetOffsetNumber(iptr); + + return val; +} + +static void uint64_to_itemptr(uint64 val, ItemPointer iptr) +{ + GinItemPointerSetOffsetNumber(iptr, val & ((1 << MaxHeapTuplesPerPageBits) - 1)); + val = val >> MaxHeapTuplesPerPageBits; + GinItemPointerSetBlockNumber(iptr, val); +} + +/* + * Decode varbyte-encoded integer at *ptr. *ptr is incremented to next integer. + */ +static uint64 decode_varbyte(unsigned char **ptr) +{ + uint64 val; + unsigned char *p = *ptr; + uint64 c; + + /* 1st byte */ + c = *(p++); + val = c & 0x7F; + if (c & 0x80) { + /* 2nd byte */ + c = *(p++); + val |= (c & 0x7F) << 7; + if (c & 0x80) { + /* 3rd byte */ + c = *(p++); + val |= (c & 0x7F) << 14; + if (c & 0x80) { + /* 4th byte */ + c = *(p++); + val |= (c & 0x7F) << 21; + if (c & 0x80) { + /* 5th byte */ + c = *(p++); + val |= (c & 0x7F) << 28; + if (c & 0x80) { + /* 6th byte */ + c = *(p++); + val |= (c & 0x7F) << 35; + if (c & 0x80) { + /* 7th byte, should not have continuation bit */ + c = *(p++); + val |= c << 42; + Assert((c & 0x80) == 0); + } + } + } + } + } + } + + *ptr = p; + + return val; +} + +/* Dump out gin-specific content of block */ +static void FormatGinBlock(char *buffer, bool isToast, Oid toastOid, unsigned int toastExternalSize, char *toastValue, + unsigned int *toastRead) +{ + Page page = (Page)buffer; + const char *indent = isToast ? 
"\t" : ""; + + if (isToast && !verbose) { + return; + } + + printf("%s -----\n", indent); + + if (IsUHeapGinLeafPage(page)) { + if (GinPageIsCompressed(page)) { + GinPostingList *seg = GinDataLeafPageGetPostingList(page); + int plist_idx = 1; + Size len = GinDataLeafPageGetPostingListSize(page); + Pointer endptr = ((Pointer)seg) + len; + ItemPointer cur; + + while ((Pointer)seg < endptr) { + int item_idx = 1; + uint64 val; + unsigned char *endseg = seg->bytes + seg->nbytes; + unsigned char *ptr = seg->bytes; + + cur = &seg->first; + printf("\n%s Posting List %3d -- Length: %4u\n", indent, plist_idx, seg->nbytes); + printf("%s ItemPointer %3d -- Block Id: %4u linp Index: %4u\n", indent, item_idx, + ((uint32)((cur->ip_blkid.bi_hi << 16) | (uint16)cur->ip_blkid.bi_lo)), cur->ip_posid); + + val = itemptr_to_uint64(&seg->first); + while (ptr < endseg) { + val += decode_varbyte(&ptr); + item_idx++; + + uint64_to_itemptr(val, cur); + printf("%s ItemPointer %3d -- Block Id: %4u linp Index: %4u\n", indent, item_idx, + ((uint32)((cur->ip_blkid.bi_hi << 16) | (uint16)cur->ip_blkid.bi_lo)), cur->ip_posid); + } + + plist_idx++; + + seg = GinNextPostingListSegment(seg); + } + + } else { + int i, nitems = GinPageGetOpaque(page)->maxoff; + ItemPointer items = (ItemPointer)GinDataPageGetData(page); + + for (i = 0; i < nitems; i++) { + printf("%s ItemPointer %d -- Block Id: %u linp Index: %u\n", indent, i + 1, + ((uint32)((items[i].ip_blkid.bi_hi << 16) | (uint16)items[i].ip_blkid.bi_lo)), + items[i].ip_posid); + } + } + } else { + OffsetNumber cur, high = GinPageGetOpaque(page)->maxoff; + PostingItem *pitem = NULL; + + for (cur = FirstOffsetNumber; cur <= high; cur = OffsetNumberNext(cur)) { + pitem = GinDataPageGetPostingItem(page, cur); + printf("%s PostingItem %d -- child Block Id: (%u) Block Id: %u linp Index: %u\n", indent, cur, + ((uint32)((pitem->child_blkno.bi_hi << 16) | (uint16)pitem->child_blkno.bi_lo)), + ((uint32)((pitem->key.ip_blkid.bi_hi << 16) | (uint16)pitem->key.ip_blkid.bi_lo)), + pitem->key.ip_posid); + } + } + + printf("\n"); +} + +/* Dump out gin-specific content of block */ +static void FormatUHeapGinBlock(char *buffer, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue, unsigned int *toastRead) +{ + Page page = (Page)buffer; + const char *indent = isToast ? 
"\t" : ""; + + if (isToast && !verbose) { + return; + } + + printf("%s -----\n", indent); + + if (IsGinLeafPage(page)) { + if (GinPageIsCompressed(page)) { + GinPostingList *seg = GinDataLeafPageGetPostingList(page); + int plist_idx = 1; + Size len = GinDataLeafPageGetPostingListSize(page); + Pointer endptr = ((Pointer)seg) + len; + ItemPointer cur; + + while ((Pointer)seg < endptr) { + int item_idx = 1; + uint64 val; + unsigned char *endseg = seg->bytes + seg->nbytes; + unsigned char *ptr = seg->bytes; + + cur = &seg->first; + printf("\n%s Posting List %3d -- Length: %4u\n", indent, plist_idx, seg->nbytes); + printf("%s ItemPointer %3d -- Block Id: %4u linp Index: %4u\n", indent, item_idx, + ((uint32)((cur->ip_blkid.bi_hi << 16) | (uint16)cur->ip_blkid.bi_lo)), cur->ip_posid); + + val = itemptr_to_uint64(&seg->first); + while (ptr < endseg) { + val += decode_varbyte(&ptr); + item_idx++; + + uint64_to_itemptr(val, cur); + printf("%s ItemPointer %3d -- Block Id: %4u linp Index: %4u\n", indent, item_idx, + ((uint32)((cur->ip_blkid.bi_hi << 16) | (uint16)cur->ip_blkid.bi_lo)), cur->ip_posid); + } + + plist_idx++; + + seg = GinNextPostingListSegment(seg); + } + + } else { + int i, nitems = GinPageGetOpaque(page)->maxoff; + ItemPointer items = (ItemPointer)GinDataPageGetData(page); + + for (i = 0; i < nitems; i++) { + printf("%s ItemPointer %d -- Block Id: %u linp Index: %u\n", indent, i + 1, + ((uint32)((items[i].ip_blkid.bi_hi << 16) | (uint16)items[i].ip_blkid.bi_lo)), + items[i].ip_posid); + } + } + } else { + OffsetNumber cur, high = GinPageGetOpaque(page)->maxoff; + PostingItem *pitem = NULL; + + for (cur = FirstOffsetNumber; cur <= high; cur = OffsetNumberNext(cur)) { + pitem = GinDataPageGetPostingItem(page, cur); + printf("%s PostingItem %d -- child Block Id: (%u) Block Id: %u linp Index: %u\n", indent, cur, + ((uint32)((pitem->child_blkno.bi_hi << 16) | (uint16)pitem->child_blkno.bi_lo)), + ((uint32)((pitem->key.ip_blkid.bi_hi << 16) | (uint16)pitem->key.ip_blkid.bi_lo)), + pitem->key.ip_posid); + } + } + + printf("\n"); +} + +/* Dump out formatted items that reside on this block */ +static void FormatItemBlock(char *buffer, Page page, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue, unsigned int *toastRead) +{ + unsigned int x; + unsigned int itemSize; + unsigned int itemOffset; + unsigned int itemFlags; + ItemId itemId; + unsigned int maxOffset = PageGetMaxOffsetNumber(page); + const char *indent = isToast ? "\t" : ""; + errno_t rc; + + /* If it's a btree meta page, the meta block is where items would normally + * be; don't print garbage. */ + if (IsBtreeMetaPage(page)) { + return; + } + + /* Same as above */ + if (IsSpGistMetaPage(page)) { + return; + } + + /* Same as above */ + if (IsGinMetaPage(page)) { + return; + } + + /* Leaf pages of GIN index contain posting lists + * instead of item array. + */ + if (specialType == SPEC_SECT_INDEX_GIN) { + FormatGinBlock(buffer, isToast, toastOid, toastExternalSize, toastValue, toastRead); + return; + } + + if (!isToast || verbose) { + printf("%s -----\n", indent); + } + + /* Loop through the items on the block. Check if the block is + * empty and has a sensible item array listed before running + * through each item */ + if (maxOffset == 0) { + if (!isToast || verbose) { + printf("%s Empty block - no items listed \n\n", indent); + } + } else if ((maxOffset < 0) || (maxOffset > blockSize)) { + if (!isToast || verbose) { + printf("%s Error: Item index corrupt on block. 
Offset: <%d>.\n\n", indent, maxOffset); + } + exitCode = 1; + } else { + int formatAs; + char textFlags[16]; + uint32 chunkId; + unsigned int chunkSize = 0; + + /* First, honour requests to format items a special way, then + * use the special section to determine the format style */ + if (itemOptions & ITEM_INDEX) { + formatAs = ITEM_INDEX; + } else if (itemOptions & ITEM_HEAP) { + formatAs = ITEM_HEAP; + } else { + switch (specialType) { + case SPEC_SECT_INDEX_BTREE: + case SPEC_SECT_INDEX_HASH: + case SPEC_SECT_INDEX_GIST: + case SPEC_SECT_INDEX_GIN: + formatAs = ITEM_INDEX; + break; + case SPEC_SECT_INDEX_SPGIST: { + SpGistPageOpaque spgpo = (SpGistPageOpaque)((char *)page + ((PageHeader)page)->pd_special); + + if (spgpo->flags & SPGIST_LEAF) { + formatAs = ITEM_SPG_LEAF; + } else { + formatAs = ITEM_SPG_INNER; + } + } break; + default: + formatAs = ITEM_HEAP; + break; + } + } + + for (x = 1; x < (maxOffset + 1); x++) { + itemId = PageGetItemId(page, x); + itemFlags = (unsigned int)ItemIdGetFlags(itemId); + itemSize = (unsigned int)ItemIdGetLength(itemId); + itemOffset = (unsigned int)ItemIdGetOffset(itemId); + switch (itemFlags) { + case LP_UNUSED: + rc = strcpy_s(textFlags, sizeof textFlags, "UNUSED"); + securec_check(rc, "\0", "\0"); + break; + case LP_NORMAL: + rc = strcpy_s(textFlags, sizeof textFlags, "NORMAL"); + securec_check(rc, "\0", "\0"); + break; + case LP_REDIRECT: + rc = strcpy_s(textFlags, sizeof textFlags, "REDIRECT"); + securec_check(rc, "\0", "\0"); + break; + case LP_DEAD: + rc = strcpy_s(textFlags, sizeof textFlags, "DEAD"); + securec_check(rc, "\0", "\0"); + break; + default: + /* shouldn't be possible */ + rc = sprintf_s(textFlags, strlen(textFlags) + 1, "0x%02x", itemFlags); + securec_check(rc, "\0", "\0"); + break; + } + + if (!isToast || verbose) { + printf("%s Item %3u -- Length: %4u Offset: %4u (0x%04x)" + " Flags: %s\n", + indent, x, itemSize, itemOffset, itemOffset, textFlags); + } + + /* Make sure the item can physically fit on this block before + * formatting */ + if ((itemOffset + itemSize > blockSize) || (itemOffset + itemSize > bytesToFormat)) { + if (!isToast || verbose) { + printf("%s Error: Item contents extend beyond block.\n" + "%s BlockSize<%d> Bytes Read<%d> Item Start<%d>.\n", + indent, indent, blockSize, bytesToFormat, itemOffset + itemSize); + } + exitCode = 1; + } else { + HeapTupleHeader tuple_header; + TransactionId xmax; + + /* If the user requests that the items be interpreted as + * heap or index items... */ + if (itemOptions & ITEM_DETAIL) { + FormatItem(buffer, itemSize, itemOffset, formatAs); + } + + /* Dump the items contents in hex and ascii */ + if (blockOptions & BLOCK_FORMAT) { + FormatBinary(buffer, itemSize, itemOffset); + } + + /* Check if tuple was deleted */ + tuple_header = (HeapTupleHeader)(&buffer[itemOffset]); + xmax = HeapTupleHeaderGetRawXmax(page, tuple_header); + if ((blockOptions & BLOCK_IGNORE_OLD) && (xmax != 0)) { + if (!isToast || verbose) { + printf("%stuple was removed by transaction #%ld\n", indent, xmax); + } + } else if (isToast) { + ToastChunkDecode(&buffer[itemOffset], itemSize, toastOid, &chunkId, toastValue + *toastRead, + &chunkSize); + + if (!isToast || verbose) { + printf("%s Read TOAST chunk. 
TOAST Oid: %d, chunk id: %d, " + "chunk data size: %d\n", + indent, toastOid, chunkId, chunkSize); + } + + *toastRead += chunkSize; + + if (*toastRead >= toastExternalSize) { + break; + } + } else if ((blockOptions & BLOCK_DECODE) && (itemFlags == LP_NORMAL)) { + /* Decode tuple data */ + FormatDecode(&buffer[itemOffset], itemSize); + } + + if (!isToast && x == maxOffset) { + printf("\n"); + } + } + } + } +} + +/* Dump out formatted items that reside on this block */ +static void FormatUHeapItemBlock(char *buffer, Page page, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue, unsigned int *toastRead) +{ + unsigned int x; + unsigned int itemSize; + unsigned int itemOffset; + unsigned int itemFlags; + RowPtr *itemId; + unsigned int maxOffset = UHeapPageGetMaxOffsetNumber(page); + const char *indent = isToast ? "\t" : ""; + errno_t rc; + + /* If it's a btree meta page, the meta block is where items would normally + * be; don't print garbage. */ + if (IsBtreeMetaPage(page)) { + return; + } + + /* Same as above */ + if (IsSpGistMetaPage(page)) { + return; + } + + /* Same as above */ + if (IsGinMetaPage(page)) { + return; + } + + /* Leaf pages of GIN index contain posting lists + * instead of item array. + */ + if (specialType == SPEC_SECT_INDEX_GIN) { + FormatUHeapGinBlock(buffer, isToast, toastOid, toastExternalSize, toastValue, toastRead); + return; + } + + if (!isToast || verbose) { + printf("%s -----\n", indent); + } + + /* Loop through the items on the block. Check if the block is + * empty and has a sensible item array listed before running + * through each item */ + if (maxOffset == 0) { + if (!isToast || verbose) { + printf("%s Empty block - no items listed \n\n", indent); + } + } else if ((maxOffset < 0) || (maxOffset > blockSize)) { + if (!isToast || verbose) { + printf("%s Error: Item index corrupt on block. 
Offset: <%d>.\n\n", indent, maxOffset); + } + exitCode = 1; + } else { + int formatAs; + char textFlags[16]; + uint32 chunkId; + unsigned int chunkSize = 0; + + /* First, honour requests to format items a special way, then + * use the special section to determine the format style */ + if (itemOptions & ITEM_INDEX) { + formatAs = ITEM_INDEX; + } else if (itemOptions & ITEM_HEAP) { + formatAs = ITEM_HEAP; + } else { + switch (specialType) { + case SPEC_SECT_INDEX_BTREE: + case SPEC_SECT_INDEX_HASH: + case SPEC_SECT_INDEX_GIST: + case SPEC_SECT_INDEX_GIN: + formatAs = ITEM_INDEX; + break; + case SPEC_SECT_INDEX_SPGIST: { + SpGistPageOpaque spgpo = (SpGistPageOpaque)((char *)page + ((UHeapPageHeader)page)->pd_special); + + if (spgpo->flags & SPGIST_LEAF) { + formatAs = ITEM_SPG_LEAF; + } else { + formatAs = ITEM_SPG_INNER; + } + } break; + default: + formatAs = ITEM_HEAP; + break; + } + } + + for (x = 1; x < (maxOffset + 1); x++) { + itemId = UPageGetRowPtr(page, x); + itemFlags = (unsigned int)itemId->flags; + itemSize = (unsigned int)itemId->len; + itemOffset = RowPtrGetOffset(itemId); + + switch (itemFlags) { + case RP_UNUSED: + rc = strcpy_s(textFlags, sizeof textFlags, "UNUSED"); + securec_check(rc, "\0", "\0"); + break; + case RP_NORMAL: + rc = strcpy_s(textFlags, sizeof textFlags, "NORMAL"); + securec_check(rc, "\0", "\0"); + break; + case RP_REDIRECT: + rc = strcpy_s(textFlags, sizeof textFlags, "REDIRECT"); + securec_check(rc, "\0", "\0"); + break; + case RP_DEAD: + rc = strcpy_s(textFlags, sizeof textFlags, "DEAD"); + securec_check(rc, "\0", "\0"); + break; + default: + /* shouldn't be possible */ + rc = sprintf_s(textFlags, strlen(textFlags) + 1, "0x%02x", itemFlags); + securec_check(rc, "\0", "\0"); + break; + } + + if (!isToast || verbose) { + printf("%s Item %3u -- Length: %4u Offset: %4u (0x%04x)" + " Flags: %s\n", + indent, x, itemSize, itemOffset, itemOffset, textFlags); + } + + /* Make sure the item can physically fit on this block before + * formatting */ + if ((itemOffset + itemSize > blockSize) || (itemOffset + itemSize > bytesToFormat)) { + if (!isToast || verbose) { + printf("%s Error: Item contents extend beyond block.\n" + "%s BlockSize<%d> Bytes Read<%d> Item Start<%d>.\n", + indent, indent, blockSize, bytesToFormat, itemOffset + itemSize); + } + exitCode = 1; + } else { + UHeapDiskTuple utuple_header; + TransactionId xmax; + + /* If the user requests that the items be interpreted as + * heap or index items... */ + if (itemOptions & ITEM_DETAIL) { + FormatUHeapItem(buffer, itemSize, itemOffset, formatAs); + } + + /* Dump the items contents in hex and ascii */ + if (blockOptions & BLOCK_FORMAT) { + FormatBinary(buffer, itemSize, itemOffset); + } + + /* Check if tuple was deleted */ + utuple_header = (UHeapDiskTuple)(&buffer[itemOffset]); + xmax = UHEAP_XID_IS_TRANS(utuple_header->flag); + if ((blockOptions & BLOCK_IGNORE_OLD) && (xmax == 0)) { + if (!isToast || verbose) { + printf("%stuple was removed by transaction.\n", indent); + } + } else if (isToast) { + ToastChunkDecode(&buffer[itemOffset], itemSize, toastOid, &chunkId, toastValue + *toastRead, + &chunkSize); + + if (!isToast || verbose) { + printf("%s Read TOAST chunk. 
TOAST Oid: %d, chunk id: %d, " + "chunk data size: %d\n", + indent, toastOid, chunkId, chunkSize); + } + + *toastRead += chunkSize; + + if (*toastRead >= toastExternalSize) { + break; + } + } else if ((blockOptions & BLOCK_DECODE) && (itemFlags == LP_NORMAL)) { + /* Decode tuple data */ + FormatDecode(&buffer[itemOffset], itemSize); + } + + if (!isToast && x == maxOffset) { + printf("\n"); + } + } + } + } +} + +/* Interpret the contents of the item based on whether it has a special + * section and/or the user has hinted */ +static void FormatItem(char *buffer, unsigned int numBytes, unsigned int startIndex, unsigned int formatAs) +{ + static const char *const spgist_tupstates[4] = {"LIVE", "REDIRECT", "DEAD", "PLACEHOLDER"}; + + if (formatAs == ITEM_INDEX) { + /* It is an IndexTuple item, so dump the index header */ + if (numBytes < sizeof(ItemPointerData)) { + if (numBytes) { + printf(" Error: This item does not look like an index item.\n"); + exitCode = 1; + } + } else { + IndexTuple itup = (IndexTuple)(&(buffer[startIndex])); + + printf(" Block Id: %u linp Index: %u Size: %d\n" + " Has Nulls: %u Has Varwidths: %u\n\n", + ((uint32)((itup->t_tid.ip_blkid.bi_hi << 16) | (uint16)itup->t_tid.ip_blkid.bi_lo)), + itup->t_tid.ip_posid, (int)IndexTupleSize(itup), IndexTupleHasNulls(itup) ? 1 : 0, + IndexTupleHasVarwidths(itup) ? 1 : 0); + + if (numBytes != IndexTupleSize(itup)) { + printf(" Error: Item size difference. Given <%u>, " + "Internal <%d>.\n", + numBytes, (int)IndexTupleSize(itup)); + exitCode = 1; + } + } + } else if (formatAs == ITEM_SPG_INNER) { + /* It is an SpGistInnerTuple item, so dump the index header */ + if (numBytes < SGITHDRSZ) { + if (numBytes) { + printf(" Error: This item does not look like an SPGiST item.\n"); + exitCode = 1; + } + } else { + SpGistInnerTuple itup = (SpGistInnerTuple)(&(buffer[startIndex])); + + printf(" State: %s allTheSame: %d nNodes: %u prefixSize: %u\n\n", spgist_tupstates[itup->tupstate], + itup->allTheSame, itup->nNodes, itup->prefixSize); + + if (numBytes != itup->size) { + printf(" Error: Item size difference. Given <%u>, " + "Internal <%d>.\n", + numBytes, (int)itup->size); + exitCode = 1; + } else if (itup->prefixSize == MAXALIGN(itup->prefixSize)) { + int i; + SpGistNodeTuple node; + + /* Dump the prefix contents in hex and ascii */ + if ((blockOptions & BLOCK_FORMAT) && SGITHDRSZ + itup->prefixSize <= numBytes) { + FormatBinary(buffer, SGITHDRSZ + itup->prefixSize, startIndex); + } + + /* Try to print the nodes, but only while pointer is sane */ + SGITITERATE(itup, i, node) + { + int off = (char *)node - (char *)itup; + + if (off + SGNTHDRSZ > numBytes) { + break; + } + printf(" Node %2u: Downlink: %u/%u Size: %d Null: %u\n", i, + ((uint32)((node->t_tid.ip_blkid.bi_hi << 16) | (uint16)node->t_tid.ip_blkid.bi_lo)), + node->t_tid.ip_posid, (int)IndexTupleSize(node), IndexTupleHasNulls(node) ? 
1 : 0); + /* Dump the node's contents in hex and ascii */ + if ((blockOptions & BLOCK_FORMAT) && off + IndexTupleSize(node) <= numBytes) { + FormatBinary(buffer, IndexTupleSize(node), startIndex + off); + } + if (IndexTupleSize(node) != MAXALIGN(IndexTupleSize(node))) { + break; + } + } + } + printf("\n"); + } + } else if (formatAs == ITEM_SPG_LEAF) { + /* It is an SpGistLeafTuple item, so dump the index header */ + if (numBytes < SGLTHDRSZ) { + if (numBytes) { + printf(" Error: This item does not look like an SPGiST item.\n"); + exitCode = 1; + } + } else { + SpGistLeafTuple itup = (SpGistLeafTuple)(&(buffer[startIndex])); + + printf(" State: %s nextOffset: %u Block Id: %u linp Index: %u\n\n", spgist_tupstates[itup->tupstate], + itup->nextOffset, + ((uint32)((itup->heapPtr.ip_blkid.bi_hi << 16) | (uint16)itup->heapPtr.ip_blkid.bi_lo)), + itup->heapPtr.ip_posid); + + if (numBytes != itup->size) { + printf(" Error: Item size difference. Given <%u>, " + "Internal <%d>.\n", + numBytes, (int)itup->size); + exitCode = 1; + } + } + } else { + /* It is a HeapTuple item, so dump the heap header */ + unsigned int alignedSize = MAXALIGN(sizeof(HeapTupleHeaderData)); + + if (numBytes < alignedSize) { + if (numBytes) { + printf(" Error: This item does not look like a heap item.\n"); + exitCode = 1; + } + } else { + char flagString[256]; + unsigned int x; + unsigned int bitmapLength = 0; + unsigned int oidLength = 0; + unsigned int computedLength; + unsigned int infoMask; + unsigned int infoMask2; + int localNatts; + unsigned int localHoff; + bits8 *localBits; + unsigned int localBitOffset; + + HeapTupleHeader htup = (HeapTupleHeader)(&buffer[startIndex]); + TupleDesc tdup = (TupleDesc)(&buffer[startIndex]); + + infoMask = htup->t_infomask; + infoMask2 = htup->t_infomask2; + localBits = &(htup->t_bits[0]); + localNatts = HeapTupleHeaderGetNatts(htup, tdup); + localHoff = htup->t_hoff; + localBitOffset = offsetof(HeapTupleHeaderData, t_bits); + + printf(" XMIN: %lu XMAX: %u CID|XVAC: %u", HeapTupleHeaderGetXmin_tuple(htup), + htup->t_choice.t_heap.t_xmax, HeapTupleHeaderGetRawCommandId(htup)); + + if (infoMask & HEAP_HASOID) { + printf(" OID: %u", HeapTupleHeaderGetOid(htup)); + } + printf("\n" + " Block Id: %u linp Index: %u Attributes: %d Size: %d\n", + ((uint32)((htup->t_ctid.ip_blkid.bi_hi << 16) | (uint16)htup->t_ctid.ip_blkid.bi_lo)), + htup->t_ctid.ip_posid, localNatts, htup->t_hoff); + + /* Place readable versions of the tuple info mask into a buffer. + * Assume that the string can not expand beyond 256. 
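Each infomask bit that is set appends one flag name plus a '|' separator; the trailing separator is stripped afterwards. 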
*/ + flagString[0] = '\0'; + if (infoMask & HEAP_HASNULL) { + strcat_s(flagString, MAXOUTPUTLEN, "HASNULL|"); + } + if (infoMask & HEAP_HASVARWIDTH) { + strcat_s(flagString, MAXOUTPUTLEN, "HASVARWIDTH|"); + } + if (infoMask & HEAP_HASEXTERNAL) { + strcat_s(flagString, MAXOUTPUTLEN, "HASEXTERNAL|"); + } + if (infoMask & HEAP_HASOID) { + strcat_s(flagString, MAXOUTPUTLEN, "HASOID|"); + } + if (infoMask & HEAP_XMAX_KEYSHR_LOCK) { + strcat_s(flagString, MAXOUTPUTLEN, "XMAX_KEYSHR_LOCK|"); + } + if (infoMask & HEAP_COMBOCID) { + strcat_s(flagString, MAXOUTPUTLEN, "COMBOCID|"); + } + if (infoMask & HEAP_XMAX_EXCL_LOCK) { + strcat_s(flagString, MAXOUTPUTLEN, "XMAX_EXCL_LOCK|"); + } + if (infoMask & HEAP_XMAX_LOCK_ONLY) { + strcat_s(flagString, MAXOUTPUTLEN, "XMAX_LOCK_ONLY|"); + } + if (infoMask & HEAP_XMIN_COMMITTED) { + strcat_s(flagString, MAXOUTPUTLEN, "XMIN_COMMITTED|"); + } + if (infoMask & HEAP_XMIN_INVALID) { + strcat_s(flagString, MAXOUTPUTLEN, "XMIN_INVALID|"); + } + if (infoMask & HEAP_XMAX_COMMITTED) { + strcat_s(flagString, MAXOUTPUTLEN, "XMAX_COMMITTED|"); + } + if (infoMask & HEAP_XMAX_INVALID) { + strcat_s(flagString, MAXOUTPUTLEN, "XMAX_INVALID|"); + } + if (infoMask & HEAP_XMAX_IS_MULTI) { + strcat_s(flagString, MAXOUTPUTLEN, "XMAX_IS_MULTI|"); + } + if (infoMask & HEAP_UPDATED) { + strcat_s(flagString, MAXOUTPUTLEN, "UPDATED|"); + } + if (infoMask & HEAP_MOVED_OFF) { + strcat_s(flagString, MAXOUTPUTLEN, "MOVED_OFF|"); + } + if (infoMask & HEAP_MOVED_IN) { + strcat_s(flagString, MAXOUTPUTLEN, "MOVED_IN|"); + } + + if (infoMask2 & HEAP_KEYS_UPDATED) { + strcat_s(flagString, MAXOUTPUTLEN, "KEYS_UPDATED|"); + } + if (infoMask2 & HEAP_HOT_UPDATED) { + strcat_s(flagString, MAXOUTPUTLEN, "HOT_UPDATED|"); + } + if (infoMask2 & HEAP_ONLY_TUPLE) { + strcat_s(flagString, MAXOUTPUTLEN, "HEAP_ONLY|"); + } + + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + + printf(" infomask: 0x%04x (%s) \n", infoMask, flagString); + + /* As t_bits is a variable length array, determine the length of + * the header proper */ + if (infoMask & HEAP_HASNULL) { + bitmapLength = BITMAPLEN(localNatts); + } else { + bitmapLength = 0; + } + + if (infoMask & HEAP_HASOID) { + oidLength += sizeof(Oid); + } + computedLength = MAXALIGN(localBitOffset + bitmapLength + oidLength); + + /* Inform the user of a header size mismatch or dump the t_bits + * array */ + if (computedLength != localHoff) { + printf(" Error: Computed header length not equal to header size.\n" + " Computed <%u> Header: <%d>\n", + computedLength, localHoff); + + exitCode = 1; + } else if ((infoMask & HEAP_HASNULL) && bitmapLength) { + printf(" t_bits: "); + for (x = 0; x < bitmapLength; x++) { + printf("[%u]: 0x%02x ", x, localBits[x]); + if (((x & 0x03) == 0x03) && (x < bitmapLength - 1)) { + printf("\n "); + } + } + printf("\n"); + } + printf("\n"); + } + } +} + +/* Interpret the contents of the item based on whether it has a special + * section and/or the user has hinted */ +static void FormatUHeapItem(char *buffer, unsigned int numBytes, unsigned int startIndex, unsigned int formatAs) +{ + static const char *const spgist_tupstates[4] = {"LIVE", "REDIRECT", "DEAD", "PLACEHOLDER"}; + + if (formatAs == ITEM_INDEX) { + /* It is an IndexTuple item, so dump the index header */ + if (numBytes < sizeof(ItemPointerData)) { + if (numBytes) { + printf(" Error: This item does not look like an index item.\n"); + exitCode = 1; + } + } else { + IndexTuple itup = (IndexTuple)(&(buffer[startIndex])); + + printf(" Block Id: %u linp 
Index: %u Size: %d\n" + " Has Nulls: %u Has Varwidths: %u\n\n", + ((uint32)((itup->t_tid.ip_blkid.bi_hi << 16) | (uint16)itup->t_tid.ip_blkid.bi_lo)), + itup->t_tid.ip_posid, (int)IndexTupleSize(itup), IndexTupleHasNulls(itup) ? 1 : 0, + IndexTupleHasVarwidths(itup) ? 1 : 0); + + if (numBytes != IndexTupleSize(itup)) { + printf(" Error: Item size difference. Given <%u>, " + "Internal <%d>.\n", + numBytes, (int)IndexTupleSize(itup)); + exitCode = 1; + } + } + } else if (formatAs == ITEM_SPG_INNER) { + /* It is an SpGistInnerTuple item, so dump the index header */ + if (numBytes < SGITHDRSZ) { + if (numBytes) { + printf(" Error: This item does not look like an SPGiST item.\n"); + exitCode = 1; + } + } else { + SpGistInnerTuple itup = (SpGistInnerTuple)(&(buffer[startIndex])); + + printf(" State: %s allTheSame: %d nNodes: %u prefixSize: %u\n\n", spgist_tupstates[itup->tupstate], + itup->allTheSame, itup->nNodes, itup->prefixSize); + + if (numBytes != itup->size) { + printf(" Error: Item size difference. Given <%u>, " + "Internal <%d>.\n", + numBytes, (int)itup->size); + exitCode = 1; + } else if (itup->prefixSize == MAXALIGN(itup->prefixSize)) { + int i; + SpGistNodeTuple node; + + /* Dump the prefix contents in hex and ascii */ + if ((blockOptions & BLOCK_FORMAT) && SGITHDRSZ + itup->prefixSize <= numBytes) { + FormatBinary(buffer, SGITHDRSZ + itup->prefixSize, startIndex); + } + + /* Try to print the nodes, but only while pointer is sane */ + SGITITERATE(itup, i, node) + { + int off = (char *)node - (char *)itup; + + if (off + SGNTHDRSZ > numBytes) { + break; + } + printf(" Node %2u: Downlink: %u/%u Size: %d Null: %u\n", i, + ((uint32)((node->t_tid.ip_blkid.bi_hi << 16) | (uint16)node->t_tid.ip_blkid.bi_lo)), + node->t_tid.ip_posid, (int)IndexTupleSize(node), IndexTupleHasNulls(node) ? 1 : 0); + /* Dump the node's contents in hex and ascii */ + if ((blockOptions & BLOCK_FORMAT) && off + IndexTupleSize(node) <= numBytes) { + FormatBinary(buffer, IndexTupleSize(node), startIndex + off); + } + if (IndexTupleSize(node) != MAXALIGN(IndexTupleSize(node))) { + break; + } + } + } + printf("\n"); + } + } else if (formatAs == ITEM_SPG_LEAF) { + /* It is an SpGistLeafTuple item, so dump the index header */ + if (numBytes < SGLTHDRSZ) { + if (numBytes) { + printf(" Error: This item does not look like an SPGiST item.\n"); + exitCode = 1; + } + } else { + SpGistLeafTuple itup = (SpGistLeafTuple)(&(buffer[startIndex])); + + printf(" State: %s nextOffset: %u Block Id: %u linp Index: %u\n\n", spgist_tupstates[itup->tupstate], + itup->nextOffset, + ((uint32)((itup->heapPtr.ip_blkid.bi_hi << 16) | (uint16)itup->heapPtr.ip_blkid.bi_lo)), + itup->heapPtr.ip_posid); + + if (numBytes != itup->size) { + printf(" Error: Item size difference. 
Given <%u>, " + "Internal <%d>.\n", + numBytes, (int)itup->size); + exitCode = 1; + } + } + } else { + /* It is a HeapTuple item, so dump the heap header */ + unsigned int alignedSize = UHeapDiskTupleDataHeaderSize; + + if (numBytes < alignedSize) { + if (numBytes) { + printf(" Error: This item does not look like a heap item.\n"); + exitCode = 1; + } + } else { + char flagString[256]; + unsigned int bitmapLength = 0; + unsigned int oidLength = 0; + unsigned int computedLength; + unsigned int infoMask; + unsigned int infoMask2; + int localNatts; + unsigned int localHoff; + bits8 *localBits; + unsigned int localBitOffset; + UHeapDiskTuple utuple = (UHeapDiskTuple)(&buffer[startIndex]); + + infoMask = utuple->flag; + infoMask2 = utuple->flag2; + localBits = &(utuple->data[0]); + localNatts = UHeapTupleHeaderGetNatts(utuple); + localHoff = utuple->t_hoff; + localBitOffset = offsetof(UHeapDiskTupleData, data); + + printf(" xid: %u \t td: %d \t locker_td : %d \n" + " Attributes: %d, localHoff : %d \n", + utuple->xid, utuple->td_id, utuple->reserved, infoMask2, localHoff); + + /* Place readable versions of the tuple info mask into a buffer. + * Assume that the string can not expand beyond 256. */ + flagString[0] = '\0'; + if (infoMask & UHEAP_HAS_NULL) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_HAS_NULL|"); + } + if (infoMask & UHEAP_HASVARWIDTH) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_HASVARWIDTH|"); + } + if (infoMask & UHEAP_HASEXTERNAL) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_HASEXTERNAL|"); + } + if (infoMask & UHEAP_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_DELETED|"); + } + if (infoMask & UHEAP_INPLACE_UPDATED) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_INPLACE_UPDATED|"); + } + if (infoMask & UHEAP_UPDATED) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_UPDATED|"); + } + if (infoMask & UHEAP_XID_KEYSHR_LOCK) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_XID_KEYSHR_LOCK|"); + } + if (infoMask & UHEAP_XID_NOKEY_EXCL_LOCK) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_XID_NOKEY_EXCL_LOCK|"); + } + if (infoMask & UHEAP_XID_EXCL_LOCK) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_XID_EXCL_LOCK|"); + } + if (infoMask & UHEAP_MULTI_LOCKERS) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_MULTI_LOCKERS|"); + } + if (infoMask & UHEAP_INVALID_XACT_SLOT) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_INVALID_XACT_SLOT|"); + } + if (infoMask & UHEAP_XID_COMMITTED) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_XID_COMMITTED|"); + } + if (infoMask & UHEAP_XID_INVALID) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_XID_INVALID|"); + } + if (infoMask & UHEAP_XID_FROZEN) { + strcat_s(flagString, MAXOUTPUTLEN, "UHEAP_XID_FROZEN|"); + } + + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + + printf(" infomask: 0x%04x (%s) \n", infoMask, flagString); + + /* As t_bits is a variable length array, determine the length of + * the header proper */ + if (infoMask & UHEAP_HAS_NULL) { + bitmapLength = BITMAPLEN(localNatts); + } else { + bitmapLength = 0; + } + + computedLength = localBitOffset + bitmapLength + oidLength; + + printf("\n"); + } + } +} + +/* On blocks that have special sections, print the contents + * according to previously determined special section type */ +static void FormatSpecial(char *buffer) +{ + PageHeader pageHeader = (PageHeader)buffer; + char flagString[100] = "\0"; + unsigned int specialOffset = pageHeader->pd_special; + unsigned int specialSize = (blockSize >= specialOffset) ? 
(blockSize - specialOffset) : 0; + + printf(" -----\n"); + switch (specialType) { + case SPEC_SECT_ERROR_UNKNOWN: + case SPEC_SECT_ERROR_BOUNDARY: + printf(" Error: Invalid special section encountered.\n"); + exitCode = 1; + break; + + case SPEC_SECT_SEQUENCE: + printf(" Sequence: 0x%08x\n", SEQUENCE_MAGIC); + break; + + /* Btree index section */ + case SPEC_SECT_INDEX_BTREE: { + BTPageOpaque btreeSection = (BTPageOpaque)(buffer + specialOffset); + + if (btreeSection->bt_internal.btpo_flags & BTP_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_ROOT) { + strcat_s(flagString, MAXOUTPUTLEN, "ROOT|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_META) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_HALF_DEAD) { + strcat_s(flagString, MAXOUTPUTLEN, "HALFDEAD|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_SPLIT_END) { + strcat_s(flagString, MAXOUTPUTLEN, "SPLITEND|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_HAS_GARBAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "HASGARBAGE|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_INCOMPLETE_SPLIT) { + strcat_s(flagString, MAXOUTPUTLEN, "INCOMPLETESPLIT|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + + printf(" BTree Index Section:\n" + " Flags: 0x%04x (%s)\n" + " Blocks: Previous (%d) Next (%d) %s (%d) CycleId (%d)\n\n", + btreeSection->bt_internal.btpo_flags, flagString, btreeSection->bt_internal.btpo_prev, + btreeSection->bt_internal.btpo_next, + (btreeSection->bt_internal.btpo_flags & BTP_DELETED) ? "Next XID" : "Level", + btreeSection->bt_internal.btpo.level, btreeSection->bt_internal.btpo_cycleid); + } break; + + /* Hash index section */ + case SPEC_SECT_INDEX_HASH: { + HashPageOpaque hashSection = (HashPageOpaque)(buffer + specialOffset); + + if ((hashSection->hasho_flag & LH_PAGE_TYPE) == LH_UNUSED_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "UNUSED|"); + } + if (hashSection->hasho_flag & LH_OVERFLOW_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "OVERFLOW|"); + } + if (hashSection->hasho_flag & LH_BUCKET_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET|"); + } + if (hashSection->hasho_flag & LH_BITMAP_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "BITMAP|"); + } + if (hashSection->hasho_flag & LH_META_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (hashSection->hasho_flag & LH_BUCKET_BEING_POPULATED) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET_BEING_POPULATED|"); + } + if (hashSection->hasho_flag & LH_BUCKET_BEING_SPLIT) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET_BEING_SPLIT|"); + } + if (hashSection->hasho_flag & LH_BUCKET_NEEDS_SPLIT_CLEANUP) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET_NEEDS_SPLIT_CLEANUP|"); + } + if (hashSection->hasho_flag & LH_PAGE_HAS_DEAD_TUPLES) { + strcat_s(flagString, MAXOUTPUTLEN, "PAGE_HAS_DEAD_TUPLES|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + printf(" Hash Index Section:\n" + " Flags: 0x%04x (%s)\n" + " Bucket Number: 0x%04x\n" + " Blocks: Previous (%d) Next (%d)\n\n", + hashSection->hasho_flag, flagString, hashSection->hasho_bucket, hashSection->hasho_prevblkno, + hashSection->hasho_nextblkno); + } break; + + /* GIST index section */ + case SPEC_SECT_INDEX_GIST: { + GISTPageOpaque gistSection = (GISTPageOpaque)(buffer + specialOffset); + + if 
(gistSection->flags & F_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (gistSection->flags & F_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (gistSection->flags & F_TUPLES_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "TUPLES_DELETED|"); + } + if (gistSection->flags & F_FOLLOW_RIGHT) { + strcat_s(flagString, MAXOUTPUTLEN, "FOLLOW_RIGHT|"); + } + if (gistSection->flags & F_HAS_GARBAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "HAS_GARBAGE|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + printf(" GIST Index Section:\n" + " NSN: 0x%08lx\n" + " RightLink: %d\n" + " Flags: 0x%08x (%s)\n" + " GIST_page_id: 0x%08x\n\n", + gistSection->nsn, gistSection->rightlink, gistSection->flags, flagString, gistSection->gist_page_id); + } break; + + /* GIN index section */ + case SPEC_SECT_INDEX_GIN: { + GinPageOpaque ginSection = (GinPageOpaque)(buffer + specialOffset); + + if (ginSection->flags & GIN_DATA) { + strcat_s(flagString, MAXOUTPUTLEN, "DATA|"); + } + if (ginSection->flags & GIN_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (ginSection->flags & GIN_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (ginSection->flags & GIN_META) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (ginSection->flags & GIN_LIST) { + strcat_s(flagString, MAXOUTPUTLEN, "LIST|"); + } + if (ginSection->flags & GIN_LIST_FULLROW) { + strcat_s(flagString, MAXOUTPUTLEN, "FULLROW|"); + } + if (ginSection->flags & GIN_INCOMPLETE_SPLIT) { + strcat_s(flagString, MAXOUTPUTLEN, "INCOMPLETESPLIT|"); + } + if (ginSection->flags & GIN_COMPRESSED) { + strcat_s(flagString, MAXOUTPUTLEN, "COMPRESSED|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + printf(" GIN Index Section:\n" + " Flags: 0x%08x (%s) Maxoff: %d\n" + " Blocks: RightLink (%d)\n\n", + ginSection->flags, flagString, ginSection->maxoff, ginSection->rightlink); + } break; + + /* SP-GIST index section */ + case SPEC_SECT_INDEX_SPGIST: { + SpGistPageOpaque spgistSection = (SpGistPageOpaque)(buffer + specialOffset); + + if (spgistSection->flags & SPGIST_META) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (spgistSection->flags & SPGIST_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (spgistSection->flags & SPGIST_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (spgistSection->flags & SPGIST_NULLS) { + strcat_s(flagString, MAXOUTPUTLEN, "NULLS|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + printf(" SPGIST Index Section:\n" + " Flags: 0x%08x (%s)\n" + " nRedirection: %d\n" + " nPlaceholder: %d\n\n", + spgistSection->flags, flagString, spgistSection->nRedirection, spgistSection->nPlaceholder); + } break; + + /* No idea what type of special section this is */ + default: + printf(" Unknown special section type. Type: <%u>.\n", specialType); + exitCode = 1; + break; + } + + /* Dump the formatted contents of the special section */ + if (blockOptions & BLOCK_FORMAT) { + if (specialType == SPEC_SECT_ERROR_BOUNDARY) { + printf(" Error: Special section points off page." 
+ " Unable to dump contents.\n"); + + exitCode = 1; + } else { + FormatBinary(buffer, specialSize, specialOffset); + } + } +} + +/* On blocks that have special sections, print the contents + * according to previously determined special section type */ +static void FormatUHeapSpecial(char *buffer) +{ + UHeapPageHeader upageHeader = (UHeapPageHeader)buffer; + char flagString[100] = "\0"; + unsigned int specialOffset = upageHeader->pd_special; + unsigned int specialSize = (blockSize >= specialOffset) ? (blockSize - specialOffset) : 0; + + printf(" -----\n"); + + switch (specialType) { + case SPEC_SECT_ERROR_UNKNOWN: + case SPEC_SECT_ERROR_BOUNDARY: + printf(" Error: Invalid special section encountered.\n"); + exitCode = 1; + break; + + case SPEC_SECT_SEQUENCE: + printf(" Sequence: 0x%08x\n", SEQUENCE_MAGIC); + break; + + /* Btree index section */ + case SPEC_SECT_INDEX_BTREE: { + BTPageOpaque btreeSection = (BTPageOpaque)(buffer + specialOffset); + + if (btreeSection->bt_internal.btpo_flags & BTP_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_ROOT) { + strcat_s(flagString, MAXOUTPUTLEN, "ROOT|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_META) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_HALF_DEAD) { + strcat_s(flagString, MAXOUTPUTLEN, "HALFDEAD|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_SPLIT_END) { + strcat_s(flagString, MAXOUTPUTLEN, "SPLITEND|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_HAS_GARBAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "HASGARBAGE|"); + } + if (btreeSection->bt_internal.btpo_flags & BTP_INCOMPLETE_SPLIT) { + strcat_s(flagString, MAXOUTPUTLEN, "INCOMPLETESPLIT|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + + printf(" BTree Index Section:\n" + " Flags: 0x%04x (%s)\n" + " Blocks: Previous (%d) Next (%d) %s (%d) CycleId (%d)\n\n", + btreeSection->bt_internal.btpo_flags, flagString, btreeSection->bt_internal.btpo_prev, + btreeSection->bt_internal.btpo_next, + (btreeSection->bt_internal.btpo_flags & BTP_DELETED) ? 
"Next XID" : "Level", + btreeSection->bt_internal.btpo.level, btreeSection->bt_internal.btpo_cycleid); + } break; + + /* Hash index section */ + case SPEC_SECT_INDEX_HASH: { + HashPageOpaque hashSection = (HashPageOpaque)(buffer + specialOffset); + + if ((hashSection->hasho_flag & LH_PAGE_TYPE) == LH_UNUSED_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "UNUSED|"); + } + if (hashSection->hasho_flag & LH_OVERFLOW_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "OVERFLOW|"); + } + if (hashSection->hasho_flag & LH_BUCKET_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET|"); + } + if (hashSection->hasho_flag & LH_BITMAP_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "BITMAP|"); + } + if (hashSection->hasho_flag & LH_META_PAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (hashSection->hasho_flag & LH_BUCKET_BEING_POPULATED) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET_BEING_POPULATED|"); + } + if (hashSection->hasho_flag & LH_BUCKET_BEING_SPLIT) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET_BEING_SPLIT|"); + } + if (hashSection->hasho_flag & LH_BUCKET_NEEDS_SPLIT_CLEANUP) { + strcat_s(flagString, MAXOUTPUTLEN, "BUCKET_NEEDS_SPLIT_CLEANUP|"); + } + if (hashSection->hasho_flag & LH_PAGE_HAS_DEAD_TUPLES) { + strcat_s(flagString, MAXOUTPUTLEN, "PAGE_HAS_DEAD_TUPLES|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + printf(" Hash Index Section:\n" + " Flags: 0x%04x (%s)\n" + " Bucket Number: 0x%04x\n" + " Blocks: Previous (%d) Next (%d)\n\n", + hashSection->hasho_flag, flagString, hashSection->hasho_bucket, hashSection->hasho_prevblkno, + hashSection->hasho_nextblkno); + } break; + + /* GIST index section */ + case SPEC_SECT_INDEX_GIST: { + GISTPageOpaque gistSection = (GISTPageOpaque)(buffer + specialOffset); + + if (gistSection->flags & F_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (gistSection->flags & F_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (gistSection->flags & F_TUPLES_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "TUPLES_DELETED|"); + } + if (gistSection->flags & F_FOLLOW_RIGHT) { + strcat_s(flagString, MAXOUTPUTLEN, "FOLLOW_RIGHT|"); + } + if (gistSection->flags & F_HAS_GARBAGE) { + strcat_s(flagString, MAXOUTPUTLEN, "HAS_GARBAGE|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + printf(" GIST Index Section:\n" + " NSN: 0x%08lx\n" + " RightLink: %d\n" + " Flags: 0x%08x (%s)\n" + " GIST_page_id: 0x%08x\n\n", + gistSection->nsn, gistSection->rightlink, gistSection->flags, flagString, gistSection->gist_page_id); + } break; + + /* GIN index section */ + case SPEC_SECT_INDEX_GIN: { + GinPageOpaque ginSection = (GinPageOpaque)(buffer + specialOffset); + + if (ginSection->flags & GIN_DATA) { + strcat_s(flagString, MAXOUTPUTLEN, "DATA|"); + } + if (ginSection->flags & GIN_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (ginSection->flags & GIN_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (ginSection->flags & GIN_META) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (ginSection->flags & GIN_LIST) { + strcat_s(flagString, MAXOUTPUTLEN, "LIST|"); + } + if (ginSection->flags & GIN_LIST_FULLROW) { + strcat_s(flagString, MAXOUTPUTLEN, "FULLROW|"); + } + if (ginSection->flags & GIN_INCOMPLETE_SPLIT) { + strcat_s(flagString, MAXOUTPUTLEN, "INCOMPLETESPLIT|"); + } + if (ginSection->flags & GIN_COMPRESSED) { + strcat_s(flagString, MAXOUTPUTLEN, "COMPRESSED|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) 
- 1] = '\0'; + } + printf(" GIN Index Section:\n" + " Flags: 0x%08x (%s) Maxoff: %d\n" + " Blocks: RightLink (%d)\n\n", + ginSection->flags, flagString, ginSection->maxoff, ginSection->rightlink); + } break; + + /* SP-GIST index section */ + case SPEC_SECT_INDEX_SPGIST: { + SpGistPageOpaque spgistSection = (SpGistPageOpaque)(buffer + specialOffset); + + if (spgistSection->flags & SPGIST_META) { + strcat_s(flagString, MAXOUTPUTLEN, "META|"); + } + if (spgistSection->flags & SPGIST_DELETED) { + strcat_s(flagString, MAXOUTPUTLEN, "DELETED|"); + } + if (spgistSection->flags & SPGIST_LEAF) { + strcat_s(flagString, MAXOUTPUTLEN, "LEAF|"); + } + if (spgistSection->flags & SPGIST_NULLS) { + strcat_s(flagString, MAXOUTPUTLEN, "NULLS|"); + } + if (strlen(flagString)) { + flagString[strlen(flagString) - 1] = '\0'; + } + printf(" SPGIST Index Section:\n" + " Flags: 0x%08x (%s)\n" + " nRedirection: %d\n" + " nPlaceholder: %d\n\n", + spgistSection->flags, flagString, spgistSection->nRedirection, spgistSection->nPlaceholder); + } break; + + /* No idea what type of special section this is */ + default: + printf(" Unknown special section type. Type: <%u>.\n", specialType); + exitCode = 1; + break; + } + + /* Dump the formatted contents of the special section */ + if (blockOptions & BLOCK_FORMAT) { + if (specialType == SPEC_SECT_ERROR_BOUNDARY) { + printf(" Error: Special section points off page." + " Unable to dump contents.\n"); + + exitCode = 1; + } else { + FormatBinary(buffer, specialSize, specialOffset); + } + } +} + +/* For each block, dump out formatted header and content information */ +static void FormatBlock(unsigned int blockOptions, unsigned int controlOptions, char *buffer, BlockNumber currentBlock, + unsigned int blockSize, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue, unsigned int *toastRead) +{ + Page page = (Page)buffer; + const char *indent = isToast ? "\t" : ""; + + pageOffset = blockSize * currentBlock; + specialType = GetSpecialSectionType(buffer, page); + + if (!isToast || verbose) { + printf("\n%sBlock %4u **%s***************************************\n", indent, currentBlock, + (bytesToFormat == blockSize) ? "***************" : " PARTIAL BLOCK "); + } + + /* Either dump out the entire block in hex+acsii fashion or + * interpret the data based on block structure */ + if (blockOptions & BLOCK_NO_INTR) { + FormatBinary(buffer, bytesToFormat, 0); + } else { + int rc; + + /* Every block contains a header, items and possibly a special + * section. Beware of partial block reads though */ + if (isUHeap) { + rc = FormatUHeapHeader(buffer, page, currentBlock, isToast); + } else { + rc = FormatHeader(buffer, page, currentBlock, isToast); + } + + /* If we didn't encounter a partial read in the header, carry on... 
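and format the item block, followed by the special section when the page declares one.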
*/ + if (rc != EOF_ENCOUNTERED) { + if (isUHeap) { + FormatUHeapItemBlock(buffer, page, isToast, toastOid, toastExternalSize, toastValue, toastRead); + if (specialType != SPEC_SECT_NONE) { + FormatUHeapSpecial(buffer); + } + } else { + FormatItemBlock(buffer, page, isToast, toastOid, toastExternalSize, toastValue, toastRead); + if (specialType != SPEC_SECT_NONE) { + FormatSpecial(buffer); + } + } + } + } +} + +/* Dump out the content of the PG control file */ +static void FormatControl(char *buffer) +{ + unsigned int localPgVersion = 0; + unsigned int controlFileSize = 0; + time_t cd_time; + time_t cp_time; + + printf("\n *********************************************\n\n"); + + /* Check the version */ + if (bytesToFormat >= offsetof(ControlFileData, catalog_version_no)) { + localPgVersion = ((ControlFileData *)buffer)->pg_control_version; + } + + if (localPgVersion >= 72) { + controlFileSize = sizeof(ControlFileData); + } else { + printf("gs_filedump: pg_control version %u not supported.\n", localPgVersion); + return; + } + + /* Interpret the control file if it's all there */ + if (bytesToFormat >= controlFileSize) { + ControlFileData *controlData = (ControlFileData *)buffer; + CheckPoint *checkPoint = &(controlData->checkPointCopy); + pg_crc32 crcLocal; + char *dbState; + + /* Compute a local copy of the CRC to verify the one on disk */ + INIT_CRC32C(crcLocal); + COMP_CRC32C(crcLocal, buffer, offsetof(ControlFileData, crc)); + FIN_CRC32C(crcLocal); + + /* Grab a readable version of the database state */ + switch (controlData->state) { + case DB_STARTUP: + dbState = "STARTUP"; + break; + case DB_SHUTDOWNED: + dbState = "SHUTDOWNED"; + break; + case DB_SHUTDOWNED_IN_RECOVERY: + dbState = "SHUTDOWNED_IN_RECOVERY"; + break; + case DB_SHUTDOWNING: + dbState = "SHUTDOWNING"; + break; + case DB_IN_CRASH_RECOVERY: + dbState = "IN CRASH RECOVERY"; + break; + case DB_IN_ARCHIVE_RECOVERY: + dbState = "IN ARCHIVE RECOVERY"; + break; + case DB_IN_PRODUCTION: + dbState = "IN PRODUCTION"; + break; + default: + dbState = "UNKNOWN"; + break; + } + + /* convert timestamps to system's time_t width */ + cd_time = controlData->time; + cp_time = checkPoint->time; + + printf(" CRC: %s\n" + " pg_control Version: %u%s\n" + " Catalog Version: %u\n" + " Database system Identifier: " UINT64_FORMAT "\n" + " Database cluster State: %s\n" + " pg_control last modifyed: %s" + " Last Checkpoint Record: Log File (%u) Offset (0x%08x)\n" + " Previous Checkpoint Record: Log File (%u) Offset (0x%08x)\n" + " Last Checkpoint Record Redo: Log File (%u) Offset (0x%08x)\n" + " |- TimeLineID: %u\n" + " |- full_path_writes: %s\n" + " |- Next XID: %lu\n" + " |- Next OID: %u\n" + " |- Next MultiXactId: %lu\n" + " |- Next MultiOffset: %lu\n" + " |- oldestXid: %lu\n" + " |- oldestXid's DB: %u\n" + " |- oldestActiveXid: %lu\n" + " |- remove_seg: %X/%lu\n" + " Time of latest checkpoint: %s" + + " Minimum Recovery Point: Log File (%u) Offset (0x%08x)\n" + " Backup start location: %X/%X\n" + " Backup end location: %X/%X\n" + "End-of-backup record required: %s\n" + " Current Setting:\n" + " wal_level: %s\n" + " max_connections: %u\n" + " max_prepared_xacts: %u\n" + " max_locks_per_xact: %u\n" + + " Maximum Data Alignment: %u\n" + " Floating-Point Sample: %.7g%s\n" + " Database Block Size: %u\n" + " Blocks Per Segment: %u\n" + " XLOG Block Size: %u\n" + " XLOG Segment Size: %u\n" + "Maximum length of identifiers: %u\n" + " Maximum columns in an index: %u\n" + "Maximum size of a TOAST chunk: %u\n" + " Date/time type storage: %s\n" + " Float4 
argument passing: %s\n" + " Float8 argument passing: %s\n" + " Database system TimeLine: %u\n", + EQ_CRC32C(crcLocal, controlData->crc) ? "Correct" : "Not Correct", controlData->pg_control_version, + (controlData->pg_control_version == PG_CONTROL_VERSION ? "" : " (Not Correct!)"), + controlData->catalog_version_no, controlData->system_identifier, dbState, ctime(&(cd_time)), + (uint32)(controlData->checkPoint >> 32), (uint32)controlData->checkPoint, + (uint32)(controlData->prevCheckPoint >> 32), (uint32)controlData->prevCheckPoint, + (uint32)(checkPoint->redo >> 32), (uint32)checkPoint->redo, checkPoint->ThisTimeLineID, + checkPoint->fullPageWrites ? _("on") : _("off"), checkPoint->nextXid, checkPoint->nextOid, + checkPoint->nextMulti, checkPoint->nextMultiOffset, checkPoint->oldestXid, checkPoint->oldestXidDB, + checkPoint->oldestActiveXid, (uint32)(checkPoint->remove_seg >> 32), checkPoint->remove_seg, + ctime(&cp_time), + + (uint32)(controlData->minRecoveryPoint >> 32), (uint32)controlData->minRecoveryPoint, + + (uint32)(controlData->backupStartPoint >> 32), (uint32)controlData->backupStartPoint, + (uint32)(controlData->backupEndPoint >> 32), (uint32)controlData->backupEndPoint, + controlData->backupEndRequired ? _("yes") : _("no"), + + wal_level_str((WalLevel)controlData->wal_level), controlData->MaxConnections, + controlData->max_prepared_xacts, controlData->max_locks_per_xact, + + controlData->maxAlign, controlData->floatFormat, + (controlData->floatFormat == FLOATFORMAT_VALUE ? "" : " (Not Correct!)"), controlData->blcksz, + controlData->relseg_size, controlData->xlog_blcksz, controlData->xlog_seg_size, controlData->nameDataLen, + controlData->indexMaxKeys, controlData->toast_max_chunk_size, + (controlData->enableIntTimes ? _("64-bit integers") : _("floating-point numbers")), + (controlData->float4ByVal ? _("by value") : _("by reference")), + (controlData->float8ByVal ? _("by value") : _("by reference")), controlData->timeline); + } else { + printf(" Error: pg_control file size incorrect.\n" + " Size: Correct <%u> Received <%u>.\n\n", + controlFileSize, bytesToFormat); + + /* If we have an error, force a formatted dump so we can see + * where things are going wrong */ + controlOptions |= CONTROL_FORMAT; + + exitCode = 1; + } + + /* Dump hex and ascii representation of data */ + if (controlOptions & CONTROL_FORMAT) { + printf(" *****************" + "**********************\n\n"); + FormatBinary(buffer, bytesToFormat, 0); + } +} + +/* Dump out the contents of the block in hex and ascii. + * BYTES_PER_LINE bytes are formatted in each line. 
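Addresses are printed block-relative, or as absolute file offsets (pageOffset + index) when -a / BLOCK_ABSOLUTE is in effect.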
*/ +static void FormatBinary(char *buffer, unsigned int numBytes, unsigned int startIndex) +{ + unsigned int index = 0; + unsigned int stopIndex = 0; + unsigned int x = 0; + unsigned int lastByte = startIndex + numBytes; + + if (numBytes) { + /* Iterate through a printable row detailing the current + * address, the hex and ascii values */ + for (index = startIndex; index < lastByte; index += BYTES_PER_LINE) { + stopIndex = index + BYTES_PER_LINE; + + /* Print out the address */ + if (blockOptions & BLOCK_ABSOLUTE) { + printf(" %08x: ", (unsigned int)(pageOffset + index)); + } else { + printf(" %04x: ", (unsigned int)index); + } + + /* Print out the hex version of the data */ + for (x = index; x < stopIndex; x++) { + if (x < lastByte) { + printf("%02x", 0xff & ((unsigned)buffer[x])); + } else { + printf(" "); + } + if ((x & 0x03) == 0x03) { + printf(" "); + } + } + printf(" "); + + /* Print out the ascii version of the data */ + for (x = index; x < stopIndex; x++) { + if (x < lastByte) { + printf("%c", isprint(buffer[x]) ? buffer[x] : '.'); + } else { + printf(" "); + } + } + printf("\n"); + } + printf("\n"); + } +} + +/* Dump the binary image of the block */ +static void DumpBinaryBlock(char *buffer) +{ + unsigned int x; + + for (x = 0; x < bytesToFormat; x++) { + putchar(buffer[x]); + } +} + +/* Control the dumping of the blocks within the file */ +int DumpFileContents(unsigned int blockOptions, unsigned int controlOptions, FILE *fp, unsigned int blockSize, + int blockStart, int blockEnd, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue) +{ + unsigned int initialRead = 1; + unsigned int contentsToDump = 1; + unsigned int toastDataRead = 0; + BlockNumber currentBlock = 0; + int result = 0; + /* On a positive block size, allocate a local buffer to store + * the subsequent blocks */ + char *block = (char *)malloc(blockSize); + if (!block) { + printf("\nError: Unable to create buffer of size <%d>.\n", blockSize); + result = 1; + } + + /* If the user requested a block range, seek to the correct position + * within the file for the start block. */ + if (result == 0 && blockOptions & BLOCK_RANGE) { + unsigned int position = blockSize * blockStart; + if (fseek(fp, position, SEEK_SET) != 0) { + printf("Error: Seek error encountered before requested " + "start block <%d>.\n", + blockStart); + contentsToDump = 0; + result = 1; + } else { + currentBlock = blockStart; + } + } + + /* Iterate through the blocks in the file until you reach the end or + * the requested range end */ + while (contentsToDump && result == 0) { + bytesToFormat = fread(block, 1, blockSize, fp); + + if (bytesToFormat == 0) { + /* fseek() won't pop an error if you seek passed eof. The next + * subsequent read gets the error. */ + if (initialRead) { + printf("Error: Premature end of file encountered.\n"); + } else if (!(blockOptions & BLOCK_BINARY)) { + printf("\n*** End of File Encountered. Last Block " + "Read: %d ***\n", + currentBlock - 1); + } + + contentsToDump = 0; + } else { + if (blockOptions & BLOCK_BINARY) { + DumpBinaryBlock(block); + } else { + if (controlOptions & CONTROL_DUMP) { + FormatControl(block); + contentsToDump = false; + } else { + FormatBlock(blockOptions, controlOptions, block, currentBlock, blockSize, isToast, toastOid, + toastExternalSize, toastValue, &toastDataRead); + } + } + } + + /* Check to see if we are at the end of the requested range. 
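The end block is inclusive: the range check runs after the current block has been formatted, so blockEnd itself is still dumped.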
*/ + if ((blockOptions & BLOCK_RANGE) && ((int)currentBlock >= blockEnd) && (contentsToDump)) { + /* Don't print out message if we're doing a binary dump */ + if (!(blockOptions & BLOCK_BINARY)) { + printf("\n*** End of Requested Range Encountered. " + "Last Block Read: %d ***\n", + currentBlock); + } + contentsToDump = 0; + } else { + currentBlock++; + } + + initialRead = 0; + + /* If TOAST data is read */ + if (isToast && toastDataRead >= toastExternalSize) { + break; + } + } + + free(block); + + return result; +} + +/* Control the dumping of the blocks within the file */ +int DumpUHeapFileContents(unsigned int blockOptions, unsigned int controlOptions, FILE *fp, unsigned int blockSize, + int blockStart, int blockEnd, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue) +{ + unsigned int initialRead = 1; + unsigned int contentsToDump = 1; + unsigned int toastDataRead = 0; + BlockNumber currentBlock = 0; + int result = 0; + /* On a positive block size, allocate a local buffer to store + * the subsequent blocks */ + char *block = (char *)malloc(blockSize); + if (!block) { + printf("\nError: Unable to create buffer of size <%d>.\n", blockSize); + result = 1; + } + + /* If the user requested a block range, seek to the correct position + * within the file for the start block. */ + if (result == 0 && blockOptions & BLOCK_RANGE) { + unsigned int position = blockSize * blockStart; + + if (fseek(fp, position, SEEK_SET) != 0) { + printf("Error: Seek error encountered before requested " + "start block <%d>.\n", + blockStart); + contentsToDump = 0; + result = 1; + } else { + currentBlock = blockStart; + } + } + + /* Iterate through the blocks in the file until you reach the end or + * the requested range end */ + while (contentsToDump && result == 0) { + bytesToFormat = fread(block, 1, blockSize, fp); + + if (bytesToFormat == 0) { + /* fseek() won't pop an error if you seek passed eof. The next + * subsequent read gets the error. */ + if (initialRead) { + printf("Error: Premature end of file encountered.\n"); + } else if (!(blockOptions & BLOCK_BINARY)) { + printf("\n*** End of File Encountered. Last Block " + "Read: %d ***\n", + currentBlock - 1); + } + + contentsToDump = 0; + } else { + if (blockOptions & BLOCK_BINARY) { + DumpBinaryBlock(block); + } else { + if (controlOptions & CONTROL_DUMP) { + FormatControl(block); + contentsToDump = false; + } else { + FormatBlock(blockOptions, controlOptions, block, currentBlock, blockSize, isToast, toastOid, + toastExternalSize, toastValue, &toastDataRead); + } + } + } + + /* Check to see if we are at the end of the requested range. */ + if ((blockOptions & BLOCK_RANGE) && ((int)currentBlock >= blockEnd) && (contentsToDump)) { + /* Don't print out message if we're doing a binary dump */ + if (!(blockOptions & BLOCK_BINARY)) { + printf("\n*** End of Requested Range Encountered. 
" + "Last Block Read: %d ***\n", + currentBlock); + } + contentsToDump = 0; + } else { + currentBlock++; + } + + initialRead = 0; + + /* If TOAST data is read */ + if (isToast && toastDataRead >= toastExternalSize) { + break; + } + } + + free(block); + + return result; +} + +int PrintRelMappings(void) +{ + /* For storing ingested data */ + char charbuf[RELMAPPER_FILESIZE]; + RelMapFile *map; + RelMapping *mappings; + RelMapping m; + int bytesRead; + + /* For confirming Magic Number correctness */ + char m1[RELMAPPER_MAGICSIZE]; + char m2[RELMAPPER_MAGICSIZE]; + int magic_ref = RELMAPPER_FILEMAGIC_4K; + int magic_val; + int num_loops; + errno_t rc; + + /* Read in the file */ + rewind(fp); // Make sure to start from the beginning + bytesRead = fread(charbuf, 1, RELMAPPER_FILESIZE, fp); + + if (bytesRead != RELMAPPER_FILESIZE) { + printf("Read %d bytes, expected %d\n", bytesRead, RELMAPPER_FILESIZE); + return 0; + } + + /* Convert to RelMapFile type for usability */ + map = (RelMapFile *)charbuf; + + /* Check and print Magic Number correctness */ + printf("Magic Number: 0x%x", map->magic); + magic_val = map->magic; + + rc = memcpy_s(m1, RELMAPPER_MAGICSIZE, &magic_ref, RELMAPPER_MAGICSIZE); + securec_check(rc, "\0", "\0"); + + rc = memcpy_s(m2, RELMAPPER_MAGICSIZE, &magic_val, RELMAPPER_MAGICSIZE); + securec_check(rc, "\0", "\0"); + + if (memcmp(m1, m2, RELMAPPER_MAGICSIZE) == 0) { + printf(" (CORRECT)\n"); + } else { + printf(" (INCORRECT)\n"); + } + + /* Print Mappings */ + printf("Num Mappings: %d\n", map->num_mappings); + printf("Detailed Mappings list:\n"); + mappings = map->mappings; + + /* Limit number of mappings as per MAX_MAPPINGS */ + num_loops = map->num_mappings; + if (map->num_mappings > MAX_MAPPINGS_4K) { + num_loops = MAX_MAPPINGS_4K; + printf(" NOTE: listing has been limited to the first %d mappings\n", MAX_MAPPINGS_4K); + printf(" (perhaps your file is not a valid pg_filenode.map file?)\n"); + } + + for (int i = 0; i < num_loops; i++) { + m = mappings[i]; + printf("OID: %u\tFilenode: %u\n", m.mapoid, m.mapfilenode); + } + return 1; +} + +/* Consume the options and iterate through the given file, formatting as + * requested. */ +int main(int argv, char **argc) +{ + /* If there is a parameter list, validate the options */ + unsigned int validOptions; + + validOptions = (argv < 2) ? 
OPT_RC_COPYRIGHT : ConsumeOptions(argv, argc); + + /* Display valid options if no parameters are received or invalid options + * where encountered */ + if (validOptions != OPT_RC_VALID) { + DisplayOptions(validOptions); + } else if (isRelMapFile) { + CreateDumpFileHeader(argv, argc); + exitCode = PrintRelMappings(); + } else { + /* Don't dump the header if we're dumping binary pages */ + if (!(blockOptions & BLOCK_BINARY)) { + CreateDumpFileHeader(argv, argc); + } + + /* If the user has not forced a block size, use the size of the + * control file data or the information from the block 0 header */ + if (controlOptions) { + if (!(controlOptions & CONTROL_FORCED)) { + blockSize = sizeof(ControlFileData); + } + } else if (!(blockOptions & BLOCK_FORCED)) { + blockSize = GetBlockSize(fp); + } + if (isSegment) { + initSegmentInfo(fp, fp_toast); + } else if (isUHeap) { + exitCode = DumpUHeapFileContents(blockOptions, controlOptions, fp, blockSize, blockStart, blockEnd, + false, /* is toast realtion */ + 0, /* no toast Oid */ + 0, /* no toast external size */ + NULL /* no out toast value */ + ); + } else { + exitCode = DumpFileContents(blockOptions, controlOptions, fp, blockSize, blockStart, blockEnd, + false, /* is toast realtion */ + 0, /* no toast Oid */ + 0, /* no toast external size */ + NULL /* no out toast value */ + ); + } + } + + if (fp) { + fclose(fp); + } + if (fp_toast) { + fclose(fp_toast); + } + + exit(exitCode); + return 0; +} diff --git a/contrib/gs_filedump/gs_filedump.h b/contrib/gs_filedump/gs_filedump.h new file mode 100644 index 0000000000000000000000000000000000000000..c23ffd44e87b5df02ecc355c4a973e480211e96f --- /dev/null +++ b/contrib/gs_filedump/gs_filedump.h @@ -0,0 +1,188 @@ + +/* + * gs_filedump.h - PostgreSQL file dump utility for dumping and + * formatting heap (data), index and control files. + * + * Copyright (c) 2002-2010 Red Hat, Inc. + * Copyright (c) 2011-2022, PostgreSQL Global Development Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Original Author: Patrick Macdonald + */ + +#define FD_VERSION "6.0.0" /* version ID of gs_filedump */ +#define FD_PG_VERSION "openGauss 6.x.x ..." /* PG version it works with */ + +#include "postgres_fe.h" +#include "postgres.h" + +#include +#include + +#include "access/gin_private.h" +#include "access/gist.h" +#include "access/hash.h" +#include "access/nbtree.h" +#include "access/spgist_private.h" + +#define MAXOUTPUTLEN 1048576 + +/* access/htup.h*/ +#define HeapTupleHeaderGetXmin_tuple(tup) \ + (HeapTupleHeaderXminFrozen(tup) ? 
FrozenTransactionId : (tup)->t_choice.t_heap.t_xmin) + +#define HeapTupleHeaderXminFrozen(tup) (((tup)->t_infomask & (HEAP_XMIN_FROZEN)) == HEAP_XMIN_FROZEN) + +#define HEAP_MOVED_OFF \ + 0x4000 /* moved to another place by pre-9.0 \ + * VACUUM FULL; kept for binary \ + * upgrade support */ +#define HEAP_MOVED_IN \ + 0x8000 /* moved from another place by pre-9.0 \ + * VACUUM FULL; kept for binary \ + * upgrade support */ +#define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN) + +/* storage/item/itemptr.h */ +#define GinItemPointerSetBlockNumber(pointer, blkno) (ItemPointerSetBlockNumber((pointer), (blkno))) +#define GinItemPointerSetOffsetNumber(pointer, offnum) (ItemPointerSetOffsetNumber((pointer), (offnum))) + +/* access/gist.h */ +#define F_HAS_GARBAGE \ + (1 << 4) /* some tuples on the page are dead, \ + * but not deleted yet */ + +/* Options for Block formatting operations */ +extern unsigned int blockOptions; + +typedef enum blockSwitches { + BLOCK_ABSOLUTE = 0x00000001, /* -a: Absolute(vs Relative) addressing */ + BLOCK_BINARY = 0x00000002, /* -b: Binary dump of block */ + BLOCK_FORMAT = 0x00000004, /* -f: Formatted dump of blocks / control file */ + BLOCK_FORCED = 0x00000008, /* -S: Block size forced */ + BLOCK_NO_INTR = 0x00000010, /* -d: Dump straight blocks */ + BLOCK_RANGE = 0x00000020, /* -R: Specific block range to dump */ + BLOCK_CHECKSUMS = 0x00000040, /* -k: verify block checksums */ + BLOCK_DECODE = 0x00000080, /* -D: Try to decode tuples */ + BLOCK_DECODE_TOAST = 0x00000100, /* -t: Try to decode TOAST values */ + BLOCK_IGNORE_OLD = 0x00000200, /* -o: Decode old values */ + BLOCK_USTORE = 0x00000400 /* -u: storage Engine ustore */ +} blockSwitches; + +/* Segment-related options */ +extern unsigned int segmentOptions; + +typedef enum segmentSwitches { + SEGMENT_SIZE_FORCED = 0x00000001, /* -s: Segment size forced */ + SEGMENT_NUMBER_FORCED = 0x00000002, /* -n: Segment number forced */ +} segmentSwitches; + +/* -R[start]:Block range start */ +extern int blockStart; + +/* -R[end]:Block range end */ +extern int blockEnd; + +/* Options for Item formatting operations */ +extern unsigned int itemOptions; + +typedef enum itemSwitches { + ITEM_DETAIL = 0x00000001, /* -i: Display interpreted items */ + ITEM_HEAP = 0x00000002, /* -y: Blocks contain HeapTuple items */ + ITEM_INDEX = 0x00000004, /* -x: Blocks contain IndexTuple items */ + ITEM_SPG_INNER = 0x00000008, /* Blocks contain SpGistInnerTuple items */ + ITEM_SPG_LEAF = 0x00000010 /* Blocks contain SpGistLeafTuple items */ +} itemSwitches; + +/* Options for Control File formatting operations */ +extern unsigned int controlOptions; + +typedef enum controlSwitches { + CONTROL_DUMP = 0x00000001, /* -c: Dump control file */ + CONTROL_FORMAT = BLOCK_FORMAT, /* -f: Formatted dump of control file */ + CONTROL_FORCED = BLOCK_FORCED /* -S: Block size forced */ +} controlSwitches; + +/* Possible value types for the Special Section */ +typedef enum specialSectionTypes { + SPEC_SECT_NONE, /* No special section on block */ + SPEC_SECT_SEQUENCE, /* Sequence info in special section */ + SPEC_SECT_INDEX_BTREE, /* BTree index info in special section */ + SPEC_SECT_INDEX_HASH, /* Hash index info in special section */ + SPEC_SECT_INDEX_GIST, /* GIST index info in special section */ + SPEC_SECT_INDEX_GIN, /* GIN index info in special section */ + SPEC_SECT_INDEX_SPGIST, /* SP - GIST index info in special section */ + SPEC_SECT_ERROR_UNKNOWN, /* Unknown error */ + SPEC_SECT_ERROR_BOUNDARY /* Boundary error */ +} specialSectionTypes; + +extern 
unsigned int specialType; + +/* Possible return codes from option validation routine. + * gs_filedump doesn't do much with them now but maybe in + * the future... */ +typedef enum optionReturnCodes { + OPT_RC_VALID, /* All options are valid */ + OPT_RC_INVALID, /* Improper option string */ + OPT_RC_FILE, /* File problems */ + OPT_RC_DUPLICATE, /* Duplicate option encountered */ + OPT_RC_COPYRIGHT /* Copyright should be displayed */ +} optionReturnCodes; + +/* Simple macro to check for duplicate options and then set + * an option flag for later consumption */ +#define SET_OPTION(_x, _y, _z) \ + if (_x & _y) { \ + rc = OPT_RC_DUPLICATE; \ + duplicateSwitch = _z; \ + } else \ + _x |= _y; + +#define SEQUENCE_MAGIC 0x1717 /* PostgreSQL defined magic number */ +#define EOF_ENCOUNTERED (-1) /* Indicator for partial read */ +#define BYTES_PER_LINE 16 /* Format the binary 16 bytes per line */ + +/* Constants for pg_relnode.map decoding */ +#define RELMAPPER_MAGICSIZE 4 +#define RELMAPPER_FILESIZE 512 +/* From utils/cache/relmapper.c -- Maybe ask community to put + * these into utils/cache/relmapper.h? */ +#define RELMAPPER_FILEMAGIC 0x592717 +#define MAX_MAPPINGS 62 +#define SEGTOASTTAG "sgtt" + +extern char *fileName; +extern bool isUHeap; +extern bool isSegment; + +extern int tableRelfilenode; +extern int toastRelfilenode; + + + +/* + * Function Prototypes + */ +unsigned int GetBlockSize(FILE *fp); +unsigned int GetUHeapBlockSize(FILE *fp); + +int DumpFileContents(unsigned int blockOptions, unsigned int controlOptions, FILE *fp, unsigned int blockSize, + int blockStart, int blockEnd, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue); + +int DumpUHeapFileContents(unsigned int blockOptions, unsigned int controlOptions, FILE *fp, unsigned int blockSize, + int blockStart, int blockEnd, bool isToast, Oid toastOid, unsigned int toastExternalSize, + char *toastValue); diff --git a/contrib/gs_filedump/run_test.sql b/contrib/gs_filedump/run_test.sql new file mode 100644 index 0000000000000000000000000000000000000000..4d01cf7523fc769225b341f34aa473fce398d9e1 --- /dev/null +++ b/contrib/gs_filedump/run_test.sql @@ -0,0 +1,16 @@ +\echo Testing :relname + +vacuum :"relname"; +checkpoint; + +select relfilenode from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as oid \gset +\set output :relname '.heap' +\lo_export :oid :output + +\setenv relname :relname +\! 
gs_filedump -D $relname $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" + +-- +---------------------------------------------------------------------------------------------- +-- diff --git a/contrib/gs_filedump/segment.cpp b/contrib/gs_filedump/segment.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b0e91238cd73fbeb6cff92bbdbe23250f96d9410 --- /dev/null +++ b/contrib/gs_filedump/segment.cpp @@ -0,0 +1,408 @@ +#include +#include + +#include "gs_filedump.h" +#include "storage/smgr/segment.h" +#include "segment.h" + +/* Program exit code */ +static int exitCode = 0; +int blockSize = 8192; + +/* Return a palloc string, and callers should free it */ +char *slice_filename(char *filename, int sliceno) +{ + char *res = NULL; + int len = strlen(filename); + if (sliceno == 0) { + res = (char *)malloc(len + 1); + errno_t rc = sprintf_s(res, len + 1, "%s", filename); + securec_check(rc, "\0", "\0"); + } else { + res = (char *)malloc(len + MAX_LEN_OF_MAXINTRANGE); + errno_t rc = sprintf_s(res, len + MAX_LEN_OF_MAXINTRANGE, "%s.%d", filename, sliceno); + securec_check(rc, "\0", "\0"); + } + return res; +} + +template +int readHeadPage(FILE *fp, unsigned int relfilenode, T *page, long int magic) +{ + unsigned int blockSize = 8192; + char *block = (char *)malloc(blockSize); + if (!block) { + fprintf(stderr, "Error: Memory allocation failed.\n"); + return -1; + } + unsigned int position = relfilenode * blockSize; + + fseek(fp, position, SEEK_SET); + + /* 读取块数据并检查是否成功 */ + unsigned int bytesToFormat = fread(block, 1, blockSize, fp); + + if (bytesToFormat != blockSize) { + fprintf(stderr, "Error: Failed to read block from file.\n"); + free(block); + return -1; + } + + char *buffer_temp = (char *)memmem(block, blockSize, &magic, sizeof(magic)); + bool isMagicMatched; + + if (buffer_temp != NULL) { + if (magic == BMTLEVEL0_MAGIC) { + isMagicMatched = IsBMTLevel0Block((T *)buffer_temp); + } else { + isMagicMatched = IsNormalSegmentHead((T *)buffer_temp); + } + if (isMagicMatched) { + errno_t rc = memcpy_s(page, sizeof(T), (T *)buffer_temp, sizeof(T)); + securec_check(rc, "\0", "\0"); + } else { + printf("Warning: Is not a NormalSegmentHead\n"); + free(block); + return -1; + } + } else { + printf("Warning: SEGMENT_HEAD_MAGIC not found\n"); + free(block); + return -1; + } + + free(block); + return 0; +} + +void initSegmentInfo(FILE *fp, FILE *fp_toast) +{ + /* Path to directory with TOAST realtion file */ + char *toast_relation_path; + /* target segment toast filename */ + char toast_relation_filename[MAXPGPATH]; + /* Filename of TOAST relation file */ + FILE *toast_rel_fp; + /* filename of source of segment toast data file */ + char segmentToastDataFile[MAXPGPATH]; + char *segmentToastDataFileTag; + + errno_t rc; + int res; + + SegmentHead *seg_head = new SegmentHead(); + + res = readHeadPage(fp, (tableRelfilenode % DF_FILE_SLICE_BLOCKS), seg_head, SEGMENT_HEAD_MAGIC); + if (res != 0) { + printf("Error: SEGMENT_HEAD_MAGIC not found\n"); + exit(-1); + } + if (blockStart < 0) { + blockStart = 0; + } + if (blockEnd < 0) { + blockEnd = seg_head->nblocks - 1; + } + printf("\n* Block Number: <%d> -----\n", seg_head->nblocks); + if (blockEnd >= (int)seg_head->nblocks) { + printf("* Error: Block Number <%d> is out of range, max is <%d>\n", blockEnd, seg_head->nblocks); + exit(-1); + } + + toast_relation_path = strdup(fileName); + get_parent_directory(toast_relation_path); + + /* copy toast of segment to a new file if toastRelfilenode is given. 
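The extents referenced by the TOAST segment head (its level0 slots, plus level1 block-map pages for larger relations) are appended to a temporary file tagged with SEGTOASTTAG, which is removed again once the dump completes.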
*/ + if (toastRelfilenode >= 0) { + + rc = sprintf_s(toast_relation_filename, MAXPGPATH, "%s/%d_%s", *toast_relation_path ? toast_relation_path : ".", + toastRelfilenode, SEGTOASTTAG); + securec_check(rc, "\0", "\0"); + + if (access(toast_relation_filename, F_OK) == -1) { + toast_rel_fp = fopen(toast_relation_filename, "wb"); + } else { + toast_rel_fp = fopen(toast_relation_filename, "w"); + } + + if (toast_rel_fp) { + fclose(toast_rel_fp); + } else { + printf("Error: Failed to open or create file %s\n", toast_relation_filename); + exit(-1); + } + + SegmentHead *seg_toast_head = new SegmentHead(); + res = readHeadPage(fp_toast, (toastRelfilenode % DF_FILE_SLICE_BLOCKS), seg_toast_head, + SEGMENT_HEAD_MAGIC); + if (res != 0) { + printf("Error: SEGMENT_HEAD_MAGIC not found\n"); + exit(-1); + } + + /* copy SegmentHead.level0_slots */ + for (unsigned int l0slot = 0; l0slot < BMT_HEADER_LEVEL0_SLOTS; l0slot++) { + unsigned int blocknum = seg_toast_head->level0_slots[l0slot]; + if (blocknum == 0) { + break; + } + unsigned int extentPageCount = ExtentSizeByCount(l0slot); + unsigned int seg_fileno = EXTENT_SIZE_TO_TYPE(extentPageCount); + rc = snprintf_s(segmentToastDataFile, MAXPGPATH, MAXPGPATH, "%s/%d", toast_relation_path, seg_fileno); + securec_check(rc, "\0", "\0"); + segmentToastDataFileTag = slice_filename(segmentToastDataFile, (blocknum / DF_FILE_SLICE_BLOCKS)); + + copy_file_chunk_c(segmentToastDataFileTag, toast_relation_filename, + (blocknum % DF_FILE_SLICE_BLOCKS) * blockSize, extentPageCount * blockSize); + } + + /* copy SegmentHead.level1_slots */ + for (unsigned int l1_slot = 0; l1_slot < BMT_HEADER_LEVEL1_SLOTS; l1_slot++) { + unsigned int mbt_blocknum = seg_toast_head->level1_slots[l1_slot]; + if (mbt_blocknum == 0) { + break; + } + FILE *mbt_fp = fopen(slice_filename(fileName, (mbt_blocknum / DF_FILE_SLICE_BLOCKS)), "rb"); + if (mbt_fp) { + BMTLevel0Page *bmt_level0_page = new BMTLevel0Page(); + res = readHeadPage(mbt_fp, (mbt_blocknum % DF_FILE_SLICE_BLOCKS), bmt_level0_page, + BMTLEVEL0_MAGIC); + if (res != 0) { + printf("Error: BMTLEVEL0_MAGIC not found\n"); + exit(-1); + } + for (unsigned int mbt_slot = 0; mbt_slot < BMT_LEVEL0_SLOTS; mbt_slot++) { + unsigned int mbt_slot_blocknum = bmt_level0_page->slots[mbt_slot]; + if (mbt_slot_blocknum == 0) { + break; + } + rc = snprintf_s(segmentToastDataFile, MAXPGPATH, MAXPGPATH, "%s/%d", toast_relation_path, + EXTENT_8192); + securec_check(rc, "\0", "\0"); + segmentToastDataFileTag = + slice_filename(segmentToastDataFile, (mbt_slot_blocknum / DF_FILE_SLICE_BLOCKS)); + copy_file_chunk_c(segmentToastDataFileTag, toast_relation_filename, + (mbt_slot_blocknum % DF_FILE_SLICE_BLOCKS) * blockSize, + EXT_SIZE_8192 * blockSize); + } + } + } + } + + DumpSegmentContent(seg_head, blockStart, blockEnd); + + /* remove temp toast file */ + if (access(toast_relation_filename, F_OK) == 0 && strstr(toast_relation_filename, SEGTOASTTAG) != NULL) { + if (remove(toast_relation_filename) != 0) { + printf("Error: toastfile: %s remove failed\n", toast_relation_filename); + } + } +} + +void DumpSegmentContent(SegmentHead *segmentHead, unsigned int block_start, unsigned int block_end) +{ + /* Filename of TOAST relation file */ + FILE *toast_rel_fp; + errno_t rc; + char *segment_directory; + char segmentDataFile[MAXPGPATH]; + char *segmentDataFileTag; + + unsigned int blkstart = 0; + unsigned int blkend = 0; + int current_level1_index = -1; + BMTLevel0Page *bmt_level0_page = new BMTLevel0Page(); + + block_location_t *block_location_start = new 
block_location_t(); + block_location_t *block_location_end = new block_location_t(); + convertBlockLocation(block_start, block_location_start); + convertBlockLocation(block_end, block_location_end); + + block_location_t *block_location_current = block_location_start; + + segment_directory = strdup(fileName); + get_parent_directory(segment_directory); + + while (block_location_current->block_id <= block_location_end->block_id) { + + if (block_location_current->is_level1) { + if (current_level1_index != (int)block_location_current->level1_id) { + current_level1_index = block_location_current->level1_id; + unsigned int mbt_slot_blocknum = segmentHead->level1_slots[current_level1_index]; + FILE *mbt_fp = fopen(slice_filename(fileName, (mbt_slot_blocknum / DF_FILE_SLICE_BLOCKS)), "rb"); + if (mbt_fp) { + int res = readHeadPage(mbt_fp, (mbt_slot_blocknum % DF_FILE_SLICE_BLOCKS), bmt_level0_page, + BMTLEVEL0_MAGIC); + if (res != 0) { + printf("Error: BMTLEVEL0_MAGIC not found\n"); + exit(-1); + } + } + } + blkstart = blkend = bmt_level0_page->slots[block_location_current->level0_id]; + rc = snprintf_s(segmentDataFile, MAXPGPATH, MAXPGPATH, "%s/%d", + *segment_directory ? segment_directory : ".", EXTENT_8192); + securec_check(rc, "\0", "\0"); + } else { + blkstart = blkend = segmentHead->level0_slots[block_location_current->level0_id]; + rc = snprintf_s(segmentDataFile, MAXPGPATH, MAXPGPATH, "%s/%d", + *segment_directory ? segment_directory : ".", block_location_current->fileno); + securec_check(rc, "\0", "\0"); + } + + blkstart += block_location_current->block_offset; + if (block_location_current->extent_id == block_location_end->extent_id) { + blkend += block_location_end->block_offset; + } else { + blkend += (ExtentSizeByCount(block_location_current->extent_id) - 1); + } + + segmentDataFileTag = slice_filename(segmentDataFile, (blkstart / DF_FILE_SLICE_BLOCKS)); + + toast_rel_fp = fopen(segmentDataFileTag, "rb"); + + if (toast_rel_fp) { + exitCode = + DumpFileContents(blockOptions, controlOptions, toast_rel_fp, blockSize, blkstart % DF_FILE_SLICE_BLOCKS, + blkend % DF_FILE_SLICE_BLOCKS, false, /* is toast realtion */ + 0, /* no toast Oid */ + 0, /* no toast external size */ + NULL /* no out toast value */ + ); + } else { + rc = OPT_RC_FILE; + printf("Error: Could not open file <%s>.\n", segmentDataFile); + exitCode = 1; + } + convertBlockLocation(ExtentIdToLogicBlockNum(block_location_current->extent_id + 2), block_location_current); + } +} + +/* 通过blockNo 计算 block 所在的位置 level1, level0, block_offset */ +void convertBlockLocation(unsigned int block_id, block_location_t *block_location) +{ + block_location->is_level1 = false; + block_location->level1_id = 0; + block_location->block_id = block_id; + if (block_id < EXT_SIZE_8_TOTAL_PAGES) { + block_location->fileno = 2; + block_location->extent_id = block_id / EXT_SIZE_8; + block_location->extent_offset = block_location->extent_id; + block_location->block_offset = block_id % EXT_SIZE_8; + } else if (block_id < EXT_SIZE_128_TOTAL_PAGES) { + block_location->fileno = 3; + block_id -= EXT_SIZE_8_TOTAL_PAGES; + block_location->extent_offset = block_id / EXT_SIZE_128; + block_location->extent_id = EXT_SIZE_8_BOUNDARY + block_location->extent_offset; + block_location->block_offset = block_id % EXT_SIZE_128; + } else if (block_id < EXT_SIZE_1024_TOTAL_PAGES) { + block_location->fileno = 4; + block_id -= EXT_SIZE_128_TOTAL_PAGES; + block_location->extent_offset = block_id / EXT_SIZE_1024; + block_location->extent_id = EXT_SIZE_128_BOUNDARY + 
block_location->extent_offset; + block_location->block_offset = block_id % EXT_SIZE_1024; + } else { + block_location->fileno = 5; + block_id -= EXT_SIZE_1024_TOTAL_PAGES; + block_location->extent_offset = block_id / EXT_SIZE_8192; + block_location->extent_id = EXT_SIZE_1024_BOUNDARY + block_location->extent_offset; + block_location->block_offset = block_id % EXT_SIZE_8192; + } + + if (block_location->extent_id < BMT_HEADER_LEVEL0_SLOTS) { + block_location->level0_id = block_location->extent_id; + } else { + unsigned int left_extents = block_location->extent_id - BMT_HEADER_LEVEL0_SLOTS; + block_location->level1_id = left_extents / BMT_LEVEL0_SLOTS; + block_location->level0_id = left_extents % BMT_LEVEL0_SLOTS; + block_location->is_level1 = true; + } +} + +/** + * @brief 复制文件的某一块到另一个文件 + * @param src_path 源文件路径 + * @param dest_path 目标文件路径 + * @param offset 起始偏移量(字节) + * @param size 要复制的字节数 + * @return 是否成功 + */ +bool copy_file_chunk_c(const char *src_path, const char *dest_path, long offset, long size) +{ + FILE *src_file = fopen(src_path, "rb"); + if (!src_file) { + fprintf(stderr, "Failed to open source file: %s\n", strerror(errno)); + return false; + } + + /* 获取目标文件的目录路径 */ + std::string dest_dir(dest_path); + size_t last_slash = dest_dir.find_last_of('/'); + if (last_slash != std::string::npos) { + dest_dir = dest_dir.substr(0, last_slash); + /* 检查目录是否存在,如果不存在则创建 */ + struct stat st; + if (stat(dest_dir.c_str(), &st) != 0) { + if (mkdir(dest_dir.c_str(), 0777) != 0) { + fprintf(stderr, "Failed to create directory: %s\n", strerror(errno)); + fclose(src_file); + return false; + } + } else if (!S_ISDIR(st.st_mode)) { + fprintf(stderr, "Destination path is not a directory: %s\n", dest_dir.c_str()); + fclose(src_file); + return false; + } + } + + FILE *dest_file = fopen(dest_path, "ab"); + + if (!dest_file) { + fprintf(stderr, "Failed to open destination file: %s\n", strerror(errno)); + fclose(src_file); + return false; + } + + /* 定位到起始偏移量 */ + if (fseek(src_file, offset, SEEK_SET) != 0) { + fprintf(stderr, "fseek failed: %s\n", strerror(errno)); + fclose(src_file); + fclose(dest_file); + return false; + } + + /* 读取并写入数据 */ + const size_t buffer_size = blockSize; // 8KB 缓冲区 + char buffer[buffer_size]; + long remaining = size; + + while (remaining > 0) { + size_t read_size = (remaining > (long)buffer_size) ? 
buffer_size : remaining; + size_t bytes_read = fread(buffer, 1, read_size, src_file); + + if (bytes_read == 0) { + if (feof(src_file)) { + break; // end of file + } else { + fprintf(stderr, "fread error: %s\n", strerror(errno)); + fclose(src_file); + fclose(dest_file); + return false; + } + } + + size_t bytes_written = fwrite(buffer, 1, bytes_read, dest_file); + if (bytes_written != bytes_read) { + fprintf(stderr, "fwrite error: %s\n", strerror(errno)); + fclose(src_file); + fclose(dest_file); + return false; + } + + remaining -= bytes_read; + } + + fclose(src_file); + fclose(dest_file); + return true; +} \ No newline at end of file diff --git a/contrib/gs_filedump/segment.h b/contrib/gs_filedump/segment.h new file mode 100644 index 0000000000000000000000000000000000000000..c2550fadc2df4505ba48a0cf2cccb5fe069675b7 --- /dev/null +++ b/contrib/gs_filedump/segment.h @@ -0,0 +1,27 @@ +#ifndef _GS_FILEDUMP_SEGMENT_H_ +#define _GS_FILEDUMP_SEGMENT_H_ + +typedef struct block_location_t { + unsigned int block_id; + unsigned int extent_id; // extent id + unsigned int fileno; // file no [ 2 - 5 ] + bool is_level1; // level1 is used + unsigned int level1_id; // level1 index [ 0 - 256 ) + unsigned int level0_id; // level0 index + unsigned int extent_offset; // extent offset + unsigned int block_offset; // block offset +} block_location_t; + +char *slice_filename(char *filename, int sliceno); +template <typename T> +int readHeadPage(FILE *fp, unsigned int relfilenode, T *page, long int magic); + +/* Compute the block's location (level1, level0, block_offset) from the block number */ +void convertBlockLocation(unsigned int block_id, block_location_t *block_location); +void DumpSegmentContent(SegmentHead *segmentHead, unsigned int block_start, unsigned int block_end); + +bool copy_file_chunk_c(const char *src_path, const char *dest_path, long offset, long size); + +void initSegmentInfo(FILE *fp, FILE *fp_toast); + +#endif \ No newline at end of file diff --git a/contrib/gs_filedump/sql/datatypes.sql b/contrib/gs_filedump/sql/datatypes.sql new file mode 100644 index 0000000000000000000000000000000000000000..e1a77036da9c036d7c7fe30245be5f18bf5a3409 --- /dev/null +++ b/contrib/gs_filedump/sql/datatypes.sql @@ -0,0 +1,116 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out + +select oid as datoid from pg_database where datname = current_database() \gset + +---------------------------------------------------------------------------------------------- + +create table "int,text" (i int, t text); +insert into "int,text" values (1, 'one'), (null, 'two'), (3, null), (4, 'four'); +\set relname int,text +\ir run_test.sql + +-- do one test without options +\! 
pg_filedump int,text.heap | sed -e 's/logid ./logid ./' -e 's/recoff 0x......../recoff 0x......../' + +---------------------------------------------------------------------------------------------- + +create table bigint (x bigint); +insert into bigint values (-1), (0), (1), (null); +\set relname bigint +\ir run_test.sql + +create table bool (x bool); +insert into bool values (true), (false), (null); +\set relname bool +\ir run_test.sql + +create table char (x "char"); +insert into char values ('x'), (null); +\set relname char +\ir run_test.sql + +create table "charN" (x char(5)); +insert into "charN" values ('x'), ('xxxxx'), (null); +\set relname charN +\ir run_test.sql + +create table date (x date); +insert into date values ('2000-01-01'), ('1900-02-02'), ('2100-12-31'), ('100-01-01 BC'), ('-infinity'), ('infinity'), (null); +\set relname date +\ir run_test.sql + +create table int (x int); +insert into int values (-1), (0), (1), (null); +\set relname int +\ir run_test.sql + +create table json (x json); +insert into json values ('1'), ('"one"'), ('{"a":"b"}'), ('null'), (null); +\set relname json +\ir run_test.sql + +create table macaddr (x macaddr); +insert into macaddr values ('00:10:20:30:40:50'), (null); +\set relname macaddr +\ir run_test.sql + +create table name (x name); +insert into name values ('name'), ('1234567890123456789012345678901234567890123456789012345678901234567890'), (null); +\set relname name +\ir run_test.sql + +create table oid (x oid); +insert into oid values (-1), (0), (1), (null); +\set relname oid +\ir run_test.sql + +create table smallint (x smallint); +insert into smallint values (-1), (0), (1), (null); +\set relname smallint +\ir run_test.sql + +create table text (x text); +insert into text values ('hello world'), (null); +\set relname text +\ir run_test.sql + +create table time (x time); +insert into time values ('00:00'), ('23:59:59'), ('23:59:60'), (null); +\set relname time +\ir run_test.sql + +create table timestamp (x timestamp); +insert into timestamp values ('2000-01-01 00:00'), ('100-01-01 BC 2:22'), ('infinity'), ('-infinity'), (null); +\set relname timestamp +\ir run_test.sql + +set timezone = 'Etc/UTC'; +create table timestamptz (x timestamptz); +insert into timestamptz values ('2000-01-01 00:00'), ('100-01-01 BC 2:22'), ('infinity'), ('-infinity'), (null); +\set relname timestamptz +\ir run_test.sql + +create table timetz (x timetz); +insert into timetz values ('00:00 Etc/UTC'), ('23:59:59 Etc/UTC'), ('23:59:60 Etc/UTC'), ('1:23+4:56'), (null); +\set relname timetz +\ir run_test.sql + +create table uuid (x uuid); +insert into uuid values ('b4f0e2d6-429b-48bd-af06-6578829dd980'), ('00000000-0000-0000-0000-000000000000'), (null); +\set relname uuid +\ir run_test.sql + +create table varchar (x varchar); +insert into varchar values ('Hello World'), (''), (null); +\set relname varchar +\ir run_test.sql + +create table "varcharN" (x varchar(11)); +insert into "varcharN" values ('Hello World'), (''), (null); +\set relname varcharN +\ir run_test.sql + +create table xid (x xid); +insert into xid values ('-1'), ('0'), ('1'), (null); +\set relname xid +\ir run_test.sql diff --git a/contrib/gs_filedump/sql/float.sql b/contrib/gs_filedump/sql/float.sql new file mode 100644 index 0000000000000000000000000000000000000000..3c937b8d85ef915cb64d5f151741a6c51d1d4fd8 --- /dev/null +++ b/contrib/gs_filedump/sql/float.sql @@ -0,0 +1,16 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG12+ output in *.out/*_3.out, earlier in *_1.out/*_4.out + +select oid as 
datoid from pg_database where datname = current_database() \gset + +---------------------------------------------------------------------------------------------- + +create table float4 (x float4); +insert into float4 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float4 +\ir run_test.sql + +create table float8 (x float8); +insert into float8 values (0), ('-0'), ('-infinity'), ('infinity'), ('NaN'), (null); +\set relname float8 +\ir run_test.sql diff --git a/contrib/gs_filedump/sql/numeric.sql b/contrib/gs_filedump/sql/numeric.sql new file mode 100644 index 0000000000000000000000000000000000000000..59f2376ed88324a08de318d671b040b2e5282e3c --- /dev/null +++ b/contrib/gs_filedump/sql/numeric.sql @@ -0,0 +1,12 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- PG14+ output in *.out/*_3.out, earlier in *_1.out/*_4.out + +select oid as datoid from pg_database where datname = current_database() \gset + +---------------------------------------------------------------------------------------------- + +create table numeric (x numeric); +insert into numeric values (0), ('12341234'), ('-567890'), ('NaN'), (null); +insert into numeric values ('-Infinity'), ('Infinity'); -- needs PG 14 +\set relname numeric +\ir run_test.sql diff --git a/contrib/gs_filedump/sql/toast.sql b/contrib/gs_filedump/sql/toast.sql new file mode 100644 index 0000000000000000000000000000000000000000..985f1003871c8880ac07a5fd7b9c376c7dd7bce4 --- /dev/null +++ b/contrib/gs_filedump/sql/toast.sql @@ -0,0 +1,38 @@ +-- PG14+ output in toast.out/_3.out (32-bit); PG13- output in toast_1.out/_4.out + +create table toast ( + description text, + data text +); + +insert into toast values ('short inline', 'xxx'); +insert into toast values ('long inline uncompressed', repeat('x', 200)); + +alter table toast alter column data set storage external; +insert into toast values ('external uncompressed', repeat('0123456789 8< ', 200)); + +alter table toast alter column data set storage extended; +insert into toast values ('inline compressed pglz', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed pglz', repeat('0123456789 8< ', 20000)); + +alter table toast alter column data set compression lz4; +insert into toast values ('inline compressed lz4', repeat('0123456789 8< ', 200)); +insert into toast values ('extended compressed lz4', repeat('0123456789 8< ', 50000)); + +vacuum toast; +checkpoint; + +-- copy tables where client can read it +\set relname 'toast' +select oid as datoid from pg_database where datname = current_database() \gset +select relfilenode, reltoastrelid from pg_class where relname = :'relname' \gset +select lo_import(format('base/%s/%s', :'datoid', :'relfilenode')) as loid \gset +\set output :relname '.heap' +\lo_export :loid :output +select lo_import(format('base/%s/%s', :'datoid', :'reltoastrelid')) as toast_loid \gset +\set output :reltoastrelid +\lo_export :toast_loid :output + +\setenv relname :relname +\! pg_filedump -D text,text $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" +\! 
pg_filedump -D text,text -t $relname.heap | sed -e "s/logid ./logid ./" -e "s/recoff 0x......../recoff 0x......../" -e 's/id: ...../id: ...../g' -e 's/ 8< .*//' diff --git a/contrib/gs_filedump/sql/xml.sql b/contrib/gs_filedump/sql/xml.sql new file mode 100644 index 0000000000000000000000000000000000000000..a9de62f7b556eb7179a7a836c45689990198d032 --- /dev/null +++ b/contrib/gs_filedump/sql/xml.sql @@ -0,0 +1,11 @@ +-- 64 bit output in *.out, 32 bit output in *_3.out +-- server without --with-libxml support output in *_1.out + +select oid as datoid from pg_database where datname = current_database() \gset + +---------------------------------------------------------------------------------------------- + +create table xml (x xml); +insert into xml values (''), (null); +\set relname xml +\ir run_test.sql diff --git a/contrib/gs_filedump/stringinfo.cpp b/contrib/gs_filedump/stringinfo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1d9009fc9ab0ca30c922b964c7592597071be4ab --- /dev/null +++ b/contrib/gs_filedump/stringinfo.cpp @@ -0,0 +1,140 @@ +/* + * Code mostly borrowed from PostgreSQL's stringinfo.c + * palloc replaced to malloc, etc. + */ + +#include "postgres_fe.h" +#include "postgres.h" +#include + +#define MaxAllocSize ((Size)0x3fffffff) /* 1 gigabyte - 1 */ + +/*------------------------- + * StringInfoData holds information about an extensible string. + * data is the current buffer for the string. + * len is the current string length. There is guaranteed to be + * a terminating '\0' at data[len], although this is not very + * useful when the string holds binary data rather than text. + * maxlen is the allocated size in bytes of 'data', i.e. the maximum + * string size (including the terminating '\0' char) that we can + * currently store in 'data' without having to reallocate + * more space. We must always have maxlen > len. + * cursor is initialized to zero by makeStringInfo or initStringInfo, + * but is not otherwise touched by the stringinfo.c routines. + * Some routines use it to scan through a StringInfo. + *------------------------- + */ + +/* + * initStringInfo + * + * Initialize a StringInfoData struct (with previously undefined contents) + * to describe an empty string. + */ +void initStringInfo(StringInfo str) +{ + int size = 1024; /* initial default buffer size */ + + str->data = (char *)malloc(size); + str->maxlen = size; + resetStringInfo(str); +} + +/* + * appendStringInfoString + * + * Append a null-terminated string to str. + */ +void appendStringInfoString(StringInfo str, const char *s) +{ + appendBinaryStringInfo(str, s, strlen(s)); +} + +/* + * appendBinaryStringInfo + * + * Append arbitrary binary data to a StringInfo, allocating more space + * if necessary. + */ +void appendBinaryStringInfo(StringInfo str, const char *data, int datalen) +{ + Assert(str != NULL); + + /* Make more room if needed */ + enlargeStringInfo(str, datalen); + + /* OK, append the data */ + errno_t rc = memcpy_s(str->data + str->len, (size_t)(str->maxlen - str->len), data, (size_t)datalen); + securec_check(rc, "\0", "\0"); + str->len += datalen; + + /* + * Keep a trailing null in place, even though it's probably useless for + * binary data. (Some callers are dealing with text but call this because + * their input isn't null-terminated.) + */ + str->data[str->len] = '\0'; +} + +/* + * enlargeBuffer + * + * Make sure there is enough space for 'needed' more bytes + * ('needed' does not include the terminating null). 
+ * + * NB: because we use repalloc() to enlarge the buffer, the string buffer + * will remain allocated in the same memory context that was current when + * initStringInfo was called, even if another context is now current. + * This is the desired and indeed critical behavior! + */ +void enlargeBuffer(int needed, // needed more bytes + int len, // current used buffer length in bytes + int *maxlen, // original/new allocated buffer length + char **data) // pointer to original/new buffer +{ + int newlen; + + /* + * Guard against out-of-range "needed" values. Without this, we can get + * an overflow or infinite loop in the following. + */ + /* should not happen */ + if (unlikely(needed < 0)) { + printf("Error: invalid string enlargement request size: %d\n", needed); + exit(1); + } + + needed += len + 1; /* total space required now */ + + /* Because of the above test, we now have needed <= MaxAllocSize */ + if (likely(needed <= (int)*maxlen)) { + return; /* got enough space already */ + } + + if (unlikely(((Size)len > MaxAllocSize) || ((Size)(needed - 1)) >= MaxAllocSize)) { + printf("out of memory\n"); + printf("Cannot enlarge buffer containing %d bytes by %d more bytes.\n", len, needed); + exit(1); + } + /* + * We don't want to allocate just a little more space with each append; + * for efficiency, double the buffer size each time it overflows. + * Actually, we might need to more than double it if 'needed' is big... + */ + newlen = 2 * *maxlen; + while (needed > newlen) { + newlen = 2 * newlen; + } + + /* + * Clamp to MaxAllocSize in case we went past it. Note we are assuming + * here that MaxAllocSize <= INT_MAX/2, else the above loop could + * overflow. We will still have newlen >= needed. + */ + if (newlen > (int)MaxAllocSize) { + newlen = (int)MaxAllocSize; + } + + *data = (char *)realloc(*data, newlen); + *maxlen = newlen; +} \ No newline at end of file diff --git a/contrib/gs_filedump/t/001_basic.pl b/contrib/gs_filedump/t/001_basic.pl new file mode 100644 index 0000000000000000000000000000000000000000..84077b03525e256d2628c39e5422f40ebb9adaab --- /dev/null +++ b/contrib/gs_filedump/t/001_basic.pl @@ -0,0 +1,130 @@ +#!/usr/bin/perl + +use strict; +use warnings; +use Config; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; +use File::Spec; +use IPC::Run qw( run timeout ); + + +note "setting up PostgreSQL instance"; + +my $node = PostgreSQL::Test::Cluster->new('master'); +$node->init(extra => ["--data-checksums"]); +$node->append_conf('postgresql.conf', 'fsync = True'); +$node->start; + +my $query = qq( + create table t1(a int, b text, c bigint, filler char(400)); + insert into t1 values (1, 'asdasd1', 29347293874234444); + insert into t1 values (2, 'asdasd2', 29347293874234445); + insert into t1 values (3, 'asdasd', 29347293874234446); + insert into t1 values (4, 'asdasd', 29347293874234447); + checkpoint; +); +$node->safe_psql('postgres', $query); + +note "running tests"; + +test_basic_output(); +test_btree_output(); +test_spgist_output(); +test_gin_output(); + +$node->stop; +done_testing(); + +sub get_table_location +{ + return File::Spec->catfile( + $node->data_dir, + $node->safe_psql('postgres', qq(SELECT pg_relation_filepath('@_');)) + ); +} + +sub run_pg_filedump +{ + my ($rel, @options) = @_; + my ($stdout, $stderr); + + my $loc = get_table_location($rel); + my $cmd = [ 'pg_filedump', @options, $loc ]; + my $result = run $cmd, '>', \$stdout, '2>', \$stderr + or die "Error: could not execute pg_filedump"; + + ok($stdout !~ qr/Error/, "error not 
found"); + + return $stdout; +} + +sub test_basic_output +{ + my $out_ = run_pg_filedump('t1', ("-D", "int,text,bigint")); + + ok($out_ =~ qr/Header/, "Header found"); + ok($out_ =~ qr/COPY: 1/, "first COPY found"); + ok($out_ =~ qr/COPY: 2/, "second COPY found"); + ok($out_ =~ qr/COPY: 3/, "third COPY found"); + ok($out_ =~ qr/COPY: 4/, "fourth COPY found"); + ok($out_ =~ qr/29347293874234447/, "number found"); + ok($out_ =~ qr/asdasd/, "string found"); +} + +sub test_btree_output +{ + my $query = qq( + insert into t1 select * FROM generate_series(1, 10000); + create index i1 on t1(b); + checkpoint; + ); + $node->safe_psql('postgres', $query); + + my $out_ = run_pg_filedump('i1', ('-i')); + + ok($out_ =~ qr/Header/, "Header found"); + ok($out_ =~ qr/BTree Index Section/, "BTree Index Section found"); + ok($out_ =~ qr/BTree Meta Data/, "BTree Meta Data found"); + ok($out_ =~ qr/Item 3/, "Item found"); + ok($out_ =~ qr/Previous/, "Previous item found"); + ok($out_ =~ qr/Next/, "Next item found"); + ok($out_ =~ qr/Level/, "Level found"); + ok($out_ !~ qr/Next XID/, "Next XID not found"); + + # make leaf with BTP_DELETED flag + $node->safe_psql('postgres', "delete from t1 where a >= 2000 and a < 4000;"); + $node->safe_psql('postgres', "vacuum t1; checkpoint;"); + + $out_ = run_pg_filedump('i1', ('-i')); + + ok($out_ =~ qr/Next XID/, "Next XID found"); +} + +sub test_spgist_output +{ + $node->safe_psql('postgres', "create index i2 on t1 using spgist(b); checkpoint;"); + + my $out_ = run_pg_filedump('i2'); + + ok($out_ =~ qr/Header/, "Header found"); + ok($out_ =~ qr/SPGIST Index Section/, "SPGIST Index Section found"); + ok($out_ =~ qr/Item 4/, "Item found"); +} + +sub test_gin_output +{ + my $query = qq( + create extension btree_gin; + create index i3 on t1 using gin(b); + checkpoint; + ); + $node->safe_psql('postgres', $query); + + my $out_ = run_pg_filedump('i3'); + + ok($out_ =~ qr/Header/, "Header found"); + ok($out_ =~ qr/GIN Index Section/, "GIN Index Section found"); + ok($out_ =~ qr/ItemPointer 3/, "Item found"); +}