diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index fbbd867c239..58599c7aeaa 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -7167,7 +7167,7 @@ fetch_more_data_begin(AsyncRequest *areq)
snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
fsstate->fetch_size, fsstate->cursor_number);
- if (PQsendQuery(fsstate->conn, sql) < 0)
+ if (!PQsendQuery(fsstate->conn, sql))
pgfdw_report_error(ERROR, NULL, fsstate->conn, false, fsstate->query);
/* Remember that the request is in process */
diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml
index 02f04891129..eb5e9f48db1 100644
--- a/doc/src/sgml/client-auth.sgml
+++ b/doc/src/sgml/client-auth.sgml
@@ -417,7 +417,9 @@ hostnogssenc databaseuser
Specifies the authentication method to use when a connection matches
this record. The possible choices are summarized here; details
- are in .
+ are in . All the options
+ are lower case and treated case sensitively, so even acronyms like
+ ldap must be specified as lower case.
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index bd61286e042..bc3d0d1bd14 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -10458,6 +10458,25 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
+
+ allow_in_place_tablespaces (boolean)
+
+ allow_in_place_tablespaces configuration parameter
+
+
+
+
+ Allows tablespaces to be created as directories inside
+ pg_tblspc, when an empty location string
+ is provided to the CREATE TABLESPACE command. This
+ is intended to allow testing replication scenarios where primary and
+ standby servers are running on the same machine. Such directories
+ are likely to confuse backup tools that expect to find only symbolic
+ links in that location. Only superusers can change this setting.
+
+
+
+
allow_system_table_mods (boolean)
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index c358bff56d9..c85e92b3a2f 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -557,7 +557,7 @@ CREATE TABLE products (
tests, it cannot guarantee that the database will not reach a state
in which the constraint condition is false (due to subsequent changes
of the other row(s) involved). This would cause a database dump and
- reload to fail. The reload could fail even when the complete
+ restore to fail. The restore could fail even when the complete
database state is consistent with the constraint, due to rows not
being loaded in an order that will satisfy the constraint. If
possible, use UNIQUE, EXCLUDE,
@@ -569,10 +569,10 @@ CREATE TABLE products (
If what you desire is a one-time check against other rows at row
insertion, rather than a continuously-maintained consistency
guarantee, a custom trigger can be used
- to implement that. (This approach avoids the dump/reload problem because
+ to implement that. (This approach avoids the dump/restore problem because
pg_dump does not reinstall triggers until after
- reloading data, so that the check will not be enforced during a
- dump/reload.)
+ restoring data, so that the check will not be enforced during a
+ dump/restore.)
@@ -594,7 +594,7 @@ CREATE TABLE products (
function. PostgreSQL does not disallow
that, but it will not notice if there are rows in the table that now
violate the CHECK constraint. That would cause a
- subsequent database dump and reload to fail.
+ subsequent database dump and restore to fail.
The recommended way to handle such a change is to drop the constraint
(using ALTER TABLE), adjust the function definition,
and re-add the constraint, thereby rechecking it against all table rows.
diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml
index 5a2dc4a8ae8..9df09df4d77 100644
--- a/doc/src/sgml/ecpg.sgml
+++ b/doc/src/sgml/ecpg.sgml
@@ -1890,7 +1890,8 @@ EXEC SQL SELECT b INTO :val :val_ind FROM test1;
The indicator variable val_ind will be zero if
the value was not null, and it will be negative if the value was
- null.
+ null. (See to enable
+ Oracle-specific behavior.)
@@ -9801,6 +9802,42 @@ risnull(CINTTYPE, (char *) &i);
+
+ Oracle Compatibility Mode
+
+ ecpg can be run in a so-called Oracle
+ compatibility mode. If this mode is active, it tries to
+ behave as if it were Oracle Pro*C.
+
+
+
+ Specifically, this mode changes ecpg in three ways:
+
+
+
+
+ Pad character arrays receiving character string types with
+ trailing spaces to the specified length
+
+
+
+
+
+ Zero byte terminate these character arrays, and set the indicator
+ variable if truncation occurs
+
+
+
+
+
+ Set the null indicator to -1 when character
+ arrays receive empty character string types
+
+
+
+
+
+
Internals
diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml
index bb0b2679bbb..8b8ccd9d4c0 100644
--- a/doc/src/sgml/extend.sgml
+++ b/doc/src/sgml/extend.sgml
@@ -982,7 +982,7 @@ SET LOCAL search_path TO @extschema@, pg_temp;
pg_dump. But that behavior is undesirable for a
configuration table; any data changes made by the user need to be
included in dumps, or the extension will behave differently after a dump
- and reload.
+ and restore.
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index ceb09d788cc..16ad120dd23 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -1840,6 +1840,9 @@ repeat('Pg', 4) PgPgPgPg
subsequent random() calls in the current session
can be repeated by re-issuing setseed() with the same
argument.
+ Without any prior setseed() call in the same
+ session, the first random() call obtains a seed
+ from a platform-dependent source of random bits.
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 9c67c9d1c50..949bba7c768 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -5607,7 +5607,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
- Base backup phases
+ Base Backup Phases
diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml
index 3d3cbb339ce..d357799e53b 100644
--- a/doc/src/sgml/mvcc.sgml
+++ b/doc/src/sgml/mvcc.sgml
@@ -875,7 +875,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
Table-Level Lock Modes
- ACCESS SHARE
+ ACCESS SHARE (AccessShareLock)
@@ -893,7 +893,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
- ROW SHARE
+ ROW SHARE (RowShareLock)
@@ -914,7 +914,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
- ROW EXCLUSIVE
+ ROW EXCLUSIVE (RowExclusiveLock)
@@ -936,7 +936,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
- SHARE UPDATE EXCLUSIVE
+ SHARE UPDATE EXCLUSIVE (ShareUpdateExclusiveLock)
@@ -962,7 +962,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
- SHARE
+ SHARE (ShareLock)
@@ -982,7 +982,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
- SHARE ROW EXCLUSIVE
+ SHARE ROW EXCLUSIVE (ShareRowExclusiveLock)
@@ -1004,7 +1004,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
- EXCLUSIVE
+ EXCLUSIVE (ExclusiveLock)
@@ -1026,7 +1026,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
- ACCESS EXCLUSIVE
+ ACCESS EXCLUSIVE (AccessExclusiveLock)
diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml
index 9cf8ebea808..749d4693744 100644
--- a/doc/src/sgml/perform.sgml
+++ b/doc/src/sgml/perform.sgml
@@ -1785,7 +1785,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
Dump scripts generated by pg_dump automatically apply
- several, but not all, of the above guidelines. To reload a
+ several, but not all, of the above guidelines. To restore a
pg_dump dump as quickly as possible, you need to
do a few extra things manually. (Note that these points apply while
restoring a dump, not while creating it.
diff --git a/doc/src/sgml/plhandler.sgml b/doc/src/sgml/plhandler.sgml
index 40ee59de9f3..980c95ecf39 100644
--- a/doc/src/sgml/plhandler.sgml
+++ b/doc/src/sgml/plhandler.sgml
@@ -156,7 +156,7 @@
attached to a function when check_function_bodies is on.
Therefore, checks whose results might be affected by GUC parameters
definitely should be skipped when check_function_bodies is
- off, to avoid false failures when reloading a dump.
+ off, to avoid false failures when restoring a dump.
diff --git a/doc/src/sgml/ref/alter_function.sgml b/doc/src/sgml/ref/alter_function.sgml
index 3c99b450e0a..ee94c34ae38 100644
--- a/doc/src/sgml/ref/alter_function.sgml
+++ b/doc/src/sgml/ref/alter_function.sgml
@@ -161,8 +161,10 @@ ALTER FUNCTION name [ ( [ [ extension_name
- The name of the extension that the procedure is to depend on.
+ This form marks the procedure as dependent on the extension, or no longer
+ dependent on the extension if NO is specified.
+ A procedure that's marked as dependent on an extension is dropped when the
+ extension is dropped, even if cascade is not specified.
+ A procedure can depend upon multiple extensions, and will be dropped when
+ any one of those extensions is dropped.
diff --git a/doc/src/sgml/ref/alter_type.sgml b/doc/src/sgml/ref/alter_type.sgml
index 21887e88a0f..146065144f5 100644
--- a/doc/src/sgml/ref/alter_type.sgml
+++ b/doc/src/sgml/ref/alter_type.sgml
@@ -411,7 +411,7 @@ ALTER TYPE name SET ( since the original creation of the enum type). The slowdown is
usually insignificant; but if it matters, optimal performance can be
regained by dropping and recreating the enum type, or by dumping and
- reloading the database.
+ restoring the database.
diff --git a/doc/src/sgml/ref/create_domain.sgml b/doc/src/sgml/ref/create_domain.sgml
index e4b856d630c..82a0b874929 100644
--- a/doc/src/sgml/ref/create_domain.sgml
+++ b/doc/src/sgml/ref/create_domain.sgml
@@ -234,7 +234,7 @@ INSERT INTO tab (domcol) VALUES ((SELECT domcol FROM tab WHERE false));
function. PostgreSQL does not disallow that,
but it will not notice if there are stored values of the domain type that
now violate the CHECK constraint. That would cause a
- subsequent database dump and reload to fail. The recommended way to
+ subsequent database dump and restore to fail. The recommended way to
handle such a change is to drop the constraint (using ALTER
DOMAIN), adjust the function definition, and re-add the
constraint, thereby rechecking it against stored data.
diff --git a/doc/src/sgml/ref/drop_extension.sgml b/doc/src/sgml/ref/drop_extension.sgml
index 5e507dec928..c01ddace84c 100644
--- a/doc/src/sgml/ref/drop_extension.sgml
+++ b/doc/src/sgml/ref/drop_extension.sgml
@@ -30,7 +30,9 @@ DROP EXTENSION [ IF EXISTS ] name [
DROP EXTENSION removes extensions from the database.
- Dropping an extension causes its component objects to be dropped as well.
+ Dropping an extension causes its component objects, and other explicitly
+ dependent routines (see ,
+ the depends on extension action), to be dropped as well.
@@ -77,9 +79,9 @@ DROP EXTENSION [ IF EXISTS ] name [
RESTRICT
- Refuse to drop the extension if any objects depend on it (other than
- its own member objects and other extensions listed in the same
- DROP command). This is the default.
+ This option prevents the specified extensions from being dropped
+ if there exist non-extension-member objects that depend on any of
+ the extensions. This is the default.
diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml
index 2973b72b815..c3f49f73980 100644
--- a/doc/src/sgml/ref/insert.sgml
+++ b/doc/src/sgml/ref/insert.sgml
@@ -75,6 +75,11 @@ INSERT INTO table_name [ AS
+ INSERT into tables that lack unique indexes will
+ not be blocked by concurrent activity. Tables with unique indexes
+ might block if concurrent sessions perform actions that lock or modify
+ rows matching the unique index values being inserted; the details
+ are covered in .
ON CONFLICT can be used to specify an alternative
action to raising a unique constraint or exclusion constraint
violation error. (See below.)
@@ -176,7 +181,7 @@ INSERT INTO table_name [ AS ON CONFLICT DO UPDATE
targets a table named excluded, since that will otherwise
- be taken as the name of the special table representing rows proposed
+ be taken as the name of the special table representing the row proposed
for insertion.
@@ -396,7 +401,7 @@ INSERT INTO table_name [ AS SET and
WHERE clauses in ON CONFLICT DO
UPDATE have access to the existing row using the
- table's name (or an alias), and to rows proposed for insertion
+ table's name (or an alias), and to the row proposed for insertion
using the special excluded table.
SELECT privilege is required on any column in the
target table where corresponding excluded
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index ca6ff8cdc65..956f97e2537 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -371,9 +371,9 @@ PostgreSQL documentation
Requesting exclusive locks on database objects while running a parallel dump could
- cause the dump to fail. The reason is that the pg_dump coordinator process
- requests shared locks on the objects that the worker processes are going to dump later
- in order to
+ cause the dump to fail. The reason is that the pg_dump leader process
+ requests shared locks (ACCESS SHARE) on the
+ objects that the worker processes are going to dump later in order to
make sure that nobody deletes them and makes them go away while the dump is running.
If another client then requests an exclusive lock on a table, that lock will not be
granted but will be queued waiting for the shared lock of the coordinator process to be
@@ -694,7 +694,7 @@ PostgreSQL documentation
...). This will make restoration very slow; it is mainly
useful for making dumps that can be loaded into
non-PostgreSQL databases.
- Any error during reloading will cause only rows that are part of the
+ Any error during restoring will cause only rows that are part of the
problematic INSERT to be lost, rather than the
entire table contents.
@@ -718,9 +718,9 @@ PostgreSQL documentation
This option is relevant only when creating a data-only dump.
It instructs pg_dump to include commands
to temporarily disable triggers on the target tables while
- the data is reloaded. Use this if you have referential
+ the data is restored. Use this if you have referential
integrity checks or other triggers on the tables that you
- do not want to invoke during data reload.
+ do not want to invoke during data restore.
@@ -838,7 +838,7 @@ PostgreSQL documentation
than COPY). This will make restoration very slow;
it is mainly useful for making dumps that can be loaded into
non-PostgreSQL databases.
- Any error during reloading will cause only rows that are part of the
+ Any error during restoring will cause only rows that are part of the
problematic INSERT to be lost, rather than the
entire table contents. Note that the restore might fail altogether if
you have rearranged column order. The
@@ -857,12 +857,22 @@ PostgreSQL documentation
target the root of the partitioning hierarchy that contains it, rather
than the partition itself. This causes the appropriate partition to
be re-determined for each row when the data is loaded. This may be
- useful when reloading data on a server where rows do not always fall
+ useful when restoring data on a server where rows do not always fall
into the same partitions as they did on the original server. That
could happen, for example, if the partitioning column is of type text
and the two systems have different definitions of the collation used
to sort the partitioning column.
+
+
+ It is best not to use parallelism when restoring from an archive made
+ with this option, because pg_restore will
+ not know exactly which partition(s) a given archive data item will
+ load data into. This could result in inefficiency due to lock
+ conflicts between parallel jobs, or perhaps even restore failures due
+ to foreign key constraints being set up before all the relevant data
+ is loaded.
+
@@ -1021,7 +1031,7 @@ PostgreSQL documentation
Dump data as INSERT commands (rather than
COPY). Controls the maximum number of rows per
INSERT command. The value specified must be a
- number greater than zero. Any error during reloading will cause only
+ number greater than zero. Any error during restoring will cause only
rows that are part of the problematic INSERT to be
lost, rather than the entire table contents.
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 5bde886c453..ae632f739cd 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -310,9 +310,9 @@ PostgreSQL documentation
This option is relevant only when creating a data-only dump.
It instructs pg_dumpall to include commands
to temporarily disable triggers on the target tables while
- the data is reloaded. Use this if you have referential
+ the data is restored. Use this if you have referential
integrity checks or other triggers on the tables that you
- do not want to invoke during data reload.
+ do not want to invoke during data restore.
@@ -389,7 +389,7 @@ PostgreSQL documentation
target the root of the partitioning hierarchy that contains it, rather
than the partition itself. This causes the appropriate partition to
be re-determined for each row when the data is loaded. This may be
- useful when reloading data on a server where rows do not always fall
+ useful when restoring data on a server where rows do not always fall
into the same partitions as they did on the original server. That
could happen, for example, if the partitioning column is of type text
and the two systems have different definitions of the collation used
@@ -549,7 +549,7 @@ PostgreSQL documentation
Dump data as INSERT commands (rather than
COPY). Controls the maximum number of rows per
INSERT command. The value specified must be a
- number greater than zero. Any error during reloading will cause only
+ number greater than zero. Any error during restoring will cause only
rows that are part of the problematic INSERT to be
lost, rather than the entire table contents.
@@ -824,7 +824,7 @@ PostgreSQL documentation
- To reload database(s) from this file, you can use:
+ To restore database(s) from this file, you can use:
$psql -f db.out postgres
diff --git a/doc/src/sgml/ref/pg_resetwal.sgml b/doc/src/sgml/ref/pg_resetwal.sgml
index 3e4882cdc65..fd539f56043 100644
--- a/doc/src/sgml/ref/pg_resetwal.sgml
+++ b/doc/src/sgml/ref/pg_resetwal.sgml
@@ -55,7 +55,7 @@ PostgreSQL documentation
After running this command, it should be possible to start the server,
but bear in mind that the database might contain inconsistent data due to
partially-committed transactions. You should immediately dump your data,
- run initdb, and reload. After reload, check for
+ run initdb, and restore. After restore, check for
inconsistencies and repair as needed.
@@ -78,7 +78,7 @@ PostgreSQL documentation
discussed below. If you are not able to determine correct values for all
these fields, can still be used, but
the recovered database must be treated with even more suspicion than
- usual: an immediate dump and reload is imperative. Do not
+ usual: an immediate dump and restore is imperative. Do not
execute any data-modifying operations in the database before you dump,
as any such action is likely to make the corruption worse.
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index 93ea937ac8e..1b56a4afb36 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -538,9 +538,9 @@ PostgreSQL documentation
This option is relevant only when performing a data-only restore.
It instructs pg_restore to execute commands
to temporarily disable triggers on the target tables while
- the data is reloaded. Use this if you have referential
+ the data is restored. Use this if you have referential
integrity checks or other triggers on the tables that you
- do not want to invoke during data reload.
+ do not want to invoke during data restore.
@@ -958,7 +958,7 @@ CREATE DATABASE foo WITH TEMPLATE template0;
- To reload the dump into a new database called newdb:
+ To restore the dump into a new database called newdb:
$createdb -T template0 newdb
diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml
index f21563fb5b9..6069063b481 100644
--- a/doc/src/sgml/ref/pgupgrade.sgml
+++ b/doc/src/sgml/ref/pgupgrade.sgml
@@ -39,7 +39,7 @@ PostgreSQL documentation
pg_upgrade (formerly called pg_migrator) allows data
stored in PostgreSQL data files to be upgraded to a later PostgreSQL
- major version without the data dump/reload typically required for
+ major version without the data dump/restore typically required for
major version upgrades, e.g., from 9.5.8 to 9.6.4 or from 10.7 to 11.2.
It is not required for minor version upgrades, e.g., from 9.6.2 to 9.6.3
or from 10.1 to 10.2.
@@ -415,7 +415,7 @@ NET STOP postgresql-&majorversion;
The option allows multiple CPU cores to be used
- for copying/linking of files and to dump and reload database schemas
+ for copying/linking of files and to dump and restore database schemas
in parallel; a good place to start is the maximum of the number of
CPU cores and tablespaces. This option can dramatically reduce the
time to upgrade a multi-database server running on a multiprocessor
@@ -614,7 +614,8 @@ rsync --archive --delete --hard-links --size-only --no-inc-recursive /vol1/pg_tb
Configure the servers for log shipping. (You do not need to run
pg_start_backup() and pg_stop_backup()
or take a file system backup as the standbys are still synchronized
- with the primary.)
+ with the primary.) Replication slots are not copied and must
+ be recreated.
diff --git a/doc/src/sgml/ref/release_savepoint.sgml b/doc/src/sgml/ref/release_savepoint.sgml
index 39665d28efa..daf8eb9a436 100644
--- a/doc/src/sgml/ref/release_savepoint.sgml
+++ b/doc/src/sgml/ref/release_savepoint.sgml
@@ -82,8 +82,9 @@ RELEASE [ SAVEPOINT ] savepoint_name
- If multiple savepoints have the same name, only the one that was most
- recently defined is released.
+ If multiple savepoints have the same name, only the most recently defined
+ unreleased one is released. Repeated commands will release progressively
+ older savepoints.
diff --git a/doc/src/sgml/ref/savepoint.sgml b/doc/src/sgml/ref/savepoint.sgml
index b17342a1ee6..f84ac3d167f 100644
--- a/doc/src/sgml/ref/savepoint.sgml
+++ b/doc/src/sgml/ref/savepoint.sgml
@@ -53,7 +53,9 @@ SAVEPOINT savepoint_namesavepoint_name
- The name to give to the new savepoint.
+ The name to give to the new savepoint. If savepoints with the
+ same name already exist, they will be inaccessible until newer
+ identically-named savepoints are released.
@@ -106,6 +108,32 @@ COMMIT;
The above transaction will insert both 3 and 4.
+
+
+ To use a single savepoint name:
+
+BEGIN;
+ INSERT INTO table1 VALUES (1);
+ SAVEPOINT my_savepoint;
+ INSERT INTO table1 VALUES (2);
+ SAVEPOINT my_savepoint;
+ INSERT INTO table1 VALUES (3);
+
+ -- rollback to the second savepoint
+ ROLLBACK TO SAVEPOINT my_savepoint;
+ SELECT * FROM table1; -- shows rows 1 and 2
+
+ -- release the second savepoint
+ RELEASE SAVEPOINT my_savepoint;
+
+ -- rollback to the first savepoint
+ ROLLBACK TO SAVEPOINT my_savepoint;
+ SELECT * FROM table1; -- shows only row 1
+COMMIT;
+
+ The above transaction shows row 3 being rolled back first, then row 2.
+
+
diff --git a/doc/src/sgml/ref/set.sgml b/doc/src/sgml/ref/set.sgml
index 339ee9eec94..c4aab56a2d3 100644
--- a/doc/src/sgml/ref/set.sgml
+++ b/doc/src/sgml/ref/set.sgml
@@ -175,8 +175,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone
Sets the internal seed for the random number generator (the
function random). Allowed values are
- floating-point numbers between -1 and 1, which are then
- multiplied by 231-1.
+ floating-point numbers between -1 and 1 inclusive.
diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml
index cf2630c3fc3..375644059db 100644
--- a/doc/src/sgml/runtime.sgml
+++ b/doc/src/sgml/runtime.sgml
@@ -1678,7 +1678,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid`major releases of PostgreSQL, the
internal data storage format is subject to change, thus complicating
upgrades. The traditional method for moving data to a new major version
- is to dump and reload the database, though this can be slow. A
+ is to dump and restore the database, though this can be slow. A
faster method is . Replication methods are
also available, as discussed below.
(If you are using a pre-packaged version
@@ -1764,7 +1764,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid`
One upgrade method is to dump data from one major version of
- PostgreSQL and reload it in another — to do
+ PostgreSQL and restore it in another — to do
this, you must use a logical backup tool like
pg_dumpall; file system
level backup methods will not work. (There are checks in place that prevent
diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml
index 6afaf9e62c4..fbe049f0636 100644
--- a/doc/src/sgml/textsearch.sgml
+++ b/doc/src/sgml/textsearch.sgml
@@ -1974,7 +1974,7 @@ CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE
explicitly when creating tsvector values inside triggers,
so that the column's contents will not be affected by changes to
default_text_search_config. Failure to do this is likely to
- lead to problems such as search results changing after a dump and reload.
+ lead to problems such as search results changing after a dump and restore.
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index cff69879aa1..07831e9b098 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -11918,6 +11918,14 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
snprintf(fullpath, sizeof(fullpath), "pg_tblspc/%s", de->d_name);
+ /*
+ * Skip anything that isn't a symlink/junction. For testing only,
+ * we sometimes use allow_in_place_tablespaces to create
+ * directories directly under pg_tblspc, which would fail below.
+ */
+ if (get_dirent_type(fullpath, de, false, ERROR) != PGFILETYPE_LNK)
+ continue;
+
#if defined(HAVE_READLINK) || defined(WIN32)
rllen = readlink(fullpath, linkpath, sizeof(linkpath));
if (rllen < 0)
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 6c38ca470f6..39994474faf 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -1972,6 +1972,13 @@ find_expr_references_walker(Node *node,
add_object_address(OCLASS_TYPE, objoid, 0,
context->addrs);
break;
+ case REGCOLLATIONOID:
+ objoid = DatumGetObjectId(con->constvalue);
+ if (SearchSysCacheExists1(COLLOID,
+ ObjectIdGetDatum(objoid)))
+ add_object_address(OCLASS_COLLATION, objoid, 0,
+ context->addrs);
+ break;
case REGCONFIGOID:
objoid = DatumGetObjectId(con->constvalue);
if (SearchSysCacheExists1(TSCONFIGOID,
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 21c863ff346..c9a57726058 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -192,7 +192,7 @@ static int gp_acquire_sample_rows_func(Relation onerel, int elevel,
static BlockNumber acquire_index_number_of_blocks(Relation indexrel, Relation tablerel);
static void gp_acquire_correlations_dispatcher(Oid relOid, bool inh, float4 *correlations, bool *correlationsIsNull);
-static int compare_rows(const void *a, const void *b);
+static int compare_rows(const void *a, const void *b, void *arg);
static int acquire_inherited_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
@@ -1911,7 +1911,8 @@ acquire_sample_rows(Relation onerel, int elevel,
* tuples are already sorted.
*/
if (numrows == targrows)
- qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
+ qsort_interruptible((void *) rows, numrows, sizeof(HeapTuple),
+ compare_rows, NULL);
/*
* Estimate total numbers of live and dead rows in relation, extrapolating
@@ -1947,10 +1948,10 @@ acquire_sample_rows(Relation onerel, int elevel,
}
/*
- * qsort comparator for sorting rows[] array
+ * Comparator for sorting rows[] array
*/
static int
-compare_rows(const void *a, const void *b)
+compare_rows(const void *a, const void *b, void *arg)
{
HeapTuple ha = *(const HeapTuple *) a;
HeapTuple hb = *(const HeapTuple *) b;
@@ -3308,7 +3309,7 @@ static void merge_leaf_stats(VacAttrStatsP stats,
int samplerows,
double totalrows);
static int compare_scalars(const void *a, const void *b, void *arg);
-static int compare_mcvs(const void *a, const void *b);
+static int compare_mcvs(const void *a, const void *b, void *arg);
static int analyze_mcv_list(int *mcv_counts,
int num_mcv,
double stadistinct,
@@ -3978,8 +3979,8 @@ compute_scalar_stats(VacAttrStatsP stats,
/* Sort the collected values */
cxt.ssup = &ssup;
cxt.tupnoLink = tupnoLink;
- qsort_arg((void *) values, values_cnt, sizeof(ScalarItem),
- compare_scalars, (void *) &cxt);
+ qsort_interruptible((void *) values, values_cnt, sizeof(ScalarItem),
+ compare_scalars, (void *) &cxt);
/*
* Now scan the values in order, find the most common ones, and also
@@ -4246,8 +4247,8 @@ compute_scalar_stats(VacAttrStatsP stats,
deltafrac;
/* Sort the MCV items into position order to speed next loop */
- qsort((void *) track, num_mcv,
- sizeof(ScalarMCVItem), compare_mcvs);
+ qsort_interruptible((void *) track, num_mcv, sizeof(ScalarMCVItem),
+ compare_mcvs, NULL);
/*
* Collapse out the MCV items from the values[] array.
@@ -5005,7 +5006,7 @@ merge_leaf_stats(VacAttrStatsP stats,
}
/*
- * qsort_arg comparator for sorting ScalarItems
+ * Comparator for sorting ScalarItems
*
* Aside from sorting the items, we update the tupnoLink[] array
* whenever two ScalarItems are found to contain equal datums. The array
@@ -5042,10 +5043,10 @@ compare_scalars(const void *a, const void *b, void *arg)
}
/*
- * qsort comparator for sorting ScalarMCVItems by position
+ * Comparator for sorting ScalarMCVItems by position
*/
static int
-compare_mcvs(const void *a, const void *b)
+compare_mcvs(const void *a, const void *b, void *arg)
{
int da = ((const ScalarMCVItem *) a)->first;
int db = ((const ScalarMCVItem *) b)->first;
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 9175ebfb5ba..3d7d040c462 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -113,6 +113,7 @@
/* GUC variables */
char *default_tablespace = NULL;
char *temp_tablespaces = NULL;
+bool allow_in_place_tablespaces = false;
static void create_tablespace_directories(const char *location,
@@ -295,6 +296,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
Datum newOptions;
List *nonContentOptions = NIL;
char *fileHandler = NULL;
+ bool in_place;
/* Must be super user */
if (!superuser())
@@ -362,12 +364,15 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
(errcode(ERRCODE_INVALID_NAME),
errmsg("tablespace location cannot contain single quotes")));
+ in_place = allow_in_place_tablespaces && strlen(location) == 0;
+
/*
* Allowing relative paths seems risky
*
- * this also helps us ensure that location is not empty or whitespace
+ * This also helps us ensure that location is not empty or whitespace,
+ * unless specifying a developer-only in-place tablespace.
*/
- if (!is_absolute_path(location))
+ if (!in_place && !is_absolute_path(location))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("tablespace location must be an absolute path")));
@@ -862,20 +867,40 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
char *location_with_dbid_dir;
char *location_with_version_dir;
struct stat st;
+ bool in_place;
elog(DEBUG5, "creating tablespace directories for tablespaceoid %d on dbid %d",
tablespaceoid, GpIdentity.dbid);
linkloc = psprintf("pg_tblspc/%u", tablespaceoid);
+
+ /*
+ * If we're asked to make an 'in place' tablespace, create the directory
+ * directly where the symlink would normally go. This is a developer-only
+ * option for now, to facilitate regression testing.
+ */
+ in_place = strlen(location) == 0;
+
+ if (in_place)
+ {
+ if (MakePGDirectory(linkloc) < 0 && errno != EEXIST)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not create directory \"%s\": %m",
+ linkloc)));
+ }
+
location_with_dbid_dir = psprintf("%s/%d", location, GpIdentity.dbid);
- location_with_version_dir = psprintf("%s/%s", location_with_dbid_dir,
+ location_with_version_dir = psprintf("%s/%s", in_place ? linkloc : location_with_dbid_dir,
GP_TABLESPACE_VERSION_DIRECTORY);
/*
* Attempt to coerce target directory to safe permissions. If this fails,
- * it doesn't exist or has the wrong owner.
+ * it doesn't exist or has the wrong owner. Not needed for in-place mode,
+ * because in that case we created the directory with the desired
+ * permissions.
*/
- if (chmod(location, pg_dir_create_mode) != 0)
+ if (!in_place && chmod(location, pg_dir_create_mode) != 0)
{
if (errno == ENOENT)
ereport(ERROR,
@@ -949,13 +974,13 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
/*
* In recovery, remove old symlink, in case it points to the wrong place.
*/
- if (InRecovery)
+ if (!in_place && InRecovery)
remove_tablespace_symlink(linkloc);
/*
* Create the symlink under PGDATA
*/
- if (symlink(location_with_dbid_dir, linkloc) < 0)
+ if (!in_place && symlink(location_with_dbid_dir, linkloc) < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create symbolic link \"%s\": %m",
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 5db53b125ee..4a2ddd5dff3 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -442,6 +442,16 @@ SPI_rollback_and_chain(void)
_SPI_rollback(true);
}
+/*
+ * SPICleanup is a no-op, kept for backwards compatibility. We rely on
+ * AtEOXact_SPI to clean up. Extensions should not (need to) fiddle with the
+ * internal SPI state directly.
+ */
+void
+SPICleanup(void)
+{
+}
+
/*
* Clean up SPI state at transaction commit or abort.
*/
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 9e82bd85c75..dfe348c1f40 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -3487,6 +3487,9 @@ expandNSItemAttrs(ParseState *pstate, ParseNamespaceItem *nsitem,
*
* "*" is returned if the given attnum is InvalidAttrNumber --- this case
* occurs when a Var represents a whole tuple of a relation.
+ *
+ * It is caller's responsibility to not call this on a dropped attribute.
+ * (You will get some answer for such cases, but it might not be sensible.)
*/
char *
get_rte_attribute_name(RangeTblEntry *rte, AttrNumber attnum)
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index aff0b0db05b..ee1c25416bd 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -1135,8 +1135,8 @@ build_sorted_items(StatsBuildData *data, int *nitems,
}
/* do the sort, using the multi-sort */
- qsort_arg((void *) items, nrows, sizeof(SortItem),
- multi_sort_compare, mss);
+ qsort_interruptible((void *) items, nrows, sizeof(SortItem),
+ multi_sort_compare, mss);
return items;
}
diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c
index ef118952c74..e6a60865282 100644
--- a/src/backend/statistics/mcv.c
+++ b/src/backend/statistics/mcv.c
@@ -404,7 +404,7 @@ count_distinct_groups(int numrows, SortItem *items, MultiSortSupport mss)
* order.
*/
static int
-compare_sort_item_count(const void *a, const void *b)
+compare_sort_item_count(const void *a, const void *b, void *arg)
{
SortItem *ia = (SortItem *) a;
SortItem *ib = (SortItem *) b;
@@ -457,8 +457,8 @@ build_distinct_groups(int numrows, SortItem *items, MultiSortSupport mss,
Assert(j + 1 == ngroups);
/* Sort the distinct groups by frequency (in descending order). */
- pg_qsort((void *) groups, ngroups, sizeof(SortItem),
- compare_sort_item_count);
+ qsort_interruptible((void *) groups, ngroups, sizeof(SortItem),
+ compare_sort_item_count, NULL);
*ndistinct = ngroups;
return groups;
@@ -528,8 +528,8 @@ build_column_frequencies(SortItem *groups, int ngroups,
}
/* sort the values, deduplicate */
- qsort_arg((void *) result[dim], ngroups, sizeof(SortItem),
- sort_item_compare, ssup);
+ qsort_interruptible((void *) result[dim], ngroups, sizeof(SortItem),
+ sort_item_compare, ssup);
/*
* Identify distinct values, compute frequency (there might be
@@ -695,8 +695,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
PrepareSortSupportFromOrderingOp(typentry->lt_opr, &ssup[dim]);
- qsort_arg(values[dim], counts[dim], sizeof(Datum),
- compare_scalars_simple, &ssup[dim]);
+ qsort_interruptible(values[dim], counts[dim], sizeof(Datum),
+ compare_scalars_simple, &ssup[dim]);
/*
* Walk through the array and eliminate duplicate values, but keep the
diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c
index 4481312d61d..4b4ecec9361 100644
--- a/src/backend/statistics/mvdistinct.c
+++ b/src/backend/statistics/mvdistinct.c
@@ -488,8 +488,8 @@ ndistinct_for_combination(double totalrows, StatsBuildData *data,
}
/* We can sort the array now ... */
- qsort_arg((void *) items, numrows, sizeof(SortItem),
- multi_sort_compare, mss);
+ qsort_interruptible((void *) items, numrows, sizeof(SortItem),
+ multi_sort_compare, mss);
/* ... and count the number of distinct combinations */
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 6d0afd34356..dd16c3df60a 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -718,18 +718,28 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
if (BufferIsLocal(recent_buffer))
{
- bufHdr = GetBufferDescriptor(-recent_buffer - 1);
+ int b = -recent_buffer - 1;
+
+ bufHdr = GetLocalBufferDescriptor(b);
buf_state = pg_atomic_read_u32(&bufHdr->state);
/* Is it still valid and holding the right tag? */
if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
{
- /* Bump local buffer's ref and usage counts. */
+ /*
+ * Bump the buffer's ref and usage counts. This is the equivalent of
+ * PinBuffer for a shared buffer.
+ */
+ if (LocalRefCount[b] == 0)
+ {
+ if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
+ {
+ buf_state += BUF_USAGECOUNT_ONE;
+ pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
+ }
+ }
+ LocalRefCount[b]++;
ResourceOwnerRememberBuffer(CurrentResourceOwner, recent_buffer);
- LocalRefCount[-recent_buffer - 1]++;
- if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
- pg_atomic_write_u32(&bufHdr->state,
- buf_state + BUF_USAGECOUNT_ONE);
return true;
}
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index 1ebba4b3f56..504ba1569ee 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -44,8 +44,10 @@ static void prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current);
static uint32 lexeme_hash(const void *key, Size keysize);
static int lexeme_match(const void *key1, const void *key2, Size keysize);
static int lexeme_compare(const void *key1, const void *key2);
-static int trackitem_compare_frequencies_desc(const void *e1, const void *e2);
-static int trackitem_compare_lexemes(const void *e1, const void *e2);
+static int trackitem_compare_frequencies_desc(const void *e1, const void *e2,
+ void *arg);
+static int trackitem_compare_lexemes(const void *e1, const void *e2,
+ void *arg);
/*
@@ -347,8 +349,8 @@ compute_tsvector_stats(VacAttrStats *stats,
*/
if (num_mcelem < track_len)
{
- qsort(sort_table, track_len, sizeof(TrackItem *),
- trackitem_compare_frequencies_desc);
+ qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
+ trackitem_compare_frequencies_desc, NULL);
/* reset minfreq to the smallest frequency we're keeping */
minfreq = sort_table[num_mcelem - 1]->frequency;
}
@@ -376,8 +378,8 @@ compute_tsvector_stats(VacAttrStats *stats,
* presorted we can employ binary search for that. See
* ts_selfuncs.c for a real usage scenario.
*/
- qsort(sort_table, num_mcelem, sizeof(TrackItem *),
- trackitem_compare_lexemes);
+ qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
+ trackitem_compare_lexemes, NULL);
/* Must copy the target values into anl_context */
old_context = MemoryContextSwitchTo(stats->anl_context);
@@ -510,10 +512,10 @@ lexeme_compare(const void *key1, const void *key2)
}
/*
- * qsort() comparator for sorting TrackItems on frequencies (descending sort)
+ * Comparator for sorting TrackItems on frequencies (descending sort)
*/
static int
-trackitem_compare_frequencies_desc(const void *e1, const void *e2)
+trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
{
const TrackItem *const *t1 = (const TrackItem *const *) e1;
const TrackItem *const *t2 = (const TrackItem *const *) e2;
@@ -522,10 +524,10 @@ trackitem_compare_frequencies_desc(const void *e1, const void *e2)
}
/*
- * qsort() comparator for sorting TrackItems on lexemes
+ * Comparator for sorting TrackItems on lexemes
*/
static int
-trackitem_compare_lexemes(const void *e1, const void *e2)
+trackitem_compare_lexemes(const void *e1, const void *e2, void *arg)
{
const TrackItem *const *t1 = (const TrackItem *const *) e1;
const TrackItem *const *t2 = (const TrackItem *const *) e2;
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index c5008a0c169..e873d228592 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -86,9 +86,9 @@ static void prune_element_hashtable(HTAB *elements_tab, int b_current);
static uint32 element_hash(const void *key, Size keysize);
static int element_match(const void *key1, const void *key2, Size keysize);
static int element_compare(const void *key1, const void *key2);
-static int trackitem_compare_frequencies_desc(const void *e1, const void *e2);
-static int trackitem_compare_element(const void *e1, const void *e2);
-static int countitem_compare_count(const void *e1, const void *e2);
+static int trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg);
+static int trackitem_compare_element(const void *e1, const void *e2, void *arg);
+static int countitem_compare_count(const void *e1, const void *e2, void *arg);
/*
@@ -502,8 +502,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
*/
if (num_mcelem < track_len)
{
- qsort(sort_table, track_len, sizeof(TrackItem *),
- trackitem_compare_frequencies_desc);
+ qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
+ trackitem_compare_frequencies_desc, NULL);
/* reset minfreq to the smallest frequency we're keeping */
minfreq = sort_table[num_mcelem - 1]->frequency;
}
@@ -522,8 +522,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* the element type's default comparison function. This permits
* fast binary searches in selectivity estimation functions.
*/
- qsort(sort_table, num_mcelem, sizeof(TrackItem *),
- trackitem_compare_element);
+ qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
+ trackitem_compare_element, NULL);
/* Must copy the target values into anl_context */
old_context = MemoryContextSwitchTo(stats->anl_context);
@@ -599,8 +599,9 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
{
sorted_count_items[j++] = count_item;
}
- qsort(sorted_count_items, count_items_count,
- sizeof(DECountItem *), countitem_compare_count);
+ qsort_interruptible(sorted_count_items, count_items_count,
+ sizeof(DECountItem *),
+ countitem_compare_count, NULL);
/*
* Prepare to fill stanumbers with the histogram, followed by the
@@ -751,10 +752,10 @@ element_compare(const void *key1, const void *key2)
}
/*
- * qsort() comparator for sorting TrackItems by frequencies (descending sort)
+ * Comparator for sorting TrackItems by frequencies (descending sort)
*/
static int
-trackitem_compare_frequencies_desc(const void *e1, const void *e2)
+trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
{
const TrackItem *const *t1 = (const TrackItem *const *) e1;
const TrackItem *const *t2 = (const TrackItem *const *) e2;
@@ -763,10 +764,10 @@ trackitem_compare_frequencies_desc(const void *e1, const void *e2)
}
/*
- * qsort() comparator for sorting TrackItems by element values
+ * Comparator for sorting TrackItems by element values
*/
static int
-trackitem_compare_element(const void *e1, const void *e2)
+trackitem_compare_element(const void *e1, const void *e2, void *arg)
{
const TrackItem *const *t1 = (const TrackItem *const *) e1;
const TrackItem *const *t2 = (const TrackItem *const *) e2;
@@ -775,10 +776,10 @@ trackitem_compare_element(const void *e1, const void *e2)
}
/*
- * qsort() comparator for sorting DECountItems by count
+ * Comparator for sorting DECountItems by count
*/
static int
-countitem_compare_count(const void *e1, const void *e2)
+countitem_compare_count(const void *e1, const void *e2, void *arg)
{
const DECountItem *const *t1 = (const DECountItem *const *) e1;
const DECountItem *const *t2 = (const DECountItem *const *) e2;
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index eb8877fd7e8..eba029daa74 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -15,6 +15,7 @@
#include "postgres.h"
#include
+#include <sys/stat.h>
#include
#include
#include
@@ -312,6 +313,9 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
char sourcepath[MAXPGPATH];
char targetpath[MAXPGPATH];
int rllen;
+#ifndef WIN32
+ struct stat st;
+#endif
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
@@ -336,6 +340,31 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
*/
snprintf(sourcepath, sizeof(sourcepath), "pg_tblspc/%u", tablespaceOid);
+ /*
+ * Before reading the link, check if the source path is a link or a
+ * junction point. Note that a directory is possible for a tablespace
+ * created with allow_in_place_tablespaces enabled. If a directory is
+ * found, a relative path to the data directory is returned.
+ */
+#ifdef WIN32
+ if (!pgwin32_is_junction(sourcepath))
+ PG_RETURN_TEXT_P(cstring_to_text(sourcepath));
+#else
+ if (lstat(sourcepath, &st) < 0)
+ {
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file \"%s\": %m",
+ sourcepath)));
+ }
+
+ if (!S_ISLNK(st.st_mode))
+ PG_RETURN_TEXT_P(cstring_to_text(sourcepath));
+#endif
+
+ /*
+ * In the presence of a link or a junction point, return the path it
+ * points to.
+ */
rllen = readlink(sourcepath, targetpath, sizeof(targetpath));
if (rllen < 0)
ereport(ERROR,
diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c
index 0d01252cd7c..9d5cf897c45 100644
--- a/src/backend/utils/adt/rangetypes_typanalyze.c
+++ b/src/backend/utils/adt/rangetypes_typanalyze.c
@@ -32,7 +32,7 @@
#include "utils/rangetypes.h"
#include "utils/multirangetypes.h"
-static int float8_qsort_cmp(const void *a1, const void *a2);
+static int float8_qsort_cmp(const void *a1, const void *a2, void *arg);
static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg);
static void compute_range_stats(VacAttrStats *stats,
AnalyzeAttrFetchFunc fetchfunc, int samplerows,
@@ -93,7 +93,7 @@ multirange_typanalyze(PG_FUNCTION_ARGS)
* Comparison function for sorting float8s, used for range lengths.
*/
static int
-float8_qsort_cmp(const void *a1, const void *a2)
+float8_qsort_cmp(const void *a1, const void *a2, void *arg)
{
const float8 *f1 = (const float8 *) a1;
const float8 *f2 = (const float8 *) a2;
@@ -280,10 +280,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
if (non_empty_cnt >= 2)
{
/* Sort bound values */
- qsort_arg(lowers, non_empty_cnt, sizeof(RangeBound),
- range_bound_qsort_cmp, typcache);
- qsort_arg(uppers, non_empty_cnt, sizeof(RangeBound),
- range_bound_qsort_cmp, typcache);
+ qsort_interruptible(lowers, non_empty_cnt, sizeof(RangeBound),
+ range_bound_qsort_cmp, typcache);
+ qsort_interruptible(uppers, non_empty_cnt, sizeof(RangeBound),
+ range_bound_qsort_cmp, typcache);
num_hist = non_empty_cnt;
if (num_hist > num_bins)
@@ -345,7 +345,8 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* Ascending sort of range lengths for further filling of
* histogram
*/
- qsort(lengths, non_empty_cnt, sizeof(float8), float8_qsort_cmp);
+ qsort_interruptible(lengths, non_empty_cnt, sizeof(float8),
+ float8_qsort_cmp, NULL);
num_hist = non_empty_cnt;
if (num_hist > num_bins)
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index cdbd27d4d95..ea8156bebad 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -58,6 +58,7 @@
#include "parser/parse_node.h"
#include "parser/parse_oper.h"
#include "parser/parse_cte.h"
+#include "parser/parse_relation.h"
#include "parser/parser.h"
#include "parser/parsetree.h"
#include "rewrite/rewriteHandler.h"
@@ -4241,9 +4242,9 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
int j;
/*
- * Extract the RTE's "real" column names. This is comparable to
- * get_rte_attribute_name, except that it's important to disregard dropped
- * columns. We put NULL into the array for a dropped column.
+ * Construct an array of the current "real" column names of the RTE.
+ * real_colnames[] will be indexed by physical column number, with NULL
+ * entries for dropped columns.
*/
if (rte->rtekind == RTE_RELATION)
{
@@ -4270,19 +4271,43 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
}
else
{
- /* Otherwise use the column names from eref */
+ /* Otherwise get the column names from eref or expandRTE() */
+ List *colnames;
ListCell *lc;
- ncolumns = list_length(rte->eref->colnames);
+ /*
+ * Functions returning composites have the annoying property that some
+ * of the composite type's columns might have been dropped since the
+ * query was parsed. If possible, use expandRTE() to handle that
+ * case, since it has the tedious logic needed to find out about
+ * dropped columns. However, if we're explaining a plan, then we
+ * don't have rte->functions because the planner thinks that won't be
+ * needed later, and that breaks expandRTE(). So in that case we have
+ * to rely on rte->eref, which may lead us to report a dropped
+ * column's old name; that seems close enough for EXPLAIN's purposes.
+ *
+ * For non-RELATION, non-FUNCTION RTEs, we can just look at rte->eref,
+ * which should be sufficiently up-to-date: no other RTE types can
+ * have columns get dropped from under them after parsing.
+ */
+ if (rte->rtekind == RTE_FUNCTION && rte->functions != NIL)
+ {
+ /* Since we're not creating Vars, rtindex etc. don't matter */
+ expandRTE(rte, 1, 0, -1, true /* include dropped */ ,
+ &colnames, NULL);
+ }
+ else
+ colnames = rte->eref->colnames;
+
+ ncolumns = list_length(colnames);
real_colnames = (char **) palloc(ncolumns * sizeof(char *));
i = 0;
- foreach(lc, rte->eref->colnames)
+ foreach(lc, colnames)
{
/*
- * If the column name shown in eref is an empty string, then it's
- * a column that was dropped at the time of parsing the query, so
- * treat it as dropped.
+ * If the column name we find here is an empty string, then it's a
+ * dropped column, so change to NULL.
*/
char *cname = strVal(lfirst(lc));
@@ -7296,9 +7321,16 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
elog(ERROR, "invalid attnum %d for relation \"%s\"",
attnum, rte->eref->aliasname);
attname = colinfo->colnames[attnum - 1];
- if (attname == NULL) /* dropped column? */
- elog(ERROR, "invalid attnum %d for relation \"%s\"",
- attnum, rte->eref->aliasname);
+
+ /*
+ * If we find a Var referencing a dropped column, it seems better to
+ * print something (anything) than to fail. In general this should
+ * not happen, but there are specific cases involving functions
+ * returning named composite types where we don't sufficiently enforce
+ * that you can't drop a column that's referenced in some view.
+ */
+ if (attname == NULL)
+ attname = "?dropped?column?";
}
else
{
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 6001982a6d2..10017cb583a 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -4336,6 +4336,7 @@ convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue,
case REGOPERATOROID:
case REGCLASSOID:
case REGTYPEOID:
+ case REGCOLLATIONOID:
case REGCONFIGOID:
case REGDICTIONARYOID:
case REGROLEOID:
@@ -4467,6 +4468,7 @@ convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
case REGOPERATOROID:
case REGCLASSOID:
case REGTYPEOID:
+ case REGCOLLATIONOID:
case REGCONFIGOID:
case REGDICTIONARYOID:
case REGROLEOID:
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 60f643c2d87..5ccb028a1a2 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -240,6 +240,7 @@ GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEq
case REGOPERATOROID:
case REGCLASSOID:
case REGTYPEOID:
+ case REGCOLLATIONOID:
case REGCONFIGOID:
case REGDICTIONARYOID:
case REGROLEOID:
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 353c4988a0a..cb3b1a1cbdd 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -51,6 +51,7 @@
#include "catalog/index.h"
#include "commands/async.h"
#include "commands/prepare.h"
+#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "commands/user.h"
#include "commands/vacuum.h"
@@ -2045,6 +2046,17 @@ static struct config_bool ConfigureNamesBool[] =
NULL, NULL, NULL
},
+ {
+ {"allow_in_place_tablespaces", PGC_SUSET, DEVELOPER_OPTIONS,
+ gettext_noop("Allows tablespaces directly inside pg_tblspc, for testing."),
+ NULL,
+ GUC_NOT_IN_SAMPLE
+ },
+ &allow_in_place_tablespaces,
+ false,
+ NULL, NULL, NULL
+ },
+
{
{"lo_compat_privileges", PGC_SUSET, COMPAT_OPTIONS_PREVIOUS,
gettext_noop("Enables backward compatibility mode for privilege checks on large objects."),
diff --git a/src/backend/utils/sort/Makefile b/src/backend/utils/sort/Makefile
index 26f65fcaf7a..2c31fd453d6 100644
--- a/src/backend/utils/sort/Makefile
+++ b/src/backend/utils/sort/Makefile
@@ -16,6 +16,7 @@ override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS)
OBJS = \
logtape.o \
+ qsort_interruptible.o \
sharedtuplestore.o \
sortsupport.o \
tuplesort.o \
diff --git a/src/backend/utils/sort/qsort_interruptible.c b/src/backend/utils/sort/qsort_interruptible.c
new file mode 100644
index 00000000000..f179b256248
--- /dev/null
+++ b/src/backend/utils/sort/qsort_interruptible.c
@@ -0,0 +1,16 @@
+/*
+ * qsort_interruptible.c: qsort_arg that includes CHECK_FOR_INTERRUPTS
+ */
+
+#include "postgres.h"
+#include "miscadmin.h"
+
+#define ST_SORT qsort_interruptible
+#define ST_ELEMENT_TYPE_VOID
+#define ST_COMPARATOR_TYPE_NAME qsort_arg_comparator
+#define ST_COMPARE_RUNTIME_POINTER
+#define ST_COMPARE_ARG_TYPE void
+#define ST_SCOPE
+#define ST_DEFINE
+#define ST_CHECK_FOR_INTERRUPTS
+#include "lib/sort_template.h"
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 973a6d4639d..456d13c28a3 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -1302,10 +1302,10 @@ check_for_incompatible_polymorphics(ClusterInfo *cluster)
fclose(script);
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains user-defined objects that refer to internal\n"
- "polymorphic functions with arguments of type 'anyarray' or 'anyelement'.\n"
+ "polymorphic functions with arguments of type \"anyarray\" or \"anyelement\".\n"
"These user-defined objects must be dropped before upgrading and restored\n"
"afterwards, changing them to refer to the new corresponding functions with\n"
- "arguments of type 'anycompatiblearray' and 'anycompatible'.\n"
+ "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n"
"A list of the problematic objects is in the file:\n"
" %s\n\n", output_path);
}
diff --git a/src/common/file_utils.c b/src/common/file_utils.c
index 40b73bbe1ab..fd2d11375c9 100644
--- a/src/common/file_utils.c
+++ b/src/common/file_utils.c
@@ -465,5 +465,21 @@ get_dirent_type(const char *path,
#endif
}
+#if defined(WIN32) && !defined(_MSC_VER)
+
+ /*
+ * If we're on native Windows (not Cygwin, which has its own POSIX
+ * symlinks), but not using the MSVC compiler, then we're using a
+ * readdir() emulation provided by the MinGW runtime that has no d_type.
+ * Since the lstat() fallback code reports junction points as directories,
+ * we need an extra system call to check if we should report them as
+ * symlinks instead, following our convention.
+ */
+ if (result == PGFILETYPE_DIR &&
+ !look_through_symlinks &&
+ pgwin32_is_junction(path))
+ result = PGFILETYPE_LNK;
+#endif
+
return result;
}
diff --git a/src/include/commands/tablespace.h b/src/include/commands/tablespace.h
index 1f41964cf75..fe13c5d75d7 100644
--- a/src/include/commands/tablespace.h
+++ b/src/include/commands/tablespace.h
@@ -20,6 +20,8 @@
#include "nodes/parsenodes.h"
#include "storage/dbdirnode.h"
+extern bool allow_in_place_tablespaces;
+
/* XLOG stuff */
#define XLOG_TBLSPC_CREATE 0x00
#define XLOG_TBLSPC_DROP 0x10
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index fc60fdb9584..ef1964b709d 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -205,6 +205,7 @@ extern void SPI_commit_and_chain(void);
extern void SPI_rollback(void);
extern void SPI_rollback_and_chain(void);
+extern void SPICleanup(void);
extern void AtEOXact_SPI(bool isCommit);
extern void AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid);
extern bool SPI_inside_nonatomic_context(void);
diff --git a/src/include/port.h b/src/include/port.h
index cb34aca03eb..c30e558a362 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -511,6 +511,9 @@ typedef int (*qsort_arg_comparator) (const void *a, const void *b, void *arg);
extern void qsort_arg(void *base, size_t nel, size_t elsize,
qsort_arg_comparator cmp, void *arg);
+extern void qsort_interruptible(void *base, size_t nel, size_t elsize,
+ qsort_arg_comparator cmp, void *arg);
+
extern void *bsearch_arg(const void *key, const void *base,
size_t nmemb, size_t size,
int (*compar) (const void *, const void *, void *),
diff --git a/src/include/utils/sync_guc_name.h b/src/include/utils/sync_guc_name.h
index 6d09f49155f..3a99016d813 100644
--- a/src/include/utils/sync_guc_name.h
+++ b/src/include/utils/sync_guc_name.h
@@ -10,6 +10,7 @@
"allow_dml_directory_table",
"allow_segment_DML",
"allow_system_table_mods",
+ "allow_in_place_tablespaces",
"array_nulls",
"backtrace_functions",
"bytea_output",
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index b2c7727f68d..ea3a78420ca 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -1482,6 +1482,7 @@ PQsendQueryInternal(PGconn *conn, const char *query, bool newQuery)
sendFailed:
pqRecycleCmdQueueEntry(conn, entry);
+ pqRecycleCmdQueueEntry(conn, entry2);
/* error message should be set up already */
return 0;
}
diff --git a/src/port/dirent.c b/src/port/dirent.c
index 77b90e7e302..2cd134495ff 100644
--- a/src/port/dirent.c
+++ b/src/port/dirent.c
@@ -106,13 +106,17 @@ readdir(DIR *d)
}
strcpy(d->ret.d_name, fd.cFileName); /* Both strings are MAX_PATH long */
d->ret.d_namlen = strlen(d->ret.d_name);
- /* The only identified types are: directory, regular file or symbolic link */
- if ((fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0)
- d->ret.d_type = DT_DIR;
- /* For reparse points dwReserved0 field will contain the ReparseTag */
- else if ((fd.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0 &&
- (fd.dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT))
+
+ /*
+ * For reparse points, the dwReserved0 field will contain the ReparseTag.
+ * We check this first, because reparse points are also reported as
+ * directories.
+ */
+ if ((fd.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0 &&
+ (fd.dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT))
d->ret.d_type = DT_LNK;
+ else if ((fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0)
+ d->ret.d_type = DT_DIR;
else
d->ret.d_type = DT_REG;
diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out
index 82332a47c11..fdb0657bb72 100644
--- a/src/test/regress/expected/create_view.out
+++ b/src/test/regress/expected/create_view.out
@@ -1551,17 +1551,26 @@ select * from tt14v;
begin;
-- this perhaps should be rejected, but it isn't:
alter table tt14t drop column f3;
--- f3 is still in the view ...
+-- column f3 is still in the view, sort of ...
select pg_get_viewdef('tt14v', true);
- pg_get_viewdef
---------------------------------
- SELECT t.f1, +
- t.f3, +
- t.f4 +
- FROM tt14f() t(f1, f3, f4);
+ pg_get_viewdef
+---------------------------------
+ SELECT t.f1, +
+ t."?dropped?column?" AS f3,+
+ t.f4 +
+ FROM tt14f() t(f1, f4);
(1 row)
--- but will fail at execution
+-- ... and you can even EXPLAIN it ...
+explain (verbose, costs off) select * from tt14v;
+ QUERY PLAN
+----------------------------------------
+ Function Scan on testviewschm2.tt14f t
+ Output: t.f1, t.f3, t.f4
+ Function Call: tt14f()
+(3 rows)
+
+-- but it will fail at execution
select f1, f4 from tt14v;
f1 | f4
-----+----
diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql
index d8f44923945..2e7452ac9ea 100644
--- a/src/test/regress/sql/create_view.sql
+++ b/src/test/regress/sql/create_view.sql
@@ -533,9 +533,11 @@ begin;
-- this perhaps should be rejected, but it isn't:
alter table tt14t drop column f3;
--- f3 is still in the view ...
+-- column f3 is still in the view, sort of ...
select pg_get_viewdef('tt14v', true);
--- but will fail at execution
+-- ... and you can even EXPLAIN it ...
+explain (verbose, costs off) select * from tt14v;
+-- but it will fail at execution
select f1, f4 from tt14v;
select * from tt14v;