From a457d33516a802dcb0c5c6f1c75912b0f0ac6f1a Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 10 Sep 2001 21:58:47 +0000 Subject: [PATCH] Markup and spell-check run over Programmer's Guide (rather incomplete still). --- doc/src/sgml/arch-pg.sgml | 2 +- doc/src/sgml/ecpg.sgml | 76 +++--- doc/src/sgml/extend.sgml | 4 +- doc/src/sgml/indexcost.sgml | 4 +- doc/src/sgml/jdbc.sgml | 64 ++--- doc/src/sgml/libpgeasy.sgml | 52 ++-- doc/src/sgml/libpgtcl.sgml | 127 +++++----- doc/src/sgml/libpq++.sgml | 86 +++---- doc/src/sgml/libpq.sgml | 458 +++++++++++++++++++----------------- doc/src/sgml/lobj.sgml | 40 ++-- doc/src/sgml/odbc.sgml | 82 +++---- doc/src/sgml/plperl.sgml | 20 +- doc/src/sgml/plpython.sgml | 14 +- doc/src/sgml/plsql.sgml | 174 +++++++------- doc/src/sgml/pltcl.sgml | 64 ++--- doc/src/sgml/rules.sgml | 302 ++++++++++++------------ doc/src/sgml/xaggr.sgml | 16 +- doc/src/sgml/xfunc.sgml | 4 +- doc/src/sgml/xindex.sgml | 16 +- doc/src/sgml/xoper.sgml | 32 +-- doc/src/sgml/xplang.sgml | 14 +- 21 files changed, 845 insertions(+), 806 deletions(-) diff --git a/doc/src/sgml/arch-pg.sgml b/doc/src/sgml/arch-pg.sgml index 5a347378e2..fb6906698e 100644 --- a/doc/src/sgml/arch-pg.sgml +++ b/doc/src/sgml/arch-pg.sgml @@ -66,7 +66,7 @@ From that point on, the frontend process and the backend in mind, because the files that can be accessed on a client machine may not be accessible (or may only be accessed - using a different filename) on the database server + using a different file name) on the database server machine. 
You should also be aware that the postmaster and postgres servers run with the user-id of the Postgres diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index d7901bc250..21c131a32f 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -1,5 +1,5 @@ @@ -153,12 +153,12 @@ $Header: /cvsroot/pgsql/doc/src/sgml/ecpg.sgml,v 1.21 2001/09/06 00:23:42 momjia To detect errors from the Postgres server, include a line like: - + exec sql include sqlca; - + in the include section of your file. This will define a struct and a variable with the name sqlca as follows: - + struct sqlca { char sqlcaid[8]; @@ -191,11 +191,11 @@ struct sqlca /* 7: empty */ char sqlext[8]; } sqlca; - + - If an error occured in the last SQL statement. + If an error occurred in the last SQL statement. sqlca.sqlcode will be non-zero. If sqlca.sqlcode is less that 0, this is a serious error, like the database definition does not match the @@ -214,7 +214,7 @@ struct sqlca - -12, Out of memory in line %d. + -12, Out of memory in line %d. Should not normally occur. This indicates your virtual memory is @@ -224,7 +224,7 @@ struct sqlca - -200, Unsupported type %s on line %d. + -200, Unsupported type %s on line %d. Should not normally occur. This indicates the preprocessor has @@ -236,7 +236,7 @@ struct sqlca - -201, Too many arguments line %d. + -201, Too many arguments line %d. This means that Postgres has @@ -248,7 +248,7 @@ struct sqlca - -202, Too few arguments line %d. + -202, Too few arguments line %d. This means that Postgres has @@ -260,7 +260,7 @@ struct sqlca - -203, Too many matches line %d. + -203, Too many matches line %d. This means the query has returned several rows but the @@ -271,7 +271,7 @@ struct sqlca - -204, Not correctly formatted int type: %s line %d. + -204, Not correctly formatted int type: %s line %d. This means the host variable is of type int and @@ -284,7 +284,7 @@ struct sqlca - -205, Not correctly formatted unsigned type: %s line %d. 
+ -205, Not correctly formatted unsigned type: %s line %d. This means the host variable is of type unsigned @@ -298,7 +298,7 @@ struct sqlca - -206, Not correctly formatted floating point type: %s line %d. + -206, Not correctly formatted floating point type: %s line %d. This means the host variable is of type float and @@ -311,7 +311,7 @@ struct sqlca - -207, Unable to convert %s to bool on line %d. + -207, Unable to convert %s to bool on line %d. This means the host variable is of type bool and @@ -322,17 +322,17 @@ struct sqlca - -208, Empty query line %d. + -208, Empty query line %d. - Postgres returned PGRES_EMPTY_QUERY, probably + Postgres returned PGRES_EMPTY_QUERY, probably because the query indeed was empty. - -220, No such connection %s in line %d. + -220, No such connection %s in line %d. The program tried to access a connection that does not exist. @@ -341,7 +341,7 @@ struct sqlca - -221, Not connected in line %d. + -221, Not connected in line %d. The program tried to access a connection that does exist but is @@ -351,7 +351,7 @@ struct sqlca - -230, Invalid statement name %s in line %d. + -230, Invalid statement name %s in line %d. The statement you are trying to use has not been prepared. @@ -360,7 +360,7 @@ struct sqlca - -400, Postgres error: %s line %d. + -400, Postgres error: %s line %d. Some Postgres error. @@ -371,7 +371,7 @@ struct sqlca - -401, Error in transaction processing line %d. + -401, Error in transaction processing line %d. Postgres signaled that we cannot start, @@ -381,7 +381,7 @@ struct sqlca - -402, connect: could not open database %s. + -402, connect: could not open database %s. The connect to the database did not work. @@ -390,10 +390,10 @@ struct sqlca - 100, Data not found line %d. + 100, Data not found line %d. - This is a "normal" error that tells you that what you are quering cannot + This is a "normal" error that tells you that what you are querying cannot be found or you are at the end of the cursor. 
@@ -419,7 +419,7 @@ struct sqlca Oracle's single tasking - Oracle version 7.0 on AIX 3 uses OS-supported locks in shared + Oracle version 7.0 on AIX 3 uses OS-supported locks in shared memory that allow an application designer to link an application in a "single tasking" way. Instead of starting one client process per application process, both the database part and the @@ -526,21 +526,21 @@ struct sqlca The following statements are not implemented thus far: - exec sql allocate + exec sql allocate - exec sql deallocate + exec sql deallocate - SQLSTATE + SQLSTATE @@ -565,10 +565,10 @@ exec sql insert select from statement - sqlwarn[6] + sqlwarn[6] - sqlwarn[6] should be W if the PRECISION + sqlwarn[6] should be W if the PRECISION or SCALE value specified in a SET DESCRIPTOR statement was ignored. @@ -582,7 +582,7 @@ exec sql insert select from statement The Preprocessor - The first four lines written by ecpg to the output are fixed lines. + The first four lines written by ecpg to the output are fixed lines. Two are comments and two are include lines necessary to interface to the library. @@ -629,7 +629,7 @@ exec sql end declare section; - The special types VARCHAR and VARCHAR2 are converted into a named struct + The special types VARCHAR and VARCHAR2 are converted into a named struct for every variable. A declaration like: VARCHAR var[180]; @@ -745,7 +745,7 @@ exec sql connect to connection target; - Finally, the userid and password may be a constant text, a + Finally, the userid and password may be a constant text, a character variable, or a character string. @@ -853,7 +853,7 @@ exec sql rollback; The type as a special symbol. A pointer to the value or a pointer to the pointer. - The size of the variable if it is a char or varchar. + The size of the variable if it is a char or varchar. The number of elements in the array (for array fetches). The offset to the next element in the array (for array fetches). The type of the indicator variable as a special symbol. 
@@ -876,7 +876,7 @@ exec sql rollback; Here is a complete example describing the output of the preprocessor of a - file foo.pgc: + file foo.pgc: exec sql begin declare section; int index; @@ -919,7 +919,7 @@ ECPGdo(__LINE__, NULL, "select res from mytable where index = ? ", The most important function in the library is ECPGdo. It takes a variable number of arguments. Hopefully there are no computers that limit the - number of variables that can be accepted by a varargs() function. This + number of variables that can be accepted by a varargs() function. This can easily add up to 50 or so arguments. @@ -960,7 +960,7 @@ ECPGdo(__LINE__, NULL, "select res from mytable where index = ? ", - ECPGt_EOIT + ECPGt_EOIT An enum telling that there are no more input variables. diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index cda4532a51..eef5cd27e1 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -1,5 +1,5 @@ @@ -236,7 +236,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.10 2001/05/17 21:50:15 pete We use the words procedure - and function more or less interchangably. + and function more or less interchangeably. diff --git a/doc/src/sgml/indexcost.sgml b/doc/src/sgml/indexcost.sgml index 482a2e199e..c3f8ea70e1 100644 --- a/doc/src/sgml/indexcost.sgml +++ b/doc/src/sgml/indexcost.sgml @@ -1,5 +1,5 @@ @@ -154,7 +154,7 @@ amcostestimate (Query *root, The index access costs should be computed in the units used by - src/backend/optimizer/path/costsize.c: a sequential disk block fetch + src/backend/optimizer/path/costsize.c: a sequential disk block fetch has cost 1.0, a nonsequential fetch has cost random_page_cost, and the cost of processing one index tuple should usually be taken as cpu_index_tuple_cost (which is a user-adjustable optimizer parameter). 
diff --git a/doc/src/sgml/jdbc.sgml b/doc/src/sgml/jdbc.sgml index 3063ee432d..63e197cf1f 100644 --- a/doc/src/sgml/jdbc.sgml +++ b/doc/src/sgml/jdbc.sgml @@ -1,5 +1,5 @@ @@ -514,7 +514,7 @@ mycon.setAutoCommit(false); There are two methods of using Large Objects. The first is the standard JDBC way, and is documented here. The other, uses PostgreSQL extensions to - the API, which presents the libpq large object + the API, which presents the libpq large object API to Java, providing even better access to large objects than the standard. Internally, the driver uses the extension to provide large object support. @@ -674,7 +674,7 @@ import org.postgresql.fastpath.*; ... Fastpath fp = ((org.postgresql.Connection)myconn).getFastpathAPI(); - where myconn is an open Connection to PostgreSQL. + where myconn is an open Connection to PostgreSQL. @@ -709,21 +709,21 @@ import org.postgresql.largeobject.*; ... LargeObjectManager lo = ((org.postgresql.Connection)myconn).getLargeObjectAPI(); - where myconn is an open Connection to + where myconn is an open Connection to PostgreSQL. Returns: - LargeObject object that implements the API + LargeObject object that implements the API Throws: - SQLException by LargeObject when initializing for first time + SQLException by LargeObject when initializing for first time @@ -735,9 +735,9 @@ public void addDataType(String type, String name) This allows client code to add a handler for one of PostgreSQL's more unique data types. Normally, a data type not - known by the driver is returned by ResultSet.getObject() as a - PGobject instance. This method allows you to write a class - that extends PGobject, and tell the driver the type name, and + known by the driver is returned by ResultSet.getObject() as a + PGobject instance. This method allows you to write a class + that extends PGobject, and tell the driver the type name, and class name to use. The down side to this, is that you must call this method each time a connection is made. 
@@ -749,7 +749,7 @@ public void addDataType(String type, String name) ((org.postgresql.Connection)myconn).addDataType("mytype","my.class.name"); ... - where myconn is an open Connection to + where myconn is an open Connection to PostgreSQL. The handling class must extend org.postgresql.util.PGobject. @@ -772,7 +772,7 @@ java.lang.Object Fastpath is an API that - exists within the libpq C interface, and allows a client machine + exists within the libpq C interface, and allows a client machine to execute a function on the database backend. Most client code will not need to use this method, but it is provided because the Large Object API uses it. @@ -825,11 +825,11 @@ public Object fastpath(int fnid, Parameters: - fnid - Function id - resulttype - True if the result is an integer, false + fnid - Function id + resulttype - True if the result is an integer, false for other results - args - FastpathArguments to pass to fastpath + args - FastpathArguments to pass to fastpath @@ -855,7 +855,7 @@ public Object fastpath(String name, The mapping for the procedure name to function id needs to - exist, usually to an earlier call to addfunction(). This is + exist, usually to an earlier call to addfunction(). This is the preferred method to call, as function id's can/may change between versions of the backend. 
For an example of how this works, refer to org.postgresql.LargeObject @@ -865,11 +865,11 @@ public Object fastpath(String name, Parameters: - name - Function name - resulttype - True if the result is an integer, false + name - Function name + resulttype - True if the result is an integer, false for other results - args - FastpathArguments to pass to fastpath + args - FastpathArguments to pass to fastpath @@ -899,8 +899,8 @@ public int getInteger(String name, Parameters: - name - Function name - args - Function arguments + name - Function name + args - Function arguments @@ -912,7 +912,7 @@ public int getInteger(String name, Throws: - SQLException if a database-access error occurs or no result + SQLException if a database-access error occurs or no result @@ -930,8 +930,8 @@ public byte[] getData(String name, Parameters: - name - Function name - args - Function arguments + name - Function name + args - Function arguments @@ -943,7 +943,7 @@ public byte[] getData(String name, Throws: - SQLException if a database-access error occurs or no result + SQLException if a database-access error occurs or no result @@ -984,7 +984,7 @@ public void addFunctions(ResultSet rs) throws SQLException PostgreSQL stores the function id's and their corresponding - names in the pg_proc table. To speed things up locally, + names in the pg_proc table. To speed things up locally, instead of querying each function from that table when required, a Hashtable is used. Also, only the function's required are entered into this table, keeping connection @@ -1002,7 +1002,7 @@ public void addFunctions(ResultSet rs) throws SQLException Do not think that manually converting them to the oid's will - work. Okay, they will for now, but they can change during + work. OK, they will for now, but they can change during development (there was some discussion about this for V7.0), so this is implemented to prevent any unwarranted headaches in the future. 
@@ -1023,8 +1023,8 @@ public int getID(String name) throws SQLException This returns the function id associated by its name If - addFunction() or addFunctions() have not been called for this - name, then an SQLException is thrown. + addFunction() or addFunctions() have not been called for this + name, then an SQLException is thrown. @@ -1112,21 +1112,21 @@ public FastpathArg(byte buf[], - buf + buf source array - off + off offset within array - len + len length of data to include @@ -1880,7 +1880,7 @@ Methods - The org.postgresql.largeobject package provides to Java the libpq + The org.postgresql.largeobject package provides to Java the libpq C interface's large object API. It consists of two classes, LargeObjectManager, which deals with creating, opening and deleting large objects, and LargeObject which deals diff --git a/doc/src/sgml/libpgeasy.sgml b/doc/src/sgml/libpgeasy.sgml index e7195ae476..7a177c0dce 100644 --- a/doc/src/sgml/libpgeasy.sgml +++ b/doc/src/sgml/libpgeasy.sgml @@ -1,9 +1,9 @@ - libpgeasy - Simplified C Library + <application>libpgeasy</application> - Simplified C Library Author @@ -23,74 +23,74 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpgeasy.sgml,v 2.5 2001/04/20 15:52 It consists of set of simplified C functions that encapsulate the - functionality of libpq. + functionality of libpq. The functions are: - + PGresult *doquery(char *query); - + - + PGconn *connectdb(char *options); - + - + void disconnectdb(); - + - + int fetch(void *param,...); - + - + int fetchwithnulls(void *param,...); - + - + void reset_fetch(); - + - + void on_error_continue(); - + - + void on_error_stop(); - + - + PGresult *get_result(); - + - + void set_result(PGresult *newres); - + - + void unset_result(PGresult *oldres); - + @@ -118,7 +118,7 @@ void unset_result(PGresult *oldres); fetchwithnulls allows you to retrieve the NULL status of the field by passing an int* after each result pointer, which returns true or false if the field is null. 
- You can always use libpq functions on the PGresult pointer returned + You can always use libpq functions on the PGresult pointer returned by doquery. reset_fetch starts the fetch back at the beginning. diff --git a/doc/src/sgml/libpgtcl.sgml b/doc/src/sgml/libpgtcl.sgml index 24bb53bf53..bc342fa495 100644 --- a/doc/src/sgml/libpgtcl.sgml +++ b/doc/src/sgml/libpgtcl.sgml @@ -1,5 +1,5 @@ - pgtcl - Tcl Binding Library + <application>pgtcl</application> - Tcl Binding Library libpgtcl @@ -10,10 +10,10 @@ -pgtcl is a tcl package for front-end programs +pgtcl is a Tcl package for front-end programs to interface with Postgres backends. It makes most of the functionality of libpq available to -tcl scripts. +Tcl scripts. @@ -35,72 +35,72 @@ This package was originally written by Jolly Chen. - pg_connect + pg_connect opens a connection to the backend server - pg_disconnect + pg_disconnect closes a connection - pg_conndefaults + pg_conndefaults get connection options and their defaults - pg_exec + pg_exec send a query to the backend - pg_result + pg_result manipulate the results of a query - pg_select + pg_select loop over the result of a SELECT statement - pg_listen + pg_listen establish a callback for NOTIFY messages - pg_lo_creat + pg_lo_creat create a large object - pg_lo_open + pg_lo_open open a large object - pg_lo_close + pg_lo_close close a large object - pg_lo_read + pg_lo_read read a large object - pg_lo_write + pg_lo_write write a large object - pg_lo_lseek + pg_lo_lseek seek to a position in a large object - pg_lo_tell + pg_lo_tell return the current seek position of a large object - pg_lo_unlink + pg_lo_unlink delete a large object - pg_lo_import + pg_lo_import import a Unix file into a large object - pg_lo_export + pg_lo_export export a large object into a Unix file @@ -154,7 +154,7 @@ proc getDBs { {host "localhost"} {port "5432"} } { -pgtcl Command Reference Information +<application>pgtcl</application> Command Reference Information @@ -243,7 +243,7 @@ A list 
of valid options can be found in libpq's -tty pqtty -Specifies file or tty for optional debug output from backend. +Specifies file or tty for optional debug output from backend. @@ -273,7 +273,7 @@ A list of valid options can be found in libpq's If successful, a handle for a database connection is returned. -Handles start with the prefix "pgsql". +Handles start with the prefix pgsql. @@ -304,8 +304,9 @@ for info about the available options in the newer syntax. Usage -XXX thomas 1997-12-24 - + + XXX thomas 1997-12-24 + @@ -433,11 +434,11 @@ The result is a list describing the possible connection options and their current default values. Each entry in the list is a sublist of the format: - - {optname label dispchar dispsize value} - + +{optname label dispchar dispsize value} + -where the optname is usable as an option in +where the optname is usable as an option in pg_connect -conninfo. @@ -462,7 +463,7 @@ current default value for each option. Usage -pg_conndefaults +pg_conndefaults @@ -532,7 +533,7 @@ pg_exec dbHandle -A Tcl error will be returned if Pgtcl was unable to obtain a backend +A Tcl error will be returned if pgtcl was unable to obtain a backend response. Otherwise, a query result object is created and a handle for it is returned. This handle can be passed to pg_result to obtain the results of the query. @@ -620,7 +621,7 @@ Specifies one of several possible options. --status + @@ -630,7 +631,7 @@ the status of the result. --error + @@ -640,7 +641,7 @@ the error message, if the status indicates error; otherwise an empty string. --conn + @@ -650,7 +651,7 @@ the connection that produced the result. --oid + @@ -661,7 +662,7 @@ inserted tuple; otherwise an empty string. --numTuples + @@ -671,7 +672,7 @@ the number of tuples returned by the query. --numAttrs + @@ -681,7 +682,7 @@ the number of attributes in each tuple. --list VarName + @@ -691,32 +692,32 @@ assign the results to a list of lists. 
--assign arrayName + assign the results to an array, using subscripts of the form -(tupno,attributeName). +(tupno,attributeName). --assignbyidx arrayName ?appendstr? + assign the results to an array using the first attribute's value and -the remaining attributes' names as keys. If appendstr is given then +the remaining attributes' names as keys. If appendstr is given then it is appended to each key. In short, all but the first field of each tuple are stored into the array, using subscripts of the form -(firstFieldValue,fieldNameAppendStr). +(firstFieldValue,fieldNameAppendStr). --getTuple tupleNumber + @@ -727,18 +728,18 @@ start at zero. --tupleArray tupleNumber arrayName + -stores the fields of the tuple in array arrayName, indexed by field names. +stores the fields of the tuple in array arrayName, indexed by field names. Tuple numbers start at zero. --attributes + @@ -748,17 +749,17 @@ returns a list of the names of the tuple attributes. --lAttributes + -returns a list of sublists, {name ftype fsize} for each tuple attribute. +returns a list of sublists, {name ftype fsize} for each tuple attribute. --clear + @@ -796,7 +797,7 @@ created by a prior pg_exec. You can keep a query result around for as long as you need it, but when you are done with it, be sure to free it by executing pg_result -clear. Otherwise, you have -a memory leak, and Pgtcl will eventually start complaining that you've +a memory leak, and Pgtcl will eventually start complaining that you've created too many query result objects. @@ -1021,9 +1022,9 @@ when a matching notification arrives. pg_listen creates, changes, or cancels a request to listen for asynchronous NOTIFY messages from the -Postgres backend. With a callbackCommand +Postgres backend. With a callbackCommand parameter, the request is established, or the command string of an already -existing request is replaced. With no callbackCommand parameter, a prior +existing request is replaced. 
With no callbackCommand parameter, a prior request is canceled. @@ -1041,8 +1042,8 @@ the idle loop to be entered. -You should not invoke the SQL statements LISTEN or UNLISTEN directly when -using pg_listen. Pgtcl takes care of issuing those +You should not invoke the SQL statements LISTEN or UNLISTEN directly when +using pg_listen. Pgtcl takes care of issuing those statements for you. But if you want to send a NOTIFY message yourself, invoke the SQL NOTIFY statement using pg_exec. @@ -1754,7 +1755,9 @@ pg_lo_unlink conn Identifier for a large object. -XXX Is this the same as objOid in other calls?? - thomas 1998-01-11 + + XXX Is this the same as objOid in other calls?? - thomas 1998-01-11 + @@ -1850,7 +1853,9 @@ Unix file name. None -XXX Does this return a lobjId? Is that the same as the objOid in other calls? thomas - 1998-01-11 + + XXX Does this return a lobjId? Is that the same as the objOid in other calls? thomas - 1998-01-11 + @@ -1919,7 +1924,9 @@ pg_lo_export conn Large object identifier. -XXX Is this the same as the objOid in other calls?? thomas - 1998-01-11 + + XXX Is this the same as the objOid in other calls?? thomas - 1998-01-11 + @@ -1944,7 +1951,9 @@ Unix file name. None -XXX Does this return a lobjId? Is that the same as the objOid in other calls? thomas - 1998-01-11 + + XXX Does this return a lobjId? Is that the same as the objOid in other calls? thomas - 1998-01-11 + diff --git a/doc/src/sgml/libpq++.sgml b/doc/src/sgml/libpq++.sgml index 2bcb5003f0..6d6100a65c 100644 --- a/doc/src/sgml/libpq++.sgml +++ b/doc/src/sgml/libpq++.sgml @@ -1,9 +1,9 @@ - libpq++ - C++ Binding Library + <application>libpq++</application> - C++ Binding Library libpq++ is the C++ API to @@ -74,7 +74,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: libpq++ uses only environment variables or libpq's PQconnectdb - conninfo style strings. + conninfo style strings. 
@@ -84,7 +84,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: PGHOST sets the default server name. If this begins with a slash, it specifies Unix-domain communication rather than TCP/IP communication; the value is the name of the - directory in which the socket file is stored (default "/tmp"). + directory in which the socket file is stored (default /tmp). @@ -103,7 +103,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: PGUSER - sets the username used to connect to the database and for authentication. + sets the user name used to connect to the database and for authentication. @@ -133,7 +133,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: - PGTTY sets the file or tty on which debugging + PGTTY sets the file or tty on which debugging messages from the backend server are displayed. @@ -182,7 +182,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: - libpq++ Classes + <application>libpq++</application> Classes Connection Class: <classname>PgConnection</classname> @@ -199,7 +199,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: The database class provides C++ objects that have a connection to a backend server. To create such an object one first needs - the apropriate environment for the backend to access. + the appropriate environment for the backend to access. The following constructors deal with making a connection to a backend server from a C++ program. @@ -218,7 +218,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: PgConnection::PgConnection(const char *conninfo) Although typically called from one of the access classes, a connection to - a backend server is possible by creating a PgConnection object. + a backend server is possible by creating a PgConnection object. 
@@ -250,10 +250,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: PgDatabase(const char *conninfo) - After a PgDatabase has been created it should be checked to make sure - the connection to the database succeded before sending + After a PgDatabase has been created it should be checked to make sure + the connection to the database succeeded before sending queries to the object. This can easily be done by - retrieving the current status of the PgDatabase object with the + retrieving the current status of the PgDatabase object with the Status or ConnectionBad methods. @@ -274,7 +274,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: PGnotify* PgConnection::Notifies() - See PQnotifies() for details. + See PQnotifies for details. @@ -300,28 +300,28 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: - PGRES_EMPTY_QUERY + PGRES_EMPTY_QUERY - PGRES_COMMAND_OK, if the query was a command + PGRES_COMMAND_OK, if the query was a command - PGRES_TUPLES_OK, if the query successfully returned tuples + PGRES_TUPLES_OK, if the query successfully returned tuples - PGRES_COPY_OUT + PGRES_COPY_OUT - PGRES_COPY_IN + PGRES_COPY_IN - PGRES_BAD_RESPONSE, if an unexpected response was received + PGRES_BAD_RESPONSE, if an unexpected response was received - PGRES_NONFATAL_ERROR + PGRES_NONFATAL_ERROR - PGRES_FATAL_ERROR + PGRES_FATAL_ERROR @@ -394,7 +394,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: FieldNum - PQfnumber Returns the field (attribute) index associated with + PQfnumber Returns the field (attribute) index associated with the given field name. int PgDatabase::FieldNum(const char* field_name) const @@ -463,41 +463,41 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: GetValue - Returns a single field (attribute) value of one tuple of a PGresult. 
+ Returns a single field (attribute) value of one tuple of a PGresult. Tuple and field indices start at 0. const char *PgDatabase::GetValue(int tup_num, int field_num) const - For most queries, the value returned by GetValue is a null-terminated - ASCII string representation of the attribute value. But if BinaryTuples() - is TRUE, the value returned by GetValue is the binary representation + For most queries, the value returned by GetValue is a null-terminated + string representation of the attribute value. But if BinaryTuples + is TRUE, the value returned by GetValue is the binary representation of the type in the internal format of the backend server (but not including the size word, if the field is variable-length). It is then the programmer's responsibility to cast and convert the data to the correct C type. The - pointer returned by GetValue points to storage that is part of the - PGresult structure. One should not modify it, and one must explicitly + pointer returned by GetValue points to storage that is part of the + PGresult structure. One should not modify it, and one must explicitly copy the value into other storage if it is to be used past the lifetime - of the PGresult structure itself. BinaryTuples() is not yet implemented. + of the PGresult structure itself. BinaryTuples is not yet implemented. GetValue - Returns a single field (attribute) value of one tuple of a PGresult. + Returns a single field (attribute) value of one tuple of a PGresult. Tuple and field indices start at 0. const char *PgDatabase::GetValue(int tup_num, const char *field_name) const - For most queries, the value returned by GetValue is a null-terminated - ASCII string representation of the attribute value. But if BinaryTuples() - is TRUE, the value returned by GetValue is the binary representation + For most queries, the value returned by GetValue is a null-terminated + string representation of the attribute value. 
But if BinaryTuples + is TRUE, the value returned by GetValue is the binary representation of the type in the internal format of the backend server (but not including the size word, if the field is variable-length). It is then the programmer's responsibility to cast and convert the data to the correct C type. The - pointer returned by GetValue points to storage that is part of the - PGresult structure. One should not modify it, and one must explicitly + pointer returned by GetValue points to storage that is part of the + PGresult structure. One should not modify it, and one must explicitly copy the value into other storage if it is to be used past the lifetime - of the PGresult structure itself. BinaryTuples() is not yet implemented. + of the PGresult structure itself. BinaryTuples is not yet implemented. @@ -509,9 +509,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: int PgDatabase::GetLength(int tup_num, int field_num) const This is the actual data length for the particular data value, that - is the size of the object pointed to by GetValue. Note that for + is the size of the object pointed to by GetValue. Note that for ASCII-represented values, this size has little to do with the binary - size reported by PQfsize. + size reported by PQfsize. @@ -523,9 +523,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: int PgDatabase::GetLength(int tup_num, const char* field_name) const This is the actual data length for the particular data value, that - is the size of the object pointed to by GetValue. Note that for + is the size of the object pointed to by GetValue. Note that for ASCII-represented values, this size has little to do with the binary - size reported by PQfsize. + size reported by PQfsize. 
@@ -535,7 +535,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: bool GetIsNull(int tup_num, int field_num) const - Note that GetValue will return the empty string for null fields, not + Note that GetValue will return the empty string for null fields, not the NULL pointer. @@ -546,7 +546,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: bool GetIsNull(int tup_num, const char *field_name) const - Note that GetValue will return the empty string for null fields, not + Note that GetValue will return the empty string for null fields, not the NULL pointer. @@ -652,7 +652,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.30 2001/05/09 17:46: - In the past, the documentation has associated the names used for asyncronous + In the past, the documentation has associated the names used for asynchronous notification with relations or classes. However, there is in fact no direct linkage of the two concepts in the implementation, and the named semaphore in fact does not need to have a corresponding relation diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index 90de7811b7..e00e735ca5 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -1,25 +1,25 @@ - libpq - C Library + <application>libpq</application> - C Library libpq - libpq is the C + libpq is the C application programmer's interface to - Postgres. libpq is a set + PostgreSQL. libpq is a set of library routines that allow client programs to pass queries to the - Postgres backend server and to receive the - results of these queries. libpq is also the - underlying engine for several other Postgres - application interfaces, including libpq++ (C++), + PostgreSQL backend server and to receive the + results of these queries. libpq is also the + underlying engine for several other PostgreSQL + application interfaces, including libpq++ (C++), libpgtcl (Tcl), Perl, and - ecpg. 
So some aspects of libpq's behavior will be + ecpg. So some aspects of libpq's behavior will be important to you if you use one of those packages. @@ -47,7 +47,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/libpq.sgml,v 1.70 2001/09/08 16:46:34 peter The following routines deal with making a connection to a - Postgres backend server. The + PostgreSQL backend server. The application program can have several backend connections open at one time. (One reason to do that is to access more than one database.) Each connection is represented by a @@ -69,10 +69,10 @@ PGconn *PQconnectdb(const char *conninfo) This routine opens a new database connection using the parameters taken - from the string conninfo. Unlike PQsetdbLogin() below, + from the string conninfo. Unlike PQsetdbLogin below, the parameter set can be extended without changing the function signature, - so use either of this routine or the non-blocking analogues PQconnectStart - / PQconnectPoll is prefered for application programming. The passed string + so use either of this routine or the non-blocking analogues PQconnectStart + and PQconnectPoll is preferred for application programming. The passed string can be empty to use all default parameters, or it can contain one or more parameter settings separated by whitespace. @@ -106,25 +106,25 @@ PGconn *PQconnectdb(const char *conninfo) IP address of host to connect to. This should be in standard - numbers-and-dots form, as used by the BSD functions inet_aton et al. If + numbers-and-dots form, as used by the BSD functions inet_aton et al. If a non-zero-length string is specified, TCP/IP communication is used. - Using hostaddr instead of host allows the application to avoid a host + Using hostaddr instead of host allows the application to avoid a host name look-up, which may be important in applications with time constraints. However, Kerberos authentication requires the host name. The following therefore applies. 
If host is specified without - hostaddr, a hostname look-up is forced. If hostaddr is specified without - host, the value for hostaddr gives the remote address; if Kerberos is - used, this causes a reverse name query. If both host and hostaddr are - specified, the value for hostaddr gives the remote address; the value + hostaddr, a host name lookup is forced. If hostaddr is specified without + host, the value for hostaddr gives the remote address; if Kerberos is + used, this causes a reverse name query. If both host and hostaddr are + specified, the value for hostaddr gives the remote address; the value for host is ignored, unless Kerberos is used, in which case that value is used for Kerberos authentication. Note that authentication is likely - to fail if libpq is passed a host name that is not the name of the - machine at hostaddr. + to fail if libpq is passed a host name that is not the name of the + machine at hostaddr. - Without either a host name or host address, libpq will connect using a + Without either a host name or host address, libpq will connect using a local Unix domain socket. @@ -135,7 +135,7 @@ PGconn *PQconnectdb(const char *conninfo) Port number to connect to at the server host, - or socket filename extension for Unix-domain connections. + or socket file name extension for Unix-domain connections. @@ -180,7 +180,7 @@ PGconn *PQconnectdb(const char *conninfo) tty - A file or tty for optional debug output from the backend. + A file or tty for optional debug output from the backend. @@ -189,7 +189,7 @@ PGconn *PQconnectdb(const char *conninfo) requiressl - Set to '1' to require SSL connection to the backend. Libpq + Set to '1' to require SSL connection to the backend. Libpq will then refuse to connect if the server does not support SSL. Set to '0' (default) to negotiate with server. 
@@ -235,8 +235,8 @@ PGconn *PQsetdb(char *pghost, char *pgtty, char *dbName) - This is a macro that calls PQsetdbLogin() with null pointers - for the login and pwd parameters. It is provided primarily + This is a macro that calls PQsetdbLogin with null pointers + for the login and pwd parameters. It is provided primarily for backward compatibility with old programs. @@ -258,24 +258,24 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) The database connection is made using the parameters taken from the string - conninfo, passed to PQconnectStart. This string is in - the same format as described above for PQconnectdb. + conninfo, passed to PQconnectStart. This string is in + the same format as described above for PQconnectdb. - Neither PQconnectStart nor PQconnectPoll will block, as long as a number of + Neither PQconnectStart nor PQconnectPoll will block, as long as a number of restrictions are met: - The hostaddr and host parameters are used appropriately to ensure that + The hostaddr and host parameters are used appropriately to ensure that name and reverse name queries are not made. See the documentation of - these parameters under PQconnectdb above for details. + these parameters under PQconnectdb above for details. - If you call PQtrace, ensure that the stream object into which you trace + If you call PQtrace, ensure that the stream object into which you trace will not block. @@ -283,7 +283,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) You ensure for yourself that the socket is in the appropriate state - before calling PQconnectPoll, as described below. + before calling PQconnectPoll, as described below. @@ -291,68 +291,91 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) To begin, call conn=PQconnectStart("<connection_info_string>"). - If conn is NULL, then libpq has been unable to allocate a new PGconn - structure. 
Otherwise, a valid PGconn pointer is returned (though not yet + If conn is NULL, then libpq has been unable to allocate a new PGconn + structure. Otherwise, a valid PGconn pointer is returned (though not yet representing a valid connection to the database). On return from - PQconnectStart, call status=PQstatus(conn). If status equals - CONNECTION_BAD, PQconnectStart has failed. + PQconnectStart, call status=PQstatus(conn). If status equals + CONNECTION_BAD, PQconnectStart has failed. - If PQconnectStart succeeds, the next stage is to poll libpq so that it may + If PQconnectStart succeeds, the next stage is to poll libpq so that it may proceed with the connection sequence. Loop thus: Consider a connection - 'inactive' by default. If PQconnectPoll last returned PGRES_POLLING_ACTIVE, - consider it 'active' instead. If PQconnectPoll(conn) last returned - PGRES_POLLING_READING, perform a select for reading on PQsocket(conn). If - it last returned PGRES_POLLING_WRITING, perform a select for writing on - PQsocket(conn). If you have yet to call PQconnectPoll, i.e. after the call - to PQconnectStart, behave as if it last returned PGRES_POLLING_WRITING. If - the select shows that the socket is ready, consider it 'active'. If it has - been decided that this connection is 'active', call PQconnectPoll(conn) - again. If this call returns PGRES_POLLING_FAILED, the connection procedure - has failed. If this call returns PGRES_POLLING_OK, the connection has been + inactive by default. If PQconnectPoll last returned PGRES_POLLING_ACTIVE, + consider it active instead. If PQconnectPoll(conn) last returned + PGRES_POLLING_READING, perform a select for reading on PQsocket(conn). If + it last returned PGRES_POLLING_WRITING, perform a select for writing on + PQsocket(conn). If you have yet to call PQconnectPoll, i.e. after the call + to PQconnectStart, behave as if it last returned PGRES_POLLING_WRITING. If + the select shows that the socket is ready, consider it active. 
If it has + been decided that this connection is active, call PQconnectPoll(conn) + again. If this call returns PGRES_POLLING_FAILED, the connection procedure + has failed. If this call returns PGRES_POLLING_OK, the connection has been successfully made. + - Note that the use of select() to ensure that the socket is ready is merely + Note that the use of select() to ensure that the socket is ready is merely a (likely) example; those with other facilities available, such as a - poll() call, may of course use that instead. + poll() call, may of course use that instead. + At any time during connection, the status of the connection may be - checked, by calling PQstatus. If this is CONNECTION_BAD, then the - connection procedure has failed; if this is CONNECTION_OK, then the + checked, by calling PQstatus. If this is CONNECTION_BAD, then the + connection procedure has failed; if this is CONNECTION_OK, then the connection is ready. Either of these states should be equally detectable - from the return value of PQconnectPoll, as above. Other states may be + from the return value of PQconnectPoll, as above. Other states may be shown during (and only during) an asynchronous connection procedure. These indicate the current stage of the connection procedure, and may be useful to provide feedback to the user for example. These statuses may include: - - - - CONNECTION_STARTED: Waiting for connection to be made. - - - - - CONNECTION_MADE: Connection OK; waiting to send. - - - - - CONNECTION_AWAITING_RESPONSE: Waiting for a response from the postmaster. - - - - - CONNECTION_AUTH_OK: Received authentication; waiting for backend start-up. - - - - - CONNECTION_SETENV: Negotiating environment. - - - + + + + CONNECTION_STARTED + + + Waiting for connection to be made. + + + + + + CONNECTION_MADE + + + Connection OK; waiting to send. + + + + + + CONNECTION_AWAITING_RESPONSE + + + Waiting for a response from the postmaster. 
+ + + + + + CONNECTION_AUTH_OK + + + Received authentication; waiting for backend start-up. + + + + + + CONNECTION_SETENV + + + + Negotiating environment. + + + + Note that, although these constants will remain (in order to maintain compatibility) an application should never rely upon these appearing in a @@ -376,16 +399,20 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) } + - Note that if PQconnectStart returns a non-NULL pointer, you must call - PQfinish when you are finished with it, in order to dispose of + Note that if PQconnectStart returns a non-NULL pointer, you must call + PQfinish when you are finished with it, in order to dispose of the structure and any associated memory blocks. This must be done even if a - call to PQconnectStart or PQconnectPoll failed. + call to PQconnectStart or PQconnectPoll failed. + - PQconnectPoll will currently block if libpq is compiled with USE_SSL + PQconnectPoll will currently block if + libpq is compiled with USE_SSL defined. This restriction may be removed in the future. + These functions leave the socket in a non-blocking state as if PQsetnonblocking had been called. @@ -414,22 +441,24 @@ struct PQconninfoOption } Returns a connection options array. This may - be used to determine all possible PQconnectdb options and their + be used to determine all possible PQconnectdb options and their current default values. The return value points to an array of - PQconninfoOption structs, which ends with an entry having a NULL - keyword pointer. Note that the default values ("val" fields) + PQconninfoOption structs, which ends with an entry having a NULL + keyword pointer. Note that the default values (val fields) will depend on environment variables and other context. Callers must treat the connection options data as read-only. + After processing the options array, free it by passing it to - PQconninfoFree(). If this is not done, a small amount of memory - is leaked for each call to PQconndefaults(). + PQconninfoFree. 
If this is not done, a small amount of memory + is leaked for each call to PQconndefaults. + - In Postgres versions before 7.0, PQconndefaults() returned a pointer + In PostgreSQL versions before 7.0, PQconndefaults returned a pointer to a static array, rather than a dynamically allocated array. That - wasn't thread-safe, so the behavior has been changed. + was not thread-safe, so the behavior has been changed. @@ -437,14 +466,14 @@ struct PQconninfoOption PQfinish Close the connection to the backend. Also frees - memory used by the PGconn object. + memory used by the PGconn object. void PQfinish(PGconn *conn) Note that even if the backend connection attempt fails (as - indicated by PQstatus), the application should call PQfinish - to free the memory used by the PGconn object. - The PGconn pointer should not be used after PQfinish has been called. + indicated by PQstatus), the application should call PQfinish + to free the memory used by the PGconn object. + The PGconn pointer should not be used after PQfinish has been called. @@ -477,14 +506,14 @@ PostgresPollingStatusType PQresetPoll(PGconn *conn); These functions will close the connection to the backend and attempt to reestablish a new connection to the same postmaster, using all the same parameters previously used. This may be useful for error recovery if a - working connection is lost. They differ from PQreset (above) in that they + working connection is lost. They differ from PQreset (above) in that they act in a non-blocking manner. These functions suffer from the same - restrictions as PQconnectStart and PQconnectPoll. + restrictions as PQconnectStart and PQconnectPoll. - Call PQresetStart. If it returns 0, the reset has failed. If it returns 1, - poll the reset using PQresetPoll in exactly the same way as you would - create the connection using PQconnectPoll. + Call PQresetStart. If it returns 0, the reset has failed. 
(Beginning in PostgreSQL release 6.4,
@@ -620,9 +649,9 @@ char *PQerrorMessage(const PGconn* conn); - Nearly all libpq functions will set + Nearly all libpq functions will set PQerrorMessage if they fail. - Note that by libpq convention, a non-empty + Note that by libpq convention, a non-empty PQerrorMessage will include a trailing newline. @@ -655,13 +684,13 @@ int PQbackendPID(const PGconn *conn); SSL *PQgetssl(const PGconn *conn); This structure can be used to verify encryption levels, check - server certificate and more. Refer to the OpenSSL documentation + server certificate and more. Refer to the SSL documentation for information about this structure. You must define USE_SSL in order to get the prototype for this function. Doing this will also - automatically include ssl.h from OpenSSL. + automatically include ssl.h from OpenSSL. @@ -684,19 +713,19 @@ SQL queries and commands. PQexec - Submit a query to Postgres + Submit a query to the server and wait for the result. PGresult *PQexec(PGconn *conn, const char *query); - Returns a PGresult pointer or possibly a NULL pointer. + Returns a PGresult pointer or possibly a NULL pointer. A non-NULL pointer will generally be returned except in out-of-memory conditions or serious errors such as inability to send the query to the backend. If a NULL is returned, it - should be treated like a PGRES_FATAL_ERROR result. Use - PQerrorMessage to get more information about the error. + should be treated like a PGRES_FATAL_ERROR result. Use + PQerrorMessage to get more information about the error. @@ -705,13 +734,13 @@ PGresult *PQexec(PGconn *conn, The PGresult structure encapsulates the query result returned by the backend. libpq application programmers should be careful to -maintain the PGresult abstraction. Use the accessor functions below to get -at the contents of PGresult. Avoid directly referencing the fields of the -PGresult structure because they are subject to change in the future. 
-(Beginning in Postgres release 6.4, the -definition of struct PGresult is not even provided in libpq-fe.h. If you -have old code that accesses PGresult fields directly, you can keep using it -by including libpq-int.h too, but you are encouraged to fix the code +maintain the PGresult abstraction. Use the accessor functions below to get +at the contents of PGresult. Avoid directly referencing the fields of the +PGresult structure because they are subject to change in the future. +(Beginning in PostgreSQL 6.4, the +definition of struct PGresult is not even provided in libpq-fe.h. If you +have old code that accesses PGresult fields directly, you can keep using it +by including libpq-int.h too, but you are encouraged to fix the code soon.) @@ -723,7 +752,8 @@ soon.) ExecStatusType PQresultStatus(const PGresult *res) -PQresultStatus can return one of the following values: +PQresultStatus can return one of the following values: + PGRES_EMPTY_QUERY -- The string sent to the backend was empty. @@ -783,10 +813,10 @@ char *PQresultErrorMessage(const PGresult *res); Immediately following a PQexec or PQgetResult call, PQerrorMessage (on the connection) will return the same string as PQresultErrorMessage (on the result). However, a -PGresult will retain its error message +PGresult will retain its error message until destroyed, whereas the connection's error message will change when subsequent operations are done. Use PQresultErrorMessage when you want to -know the status associated with a particular PGresult; use PQerrorMessage +know the status associated with a particular PGresult; use PQerrorMessage when you want to know the status from the latest operation on the connection. @@ -794,13 +824,13 @@ when you want to know the status from the latest operation on the connection. PQclear - Frees the storage associated with the PGresult. - Every query result should be freed via PQclear when + Frees the storage associated with the PGresult. 
+ Every query result should be freed via PQclear when it is no longer needed. void PQclear(PQresult *res); - You can keep a PGresult object around for as long as you + You can keep a PGresult object around for as long as you need it; it does not go away when you issue a new query, nor even if you close the connection. To get rid of it, you must call PQclear. Failure to do this will @@ -811,17 +841,17 @@ void PQclear(PQresult *res); PQmakeEmptyPGresult - Constructs an empty PGresult object with the given status. + Constructs an empty PGresult object with the given status. PGresult* PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status); -This is libpq's internal routine to allocate and initialize an empty -PGresult object. It is exported because some applications find it +This is libpq's internal routine to allocate and initialize an empty +PGresult object. It is exported because some applications find it useful to generate result objects (particularly objects with error -status) themselves. If conn is not NULL and status indicates an error, -the connection's current errorMessage is copied into the PGresult. -Note that PQclear should eventually be called on the object, just -as with a PGresult returned by libpq itself. +status) themselves. If conn is not NULL and status indicates an error, +the connection's current errorMessage is copied into the PGresult. +Note that PQclear should eventually be called on the object, just +as with a PGresult returned by libpq itself. @@ -931,8 +961,8 @@ Oid PQftype(const PGresult *res, int field_index); You can query the system table pg_type to obtain -the name and properties of the various datatypes. The OIDs -of the built-in datatypes are defined in src/include/catalog/pg_type.h +the name and properties of the various data types. The OIDs +of the built-in data types are defined in src/include/catalog/pg_type.h in the source tree. 
@@ -960,7 +990,7 @@ int PQfmod(const PGresult *res, int PQfsize(const PGresult *res, int field_index); - PQfsize returns the space allocated for this field in a database + PQfsize returns the space allocated for this field in a database tuple, in other words the size of the server's binary representation of the data type. -1 is returned if the field is variable size. @@ -990,7 +1020,7 @@ extracts data from a BINARY cursor. PQgetvalue Returns a single field (attribute) value of one tuple - of a PGresult. + of a PGresult. Tuple and field indices start at 0. char* PQgetvalue(const PGresult *res, @@ -1007,10 +1037,10 @@ type in the internal format of the backend server It is then the programmer's responsibility to cast and convert the data to the correct C type. The pointer returned by PQgetvalue points to storage that is -part of the PGresult structure. One should not modify it, +part of the PGresult structure. One should not modify it, and one must explicitly copy the value into other storage if it is to -be used past the lifetime of the PGresult structure itself. +be used past the lifetime of the PGresult structure itself. @@ -1025,7 +1055,7 @@ int PQgetisnull(const PGresult *res, int field_num); This function returns 1 if the field contains a NULL, 0 if - it contains a non-null value. (Note that PQgetvalue + it contains a non-null value. (Note that PQgetvalue will return an empty string, not a null pointer, for a NULL field.) @@ -1042,8 +1072,8 @@ int PQgetlength(const PGresult *res, int field_num); This is the actual data length for the particular data value, that is the -size of the object pointed to by PQgetvalue. Note that for ASCII-represented -values, this size has little to do with the binary size reported by PQfsize. +size of the object pointed to by PQgetvalue. Note that for character-represented +values, this size has little to do with the binary size reported by PQfsize. @@ -1086,7 +1116,7 @@ function is no longer actively supported. 
PQcmdStatus Returns the command status string from the SQL command that - generated the PGresult. + generated the PGresult. char * PQcmdStatus(const PGresult *res); @@ -1101,7 +1131,7 @@ char * PQcmdStatus(const PGresult *res); char * PQcmdTuples(const PGresult *res); If the SQL command that generated the - PGresult was INSERT, UPDATE or DELETE, this returns a + PGresult was INSERT, UPDATE or DELETE, this returns a string containing the number of rows affected. If the command was anything else, it returns the empty string. @@ -1168,8 +1198,8 @@ done from a signal handler, but not otherwise.) -PQexec can return only one PGresult structure. If the submitted query -string contains multiple SQL commands, all but the last PGresult are +PQexec can return only one PGresult structure. If the submitted query +string contains multiple SQL commands, all but the last PGresult are discarded by PQexec. @@ -1202,8 +1232,8 @@ connection to the backend. int PQsetnonblocking(PGconn *conn, int arg) - Sets the state of the connection to nonblocking if arg is TRUE, - blocking if arg is FALSE. Returns 0 if OK, -1 if error. + Sets the state of the connection to nonblocking if arg is 1, + blocking if arg is 0. Returns 0 if OK, -1 if error. In the nonblocking state, calls to @@ -1219,7 +1249,7 @@ int PQsetnonblocking(PGconn *conn, int arg) completes. - More of libpq is expected to be made safe for + More of libpq is expected to be made safe for PQsetnonblocking functionality in the near future. @@ -1231,17 +1261,17 @@ int PQsetnonblocking(PGconn *conn, int arg) int PQisnonblocking(const PGconn *conn) - Returns TRUE if the connection is set to non-blocking mode, - FALSE if blocking. + Returns 1 if the connection is set to non-blocking mode, + 0 if blocking. PQsendQuery - Submit a query to Postgres without - waiting for the result(s). 
TRUE is returned if the query was - successfully dispatched, FALSE if not (in which case, use + Submit a query to PostgreSQL without + waiting for the result(s). 1 is returned if the query was + successfully dispatched, 0 if not (in which case, use PQerrorMessage to get more information about the failure). int PQsendQuery(PGconn *conn, @@ -1400,7 +1430,7 @@ can also attempt to cancel a query that is still being processed by the backend. PQrequestCancel - Request that Postgres abandon + Request that PostgreSQL abandon processing of the current query. int PQrequestCancel(PGconn *conn); @@ -1430,7 +1460,7 @@ will abort the whole transaction. So, it is also possible to use it in conjunction with plain PQexec, if the decision to cancel can be made in a signal handler. For example, psql invokes -PQrequestCancel from a SIGINT signal handler, thus allowing +PQrequestCancel from a SIGINT signal handler, thus allowing interactive cancellation of queries that it issues through PQexec. Note that PQrequestCancel will have no effect if the connection is not currently open or the backend is not currently processing a query. @@ -1442,7 +1472,7 @@ is not currently open or the backend is not currently processing a query. Fast Path -Postgres provides a fast path interface to send +PostgreSQL provides a fast path interface to send function calls to the backend. This is a trapdoor into system internals and can be a potential security hole. Most users will not need this feature. @@ -1460,19 +1490,19 @@ PGresult* PQfn(PGconn* conn, const PQArgBlock *args, int nargs); - The fnid argument is the object identifier of the function to be + The fnid argument is the object identifier of the function to be executed. - result_buf is the buffer in which + result_buf is the buffer in which to place the return value. The caller must have allocated sufficient space to store the return value (there is no check!). The actual result length will be returned in the integer pointed - to by result_len. 
If a 4-byte integer result is expected, set - result_is_int to 1; otherwise set it to 0. (Setting result_is_int to 1 - tells libpq to byte-swap the value if necessary, so that it is + to by result_len. If a 4-byte integer result is expected, set + result_is_int to 1; otherwise set it to 0. (Setting result_is_int to 1 + tells libpq to byte-swap the value if necessary, so that it is delivered as a proper int value for the client machine. When result_is_int is 0, the byte string sent by the backend is returned unmodified.) - args and nargs specify the arguments to be passed to the function. + args and nargs specify the arguments to be passed to the function. typedef struct { int len; @@ -1483,9 +1513,9 @@ typedef struct { } u; } PQArgBlock; - PQfn always returns a valid PGresult*. The resultStatus + PQfn always returns a valid PGresult*. The resultStatus should be checked before the result is used. The - caller is responsible for freeing the PGresult with + caller is responsible for freeing the PGresult with PQclear when it is no longer needed. @@ -1498,11 +1528,11 @@ typedef struct { Asynchronous Notification -Postgres supports asynchronous notification via the -LISTEN and NOTIFY commands. A backend registers its interest in a particular -notification condition with the LISTEN command (and can stop listening -with the UNLISTEN command). All backends listening on a -particular condition will be notified asynchronously when a NOTIFY of that +PostgreSQL supports asynchronous notification via the +LISTEN and NOTIFY commands. A backend registers its interest in a particular +notification condition with the LISTEN command (and can stop listening +with the UNLISTEN command). All backends listening on a +particular condition will be notified asynchronously when a NOTIFY of that condition name is executed by any backend. No additional information is passed from the notifier to the listener. 
Thus, typically, any actual data that needs to be communicated is transferred through a database relation. @@ -1511,9 +1541,9 @@ not necessary for there to be any associated relation. -libpq applications submit LISTEN and UNLISTEN -commands as ordinary SQL queries. Subsequently, arrival of NOTIFY -messages can be detected by calling PQnotifies(). +libpq applications submit LISTEN and UNLISTEN +commands as ordinary SQL queries. Subsequently, arrival of NOTIFY +messages can be detected by calling PQnotifies. @@ -1533,14 +1563,14 @@ typedef struct pgNotify { int be_pid; /* process id of backend */ } PGnotify; -After processing a PGnotify object returned by PQnotifies, +After processing a PGnotify object returned by PQnotifies, be sure to free it with free() to avoid a memory leak. - In Postgres 6.4 and later, - the be_pid is the notifying backend's, - whereas in earlier versions it was always your own backend's PID. + In PostgreSQL 6.4 and later, + the be_pid is that of the notifying backend, + whereas in earlier versions it was always the PID of your own backend. @@ -1589,7 +1619,7 @@ if any notifications came in during the processing of the query. - The COPY command in Postgres has options to read from + The COPY command in PostgreSQL has options to read from or write to the network connection used by libpq. Therefore, functions are necessary to access this network connection directly so applications may take advantage of this capability. @@ -1660,7 +1690,7 @@ application should call PQconsumeInput and end-of-data signal is detected. Unlike PQgetline, this routine takes responsibility for detecting end-of-data. On each call, PQgetlineAsync will return data if a complete newline- -terminated data line is available in libpq's input buffer, or if the +terminated data line is available in libpq's input buffer, or if the incoming data line is too long to fit in the buffer offered by the caller. Otherwise, no data is returned until the rest of the line arrives. 
@@ -1675,7 +1705,7 @@ the caller is too small to hold a line sent by the backend, then a partial data line will be returned. This can be detected by testing whether the last returned byte is "\n" or not. The returned string is not null-terminated. (If you want to add a -terminating null, be sure to pass a bufsize one smaller than the room +terminating null, be sure to pass a bufsize one smaller than the room actually available.) @@ -1788,7 +1818,7 @@ void PQtrace(PGconn *conn PQuntrace - Disable tracing started by PQtrace + Disable tracing started by PQtrace. void PQuntrace(PGconn *conn) @@ -1842,7 +1872,7 @@ defaultNoticeProcessor(void * arg, const char * message) To use a special notice processor, call PQsetNoticeProcessor just after -creation of a new PGconn object. +creation of a new PGconn object. @@ -1853,9 +1883,9 @@ but the current pointer is returned. Once you have set a notice processor, you should expect that that function -could be called as long as either the PGconn object or PGresult objects -made from it exist. At creation of a PGresult, the PGconn's current -notice processor pointer is copied into the PGresult for possible use by +could be called as long as either the PGconn object or PGresult objects +made from it exist. At creation of a PGresult, the PGconn's current +notice processor pointer is copied into the PGresult for possible use by routines like PQgetvalue. @@ -1884,7 +1914,7 @@ application programs. PGHOST sets the default server name. If this begins with a slash, it specifies Unix-domain communication rather than TCP/IP communication; the value is the name of the -directory in which the socket file is stored (default "/tmp"). +directory in which the socket file is stored (default /tmp). @@ -1894,7 +1924,7 @@ directory in which the socket file is stored (default "/tmp"). PGPORT sets the default TCP port number or Unix-domain socket file extension for communicating with the -Postgres backend. +PostgreSQL backend. 
@@ -1903,7 +1933,7 @@ socket file extension for communicating with the PGDATABASE PGDATABASE sets the default -Postgres database name. +PostgreSQL database name. @@ -1912,7 +1942,7 @@ socket file extension for communicating with the PGUSER PGUSER -sets the username used to connect to the database and for authentication. +sets the user name used to connect to the database and for authentication. @@ -1927,8 +1957,8 @@ sets the password used if the backend demands password authentication. PGREALM sets the Kerberos realm to use with -Postgres, if it is different from the local realm. -If PGREALM is set, Postgres +PostgreSQL, if it is different from the local realm. +If PGREALM is set, PostgreSQL applications will attempt authentication with servers for this realm and use separate ticket files to avoid conflicts with local ticket files. This environment variable is only @@ -1938,7 +1968,7 @@ used if Kerberos authentication is selected by the backend. PGOPTIONS sets additional runtime options for -the Postgres backend. +the PostgreSQL backend. @@ -1952,7 +1982,7 @@ messages from the backend server are displayed. The following environment variables can be used to specify user-level default -behavior for every Postgres session: +behavior for every PostgreSQL session: @@ -1971,7 +2001,7 @@ sets the default time zone. PGCLIENTENCODING sets the default client encoding (if MULTIBYTE support was selected -when configuring Postgres). +when configuring PostgreSQL). @@ -1979,7 +2009,7 @@ when configuring Postgres). The following environment variables can be used to specify default internal -behavior for every Postgres session: +behavior for every PostgreSQL session: @@ -2008,22 +2038,22 @@ for information on correct values for these environment variables. libpq is thread-safe as of -Postgres 7.0, so long as no two threads -attempt to manipulate the same PGconn object at the same time. 
In particular, -you can't issue concurrent queries from different threads through the same +PostgreSQL 7.0, so long as no two threads +attempt to manipulate the same PGconn object at the same time. In particular, +you cannot issue concurrent queries from different threads through the same connection object. (If you need to run concurrent queries, start up multiple connections.) -PGresult objects are read-only after creation, and so can be passed around +PGresult objects are read-only after creation, and so can be passed around freely between threads. The deprecated functions PQoidStatus and fe_setauthsvc are not thread-safe and should not be -used in multi-thread programs. PQoidStatus can be +used in multithread programs. PQoidStatus can be replaced by PQoidValue. There is no good reason to call fe_setauthsvc at all. @@ -2032,10 +2062,10 @@ call fe_setauthsvc at all. - Building Libpq Programs + Building <application>Libpq</application> Programs - To build (i.e., compile and link) your libpq programs you need to + To build (i.e., compile and link) your libpq programs you need to do all of the following things: @@ -2101,10 +2131,10 @@ testlibpq.c:8:22: libpq-fe.h: No such file or directory When linking the final program, specify the option - -lpq so that the libpq library gets pulled + -lpq so that the libpq library gets pulled in, as well as the option -Ldirectory to - point it to the directory where libpq resides. (Again, the + point it to the directory where the libpq library resides. (Again, the compiler will search some directories by default.) For maximum portability, put the option before the option. 
For example: @@ -2158,7 +2188,7 @@ testlibpq.o(.text+0xa4): undefined reference to `PQerrorMessage' Example Programs - libpq Example Program 1 + <application>libpq</application> Example Program 1 /* @@ -2291,7 +2321,7 @@ main() - libpq Example Program 2 + <application>libpq</application> Example Program 2 /* @@ -2411,7 +2441,7 @@ main() - libpq Example Program 3</> + <title><application>libpq</application> Example Program 3</> <programlisting> /* diff --git a/doc/src/sgml/lobj.sgml b/doc/src/sgml/lobj.sgml index 9467b85adf..25ebce1f64 100644 --- a/doc/src/sgml/lobj.sgml +++ b/doc/src/sgml/lobj.sgml @@ -1,5 +1,5 @@ <!-- -$Header: /cvsroot/pgsql/doc/src/sgml/lobj.sgml,v 1.18 2001/09/10 04:15:41 momjian Exp $ +$Header: /cvsroot/pgsql/doc/src/sgml/lobj.sgml,v 1.19 2001/09/10 21:58:47 petere Exp $ --> <chapter id="largeObjects"> @@ -40,7 +40,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/lobj.sgml,v 1.18 2001/09/10 04:15:41 momjia objects interchangeably to mean the same thing in this section.) Since <productname>PostgreSQL 7.1</productname> all large objects are placed in - one system table called pg_largeobject. + one system table called <classname>pg_largeobject</classname>. </para> </sect1> @@ -87,9 +87,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/lobj.sgml,v 1.18 2001/09/10 04:15:41 momjia <function>lseek(2)</function>, etc. User functions call these routines to retrieve only the data of interest from a large object. For example, if a large - object type called mugshot existed that stored + object type called <type>mugshot</type> existed that stored photographs of faces, then a function called beard could - be declared on mugshot data. Beard could look at the + be declared on <type>mugshot</type> data. Beard could look at the lower third of a photograph, and determine the color of the beard that appeared there, if any. 
The entire large object value need not be buffered, or even @@ -110,13 +110,13 @@ $Header: /cvsroot/pgsql/doc/src/sgml/lobj.sgml,v 1.18 2001/09/10 04:15:41 momjia Oid lo_creat(PGconn *<replaceable class="parameter">conn</replaceable>, int <replaceable class="parameter">mode</replaceable>) </synopsis> creates a new large object. - <replaceable class="parameter">mode</replaceable> is a bitmask + <replaceable class="parameter">mode</replaceable> is a bit mask describing several different attributes of the new object. The symbolic constants listed here are defined in <filename>$<envar>PGROOT</envar>/src/backend/libpq/libpq-fs.h</filename> The access type (read, write, or both) is controlled by - OR ing together the bits <acronym>INV_READ</acronym> and + OR'ing together the bits <acronym>INV_READ</acronym> and <acronym>INV_WRITE</acronym>. The low-order sixteen bits of mask are the storage manager number on which the large object should reside. For sites other than Berkeley, these @@ -137,7 +137,7 @@ inv_oid = lo_creat(INV_READ|INV_WRITE); Oid lo_import(PGconn *<replaceable class="parameter">conn</replaceable>, const char *<replaceable class="parameter">filename</replaceable>) </synopsis> <replaceable class="parameter">filename</replaceable> - specifies the <acronym>Unix</acronym> pathname of + specifies the <acronym>Unix</acronym> path name of the file to be imported as a large object. </para> </sect2> @@ -151,9 +151,9 @@ Oid lo_import(PGconn *<replaceable class="parameter">conn</replaceable>, const c <synopsis> int lo_export(PGconn *<replaceable class="parameter">conn</replaceable>, Oid <replaceable class="parameter">lobjId</replaceable>, const char *<replaceable class="parameter">filename</replaceable>) </synopsis> - The lobjId argument specifies the Oid of the large - object to export and the filename argument specifies - the <acronym>UNIX</acronym> pathname of the file. 
+ The <parameter>lobjId</parameter> argument specifies the Oid of the large + object to export and the <parameter>filename</parameter> argument specifies + the <acronym>UNIX</acronym> path name of the file. </para> </sect2> @@ -165,9 +165,9 @@ int lo_export(PGconn *<replaceable class="parameter">conn</replaceable>, Oid <re <synopsis> int lo_open(PGconn *conn, Oid lobjId, int mode) </synopsis> - The lobjId argument specifies the Oid of the large - object to open. The mode bits control whether the - object is opened for reading INV_READ), writing or + The <parameter>lobjId</parameter> argument specifies the Oid of the large + object to open. The <parameter>mode</parameter> bits control whether the + object is opened for reading (<symbol>INV_READ</>), writing or both. A large object cannot be opened before it is created. <function>lo_open</function> returns a large object descriptor @@ -185,7 +185,7 @@ int lo_open(PGconn *conn, Oid lobjId, int mode) <programlisting> int lo_write(PGconn *conn, int fd, const char *buf, size_t len) </programlisting> - writes len bytes from buf to large object fd. The fd + writes <parameter>len</parameter> bytes from <parameter>buf</parameter> to large object <parameter>fd</>. The <parameter>fd</parameter> argument must have been returned by a previous <function>lo_open</function>. The number of bytes actually written is returned. In the event of an error, the return value is negative. @@ -200,7 +200,7 @@ int lo_write(PGconn *conn, int fd, const char *buf, size_t len) <programlisting> int lo_read(PGconn *conn, int fd, char *buf, size_t len) </programlisting> - reads len bytes from large object fd into buf. The fd + reads <parameter>len</parameter> bytes from large object <parameter>fd</parameter> into <parameter>buf</parameter>. The <parameter>fd</parameter> argument must have been returned by a previous <function>lo_open</function>. The number of bytes actually read is returned. In the event of an error, the return value is negative. 
@@ -245,7 +245,7 @@ int lo_close(PGconn *conn, int fd) <synopsis> Oid lo_unlink(PGconn *<replaceable class="parameter">conn</replaceable>, Oid lobjId) </synopsis> - The lobjId argument specifies the Oid of the large + The <parameter>lobjId</parameter> argument specifies the Oid of the large object to remove. </para> </sect2> @@ -278,20 +278,20 @@ SELECT lo_export(image.raster, '/tmp/motd') from image </sect1> <sect1 id="lo-libpq"> -<title>Accessing Large Objects from LIBPQ +Accessing Large Objects from <application>Libpq</application> Below is a sample program which shows how the large object interface - in LIBPQ can be used. Parts of the program are + in libpq can be used. Parts of the program are commented out but are left in the source for the readers benefit. This program can be found in ../src/test/examples Frontend applications which use the large object interface - in LIBPQ should include the header file - libpq/libpq-fs.h and link with the libpq library. + in libpq should include the header file + libpq/libpq-fs.h and link with the libpq library. diff --git a/doc/src/sgml/odbc.sgml b/doc/src/sgml/odbc.sgml index b15227f76c..086dcd09ba 100644 --- a/doc/src/sgml/odbc.sgml +++ b/doc/src/sgml/odbc.sgml @@ -1,5 +1,5 @@ @@ -53,10 +53,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.22 2001/05/12 22:51:35 The backend access come from ODBC drivers, - or vendor specifc drivers that + or vendor-specific drivers that allow data access. psqlODBC is such a driver, along with others that are - available, such as the OpenLink ODBC drivers. + available, such as the OpenLink ODBC drivers. @@ -133,8 +133,8 @@ psql -d template1 -f LOCATION/odbc.sql Supported Platforms psqlODBC has been built and tested - on Linux. There have been reports of success - with FreeBSD and with Solaris. There are no known restrictions + on Linux. There have been reports of success + with FreeBSD and with Solaris. 
There are no known restrictions on the basic code for other platforms which already support Postgres. @@ -248,9 +248,9 @@ InstallDir = /opt/applix/axdata/axshlib - In Visual C++, you can use the CRecordSet class, which wraps the + In Visual C++, you can use the CRecordSet class, which wraps the ODBC API - set within an MFC 4.2 class. This is the easiest route if you are doing + set within an MFC 4.2 class. This is the easiest route if you are doing Windows C++ development under Windows NT. @@ -277,7 +277,7 @@ InstallDir = /opt/applix/axdata/axshlib - Visual Basic and the other RAD tools have Recordset objects + Visual Basic and the other RAD tools have Recordset objects that use ODBC directly to access data. Using the data-aware controls, you can quickly link to the ODBC back end database @@ -291,7 +291,7 @@ InstallDir = /opt/applix/axdata/axshlib - You'll have to set up a DSN first. + You'll have to set up a DSN first. @@ -299,7 +299,7 @@ InstallDir = /opt/applix/axdata/axshlib - ApplixWare + <application>ApplixWare</application> ApplixWare @@ -326,7 +326,7 @@ InstallDir = /opt/applix/axdata/axshlib - Enabling ApplixWare Database Access + Enabling <application>ApplixWare</application> Database Access These instructions are for the 4.4.2 release of @@ -341,7 +341,7 @@ InstallDir = /opt/applix/axdata/axshlib elfodbc can find libodbc.so (the ODBC driver manager) shared library. - This library is included with the ApplixWare distribution, + This library is included with the ApplixWare distribution, but axnet.cnf needs to be modified to point to the correct location. @@ -371,7 +371,7 @@ InstallDir = /opt/applix/axdata/axshlib libFor elfodbc applixroot/applix/axdata/axshlib/lib - which will tell elfodbc to look in this directory + which will tell elfodbc to look in this directory for the ODBC support library. 
Typically Applix is installed in /opt so the full path would be @@ -399,7 +399,7 @@ TextAsLongVarchar=0 - Testing ApplixWare ODBC Connections + Testing <application>ApplixWare</application> ODBC Connections @@ -424,7 +424,7 @@ TextAsLongVarchar=0 Select ODBC, and click Browse. The database you configured in .odbc.ini should be shown. Make sure that the - is empty (if it is not, axnet will try to contact axnet on another machine + is empty (if it is not, axnet will try to contact axnet on another machine to look for the database). @@ -436,14 +436,14 @@ TextAsLongVarchar=0 - Enter username and password in the login identification dialog, + Enter user name and password in the login identification dialog, and click OK. - You should see "Starting elfodbc server" + You should see Starting elfodbc server in the lower left corner of the data window. If you get an error dialog box, see the debugging section below. @@ -476,7 +476,7 @@ TextAsLongVarchar=0 - Cannot launch gateway on server + Cannot launch gateway on server @@ -487,9 +487,9 @@ TextAsLongVarchar=0 - + Error from ODBC Gateway: - IM003::[iODBC][Driver Manager]Specified driver could not be loaded + IM003::[iODBC][Driver Manager]Specified driver could not be loaded @@ -501,7 +501,7 @@ TextAsLongVarchar=0 - Server: Broken Pipe + Server: Broken Pipe @@ -516,15 +516,15 @@ TextAsLongVarchar=0 - setuid to 256: failed to launch gateway + setuid to 256: failed to launch gateway - The September release of ApplixWare v4.4.1 (the first release with official - ODBC support under Linux) shows problems when usernames + The September release of ApplixWare v4.4.1 (the first release with official + ODBC support under Linux) shows problems when user names exceed eight (8) characters in length. - Problem description ontributed by Steve Campbell + Problem description contributed by Steve Campbell (scampbell@lear.com). 
@@ -554,24 +554,24 @@ TextAsLongVarchar=0 - Debugging ApplixWare ODBC Connections + Debugging <application>ApplixWare</application> ODBC Connections One good tool for debugging connection problems uses the Unix system utility strace. - Debugging with strace + Debugging with <command>strace</command> - Start applixware. + Start ApplixWare. Start an strace on - the axnet process. For example, if + the axnet process. For example, if % ps -aucx | grep ax @@ -596,7 +596,7 @@ cary 27883 0.9 31.0 12692 4596 ? S 10:24 0:04 axmain - Check the strace output. + Check the strace output. Note from Cary @@ -613,24 +613,24 @@ cary 27883 0.9 31.0 12692 4596 ? S 10:24 0:04 axmain For example, after getting - a "Cannot launch gateway on server", - I ran strace on axnet and got + a Cannot launch gateway on server, + I ran strace on axnet and got - + [pid 27947] open("/usr/lib/libodbc.so", O_RDONLY) = -1 ENOENT (No such file or directory) [pid 27947] open("/lib/libodbc.so", O_RDONLY) = -1 ENOENT (No such file or directory) [pid 27947] write(2, "/usr2/applix/axdata/elfodbc: can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) - - So what is happening is that applix elfodbc is searching for libodbc.so, but it - can't find it. That is why axnet.cnf needed to be changed. + + So what is happening is that applix elfodbc is searching for libodbc.so, but it + cannot find it. That is why axnet.cnf needed to be changed. - Running the ApplixWare Demo + Running the <application>ApplixWare</application> Demo In order to go through the @@ -645,7 +645,7 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) - Modifying the ApplixWare Demo + Modifying the <application>ApplixWare</application> Demo @@ -683,7 +683,7 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) - Open the sqldemo.am file from the Macro Editor. + Open the sqldemo.am file from the Macro Editor. 
@@ -734,7 +734,7 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) You can add information about your - database login and password to the standard Applix start-up + database login and password to the standard Applix start-up macro file. This is an example ~/axhome/macros/login.am file: @@ -748,7 +748,7 @@ endmacro You should be careful about the file protections on any file containing - username and password information. + user name and password information. diff --git a/doc/src/sgml/plperl.sgml b/doc/src/sgml/plperl.sgml index 52d7ead387..5c8e68c403 100644 --- a/doc/src/sgml/plperl.sgml +++ b/doc/src/sgml/plperl.sgml @@ -1,5 +1,5 @@ @@ -21,10 +21,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/plperl.sgml,v 2.9 2001/06/22 21:37:14 momji The PL/Perl interpreter (when installed as trusted interpreter with - default name 'plperl') intepreter is a full Perl interpreter. However, certain + default name plperl) interpreter is a full Perl interpreter. However, certain operations have been disabled in order to maintain the security of the system. In general, the operations that are restricted are - those that interact with the environment. This includes filehandle + those that interact with the environment. This includes file handle operations, require, and use (for external modules). It should be noted that this security is not absolute. Indeed, several Denial-of-Service attacks are still @@ -32,8 +32,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/plperl.sgml,v 2.9 2001/06/22 21:37:14 momji - When PL/Perl is installed as 'untrusted' interpreter (with name 'plperlu'), - everything is permitted, and any perl code can be loaded (by superuser only). + When PL/Perl is installed as untrusted interpreter (with name plperlu), + everything is permitted, and any Perl code can be loaded (by superuser only). @@ -170,13 +170,13 @@ CREATE FUNCTION badfunc() RETURNS integer AS ' The creation of the function will succeed, but executing it will not. 
Note that if same function was created by superuser using language - 'plperlu', execution would succeed. + plperlu, execution would succeed. - Access to database itself from your perl function can be done via - an experimental module DBD::PgSPI, available at this site. This module makes available a DBI-compliant - database-handle named $pg_dbh, and you can use that to make queries with - normal DBI syntax. + Access to database itself from your Perl function can be done via + an experimental module DBD::PgSPI. This module makes available a DBI-compliant + database-handle named $pg_dbh, and you can use that to make queries with + normal DBI syntax. diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml index f96f085afd..c8725e550f 100644 --- a/doc/src/sgml/plpython.sgml +++ b/doc/src/sgml/plpython.sgml @@ -1,4 +1,4 @@ - + PL/Python - Python Procedural Language @@ -49,9 +49,9 @@ def __plpython_procedure_myfunc_23456(): PostgreSQL function variables are available in the global args list. In the myfunc - example, args[0] contains whatever was passed in as the text - argument. For myfunc2(text, int4), args[0] - would contain the text variable and args[1] the int4 variable. + example, args[0] contains whatever was passed in as the text + argument. For myfunc2(text, integer), args[0] + would contain the text variable and args[1] the integer variable. @@ -59,12 +59,12 @@ def __plpython_procedure_myfunc_23456(): function calls. This variable is private static data. The global dictionary GD is public data, available to all python functions within a backend. Use with care. When the function is used in a - trigger, the triggers tuples are in TD["new"] and/or TD["old"] + trigger, the triggers tuples are in TD["new"] and/or TD["old"] depending on the trigger event. Return 'None' or "OK" from the python function to indicate the tuple is unmodified, "SKIP" to abort the event, or "MODIFIED" to indicate you've modified the tuple. 
If the trigger was called with arguments they are available - in TD["args"][0] to TD["args"][(n -1)] + in TD["args"][0] to TD["args"][(n -1)]. @@ -98,7 +98,7 @@ def __plpython_procedure_myfunc_23456(): - Additionally, the plpy module provides two functions called + Additionally, the plpy module provides two functions called execute and prepare. Calling plpy.execute with a query string, and an optional limit argument, causes that query to be run, and the diff --git a/doc/src/sgml/plsql.sgml b/doc/src/sgml/plsql.sgml index bd490d3f83..4c13580ef4 100644 --- a/doc/src/sgml/plsql.sgml +++ b/doc/src/sgml/plsql.sgml @@ -1,16 +1,16 @@ - PL/pgSQL - <acronym>SQL</acronym> Procedural Language + <application>PL/pgSQL</application> - <acronym>SQL</acronym> Procedural Language PL/pgSQL - PL/pgSQL is a loadable procedural language for the + PL/pgSQL is a loadable procedural language for the Postgres database system. @@ -24,7 +24,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/plsql.sgml,v 2.37 2001/09/10 06:35:34 Overview - The design goals of PL/pgSQL were to create a loadable procedural + The design goals of PL/pgSQL were to create a loadable procedural language that @@ -60,7 +60,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/plsql.sgml,v 2.37 2001/09/10 06:35:34 - The PL/pgSQL call handler parses the function's source text and + The PL/pgSQL call handler parses the function's source text and produces an internal binary instruction tree the first time the function is called. The produced bytecode is identified in the call handler by the object ID of the function. This ensures @@ -69,12 +69,12 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/plsql.sgml,v 2.37 2001/09/10 06:35:34 For all expressions and SQL statements used in - the function, the PL/pgSQL bytecode interpreter creates a + the function, the PL/pgSQL bytecode interpreter creates a prepared execution plan using the SPI manager's SPI_prepare() and SPI_saveplan() functions. 
This is done the first time the individual - statement is processed in the PL/pgSQL function. Thus, a function with + statement is processed in the PL/pgSQL function. Thus, a function with conditional code that contains many statements for which execution plans would be required, will only prepare and save those plans that are really used during the lifetime of the database @@ -102,18 +102,18 @@ END; - Because PL/pgSQL saves execution plans in this way, queries that appear - directly in a PL/pgSQL function must refer to the same tables and fields + Because PL/pgSQL saves execution plans in this way, queries that appear + directly in a PL/pgSQL function must refer to the same tables and fields on every execution; that is, you cannot use a parameter as the name of a table or field in a query. To get around - this restriction, you can construct dynamic queries using the PL/pgSQL + this restriction, you can construct dynamic queries using the PL/pgSQL EXECUTE statement --- at the price of constructing a new query plan on every execution. Except for input/output conversion and calculation functions for user defined types, anything that can be defined in C language - functions can also be done with PL/pgSQL. It is possible to + functions can also be done with PL/pgSQL. It is possible to create complex conditional computation functions and later use them to define operators or use them in functional indexes. @@ -162,13 +162,13 @@ END; - With PL/pgSQL you can group a block of computation and a + With PL/pgSQL you can group a block of computation and a series of queries inside the database server, thus having the power of a procedural language and the ease of use of SQL, but saving lots of time because you don't have the whole client/server communication overhead. Your application will enjoy a - considerable performance increase by using PL/pgSQL. + considerable performance increase by using PL/pgSQL. 
@@ -176,9 +176,9 @@ END; SQL Support - PL/pgSQL adds the power of a procedural language to the + PL/pgSQL adds the power of a procedural language to the flexibility and ease of SQL. With - PL/pgSQL you can use all the datatypes, columns, operators + PL/pgSQL you can use all the data types, columns, operators and functions of SQL. @@ -187,7 +187,7 @@ END; Portability - Because PL/pgSQL functions run inside PostgreSQL, these + Because PL/pgSQL functions run inside PostgreSQL, these functions will run on any platform where PostgreSQL runs. Thus you can reuse code and have less development costs. @@ -195,13 +195,13 @@ END; - Developing in PL/pgSQL + Developing in <application>PL/pgSQL</application> - Developing in PL/pgSQL is pretty straight forward, especially + Developing in PL/pgSQL is pretty straight forward, especially if you have developed in other database procedural languages, - such as Oracle's PL/SQL. Two good ways of developing in - PL/pgSQL are: + such as Oracle's PL/SQL. Two good ways of developing in + PL/pgSQL are: @@ -212,18 +212,18 @@ END; - Using PostgreSQL's GUI Tool: pgaccess + Using PostgreSQL's GUI Tool: PgAccess - One good way to develop in PL/pgSQL is to simply use the text + One good way to develop in PL/pgSQL is to simply use the text editor of your choice to create your functions, and in another console, use psql (PostgreSQL's interactive monitor) to load those functions. If you are doing it this way (and if you are - a PL/pgSQL novice or in debugging stage), it is a good idea to + a PL/pgSQL novice or in debugging stage), it is a good idea to always DROP your function before creating it. That way when you reload the file, it'll drop your functions and then re-create them. For example: @@ -240,15 +240,15 @@ end; When you load the file for the first time, PostgreSQL will raise a warning saying this function doesn't exist and go on to create it. 
To load an SQL - file (filename.sql) into a database named "dbname", use the command: + file (e.g., filename.sql) into a database named dbname, use the command: psql -f filename.sql dbname - Another good way to develop in PL/pgSQL is using - PostgreSQL's GUI tool: pgaccess. It does some + Another good way to develop in PL/pgSQL is using + PostgreSQL's GUI tool: PgAccess. It does some nice things for you, like escaping single-quotes, and making it easy to recreate and debug functions. @@ -263,10 +263,10 @@ psql -f filename.sql dbname - Structure of PL/pgSQL + Structure of <application>PL/pgSQL</application> - PL/pgSQL is a block structured language. All + PL/pgSQL is a block structured language. All keywords and identifiers can be used in mixed upper and lower-case. A block is defined as: @@ -316,8 +316,8 @@ END; It is important not to confuse the use of BEGIN/END for - grouping statements in PL/pgSQL with the database commands for - transaction control. PL/pgSQL's BEGIN/END are only for grouping; + grouping statements in PL/pgSQL with the database commands for + transaction control. PL/pgSQL's BEGIN/END are only for grouping; they do not start or end a transaction. Functions and trigger procedures are always executed within a transaction established by an outer query --- they cannot start or commit transactions, since @@ -329,7 +329,7 @@ END; Comments - There are two types of comments in PL/pgSQL. A double dash -- + There are two types of comments in PL/pgSQL. A double dash -- starts a comment that extends to the end of the line. A /* starts a block comment that extends to the next occurrence of */. Block comments cannot be nested, but double dash comments can be @@ -350,7 +350,7 @@ END; - PL/pgSQL variables can have any SQL datatype, such as + PL/pgSQL variables can have any SQL data type, such as INTEGER, VARCHAR and CHAR. All variables have as default value the SQL NULL value. 
@@ -437,7 +437,7 @@ END; Using the %TYPE and %ROWTYPE attributes, you can declare variables with the same - datatype or structure of another database item (e.g: a + data type or structure of another database item (e.g: a table field). @@ -448,13 +448,13 @@ END; - %TYPE provides the datatype of a + %TYPE provides the data type of a variable or database column. You can use this to declare variables that will hold database values. For example, let's say you have a column named user_id in your users table. To declare a variable with - the same datatype as users.user_id you write: + the same data type as users.user_id you write: user_id users.user_id%TYPE; @@ -462,8 +462,8 @@ user_id users.user_id%TYPE; By using %TYPE you don't need to know - the datatype of the structure you are referencing, - and most important, if the datatype of the + the data type of the structure you are referencing, + and most important, if the data type of the referenced item changes in the future (e.g: you change your table definition of user_id to become a REAL), you won't need to change your function @@ -474,11 +474,11 @@ user_id users.user_id%TYPE; - table%ROWTYPE + table%ROWTYPE - %ROWTYPE provides the composite datatype corresponding + %ROWTYPE provides the composite data type corresponding to a whole row of the specified table. table must be an existing table or view name of the database. The fields of the row are @@ -560,12 +560,12 @@ RENAME this_var TO that_var; Expressions - All expressions used in PL/pgSQL statements are processed using - the backend's executor. Expressions that appear to contain + All expressions used in PL/pgSQL statements are processed using + the executor of the server. Expressions that appear to contain constants may in fact require run-time evaluation (e.g. 'now' for the timestamp type) so - it is impossible for the PL/pgSQL parser + it is impossible for the PL/pgSQL parser to identify real constant values other than the NULL keyword. 
All expressions are evaluated internally by executing a query @@ -574,7 +574,7 @@ SELECT expression using the SPI manager. In the expression, occurrences of variable identifiers are substituted by parameters and the actual values from the variables are passed to the executor in the parameter array. All - expressions used in a PL/pgSQL function are only prepared and + expressions used in a PL/pgSQL function are only prepared and saved once. The only exception to this rule is an EXECUTE statement if parsing of a query is needed each time it is encountered. @@ -615,7 +615,7 @@ CREATE FUNCTION logfunc2 (text) RETURNS timestamp AS ' Postgres main parser knows when preparing the plan for the INSERT, that the string 'now' should be interpreted as - timestamp because the target field of logtable + timestamp because the target field of logtable is of that type. Thus, it will make a constant from it at this time and this constant value is then used in all invocations of logfunc1() during the lifetime of the @@ -629,19 +629,19 @@ CREATE FUNCTION logfunc2 (text) RETURNS timestamp AS ' what type 'now' should become and therefore it returns a data type of text containing the string 'now'. During the assignment - to the local variable curtime, the PL/pgSQL interpreter casts this - string to the timestamp type by calling the + to the local variable curtime, the PL/pgSQL interpreter casts this + string to the timestamp type by calling the text_out() and timestamp_in() functions for the conversion. This type checking done by the Postgres main - parser got implemented after PL/pgSQL was nearly done. + parser got implemented after PL/pgSQL was nearly done. It is a difference between 6.3 and 6.4 and affects all functions using the prepared plan feature of the SPI manager. Using a local - variable in the above manner is currently the only way in PL/pgSQL to get + variable in the above manner is currently the only way in PL/pgSQL to get those values interpreted correctly. 
@@ -659,7 +659,7 @@ CREATE FUNCTION logfunc2 (text) RETURNS timestamp AS ' Statements - Anything not understood by the PL/pgSQL parser as specified below + Anything not understood by the PL/pgSQL parser as specified below will be put into a query and sent down to the database engine to execute. The resulting query should not return any data. @@ -675,8 +675,8 @@ CREATE FUNCTION logfunc2 (text) RETURNS timestamp AS ' If the expressions result data type doesn't match the variables data type, or the variable has a size/precision that is known - (as for char(20)), the result value will be implicitly casted by - the PL/pgSQL bytecode interpreter using the result types output- and + (as for char(20)), the result value will be implicitly cast by + the PL/pgSQL bytecode interpreter using the result types output- and the variables type input-functions. Note that this could potentially result in runtime errors generated by the types input functions. @@ -694,7 +694,7 @@ tax := subtotal * 0.06; All functions defined in a Postgres database return a value. Thus, the normal way to call a function is to execute a SELECT query or doing an assignment (resulting - in a PL/pgSQL internal SELECT). + in a PL/pgSQL internal SELECT). @@ -722,8 +722,8 @@ PERFORM create_mv(''cs_session_page_requests_mv'','' Often times you will want to generate dynamic queries inside - your PL/pgSQL functions. Or you have functions that will - generate other functions. PL/pgSQL provides the EXECUTE + your PL/pgSQL functions. Or you have functions that will + generate other functions. PL/pgSQL provides the EXECUTE statement for these occasions. @@ -738,13 +738,13 @@ EXECUTE query-string When working with dynamic queries you will have to face - escaping of single quotes in PL/pgSQL. Please refer to the + escaping of single quotes in PL/pgSQL. Please refer to the table available at the "Porting from Oracle PL/SQL" chapter for a detailed explanation that will save you some effort. 
- Unlike all other queries in PL/pgSQL, a + Unlike all other queries in PL/pgSQL, a query run by an EXECUTE statement is not prepared and saved just once during the life of the server. Instead, the query is prepared each @@ -834,7 +834,7 @@ GET DIAGNOSTICS variable = itemitem is a keyword identifying a state value to be assigned to the specified variable (which should be - of the right datatype to receive it). The currently available + of the right data type to receive it). The currently available status items are ROW_COUNT, the number of rows processed by the last SQL query sent down to the SQL engine; and RESULT_OID, @@ -873,7 +873,7 @@ RETURN expression Control Structures Control structures are probably the most useful (and - important) part of PL/SQL. With PL/pgSQL's control structures, + important) part of PL/SQL. With PL/pgSQL's control structures, you can manipulate PostgreSQL data in a very flexible and powerful way. @@ -1022,8 +1022,8 @@ END IF; With the LOOP, WHILE, FOR and EXIT statements, you can - control the flow of execution of your PL/pgSQL program - iteratively. + control the flow of execution of your PL/pgSQL program + iteractively. @@ -1208,7 +1208,7 @@ SELECT INTO target expressionsPL/pgSQL function, use the equivalent syntax CREATE TABLE AS SELECT.) @@ -1344,7 +1344,7 @@ END LOOP; Use the RAISE statement to throw messages into the - Postgres elog mechanism. + Postgres elog mechanism. RAISE level 'format' , variable ...; @@ -1407,7 +1407,7 @@ RAISE EXCEPTION ''Inexistent ID --> %'',user_id; - Thus, the only thing PL/pgSQL currently does when it encounters + Thus, the only thing PL/pgSQL currently does when it encounters an abort during execution of a function or trigger procedure is to write some additional NOTICE level log messages telling in which function and where (line number and type of @@ -1423,7 +1423,7 @@ RAISE EXCEPTION ''Inexistent ID --> %'',user_id; Trigger Procedures - PL/pgSQL can be used to define trigger procedures. 
They are created + PL/pgSQL can be used to define trigger procedures. They are created with the usual CREATE FUNCTION command as a function with no arguments and a return type of OPAQUE. @@ -1559,7 +1559,7 @@ RAISE EXCEPTION ''Inexistent ID --> %'',user_id; - A PL/pgSQL Trigger Procedure Example + A <application>PL/pgSQL</application> Trigger Procedure Example This trigger ensures, that any time a row is inserted or updated @@ -1611,13 +1611,13 @@ CREATE TRIGGER emp_stamp BEFORE INSERT OR UPDATE ON emp Here are only a few functions to demonstrate how easy it is to - write PL/pgSQL + write PL/pgSQL functions. For more complex examples the programmer - might look at the regression test for PL/pgSQL. + might look at the regression test for PL/pgSQL. - One painful detail in writing functions in PL/pgSQL is the handling + One painful detail in writing functions in PL/pgSQL is the handling of single quotes. The function's source text on CREATE FUNCTION must be a literal string. Single quotes inside of literal strings must be either doubled or quoted with a backslash. We are still looking for @@ -1633,10 +1633,10 @@ CREATE TRIGGER emp_stamp BEFORE INSERT OR UPDATE ON emp - A Simple PL/pgSQL Function to Increment an Integer + A Simple <application>PL/pgSQL</application> Function to Increment an Integer - The following two PL/pgSQL functions are identical to their + The following two PL/pgSQL functions are identical to their counterparts from the C language function discussion. This function receives an integer and increments it by one, returning the incremented value. 
@@ -1652,7 +1652,7 @@ CREATE FUNCTION add_one (integer) RETURNS integer AS ' - A Simple PL/pgSQL Function to Concatenate Text + A Simple <application>PL/pgSQL</application> Function to Concatenate Text This function receives two text parameters and @@ -1669,16 +1669,16 @@ CREATE FUNCTION concat_text (text, text) RETURNS text AS ' - A PL/pgSQL Function on Composite Type + A <application>PL/pgSQL</application> Function on Composite Type - In this example, we take EMP (a table) and an + In this example, we take EMP (a table) and an integer as arguments to our function, which returns - a boolean. If the "salary" field of the EMP table is - NULL, we return "f". Otherwise we compare with + a boolean. If the salary field of the EMP table is + NULL, we return f. Otherwise we compare with that field with the integer passed to the function and return the boolean result of the comparison (t - or f). This is the PL/pgSQL equivalent to the example from the C + or f). This is the PL/pgSQL equivalent to the example from the C functions. @@ -1743,7 +1743,7 @@ CREATE FUNCTION c_overpaid (EMP, integer) RETURNS boolean AS ' This section explains differences between Oracle's PL/SQL and - PostgreSQL's PL/pgSQL languages in the hopes of helping developers + PostgreSQL's PL/pgSQL languages in the hopes of helping developers port applications from Oracle to PostgreSQL. Most of the code here is from the ArsDigita Clickstream @@ -1753,10 +1753,10 @@ CREATE FUNCTION c_overpaid (EMP, integer) RETURNS boolean AS ' - PL/pgSQL is similar to PL/SQL in many aspects. It is a block + PL/pgSQL is similar to PL/SQL in many aspects. It is a block structured, imperative language (all variables have to be declared). PL/SQL has many more features than its PostgreSQL - counterpart, but PL/pgSQL allows for a great deal of functionality + counterpart, but PL/pgSQL allows for a great deal of functionality and it is being improved constantly. 
@@ -1937,7 +1937,7 @@ SHOW ERRORS; - Let's go through this function and see the differences to PL/pgSQL: + Let's go through this function and see the differences to PL/pgSQL: @@ -2100,7 +2100,7 @@ end; The following Oracle PL/SQL procedure is used to parse a URL and return several elements (host, path and query). It is an - procedure because in PL/pgSQL functions only one value can be returned + procedure because in PL/pgSQL functions only one value can be returned (see ). In PostgreSQL, one way to work around this is to split the procedure in three different functions: one to return the host, another for @@ -2203,7 +2203,7 @@ end; Oracle procedures give a little more flexibility to the developer because nothing needs to be explicitly returned, but it can be - through the use of INOUT or OUT parameters. + through the use of INOUT or OUT parameters. @@ -2253,14 +2253,14 @@ show errors - If you do a LOCK TABLE in PL/pgSQL, the lock + If you do a LOCK TABLE in PL/pgSQL, the lock will not be released until the calling transaction is finished. - You also cannot have transactions in PL/pgSQL procedures. The + You also cannot have transactions in PL/pgSQL procedures. The entire function (and other functions called from therein) is executed in a transaction and PostgreSQL rolls back the results if something goes wrong. Therefore only one @@ -2278,7 +2278,7 @@ show errors - So let's see one of the ways we could port this procedure to PL/pgSQL: + So let's see one of the ways we could port this procedure to PL/pgSQL: drop function cs_create_job(integer); @@ -2316,7 +2316,7 @@ end; - Notice how you can raise notices (or errors) in PL/pgSQL. + Notice how you can raise notices (or errors) in PL/pgSQL. 
@@ -2423,7 +2423,7 @@ END; - Optimizing PL/pgSQL Functions + Optimizing <application>PL/pgSQL</application> Functions PostgreSQL gives you two function creation modifiers to optimize diff --git a/doc/src/sgml/pltcl.sgml b/doc/src/sgml/pltcl.sgml index 7569846760..29c1850f18 100644 --- a/doc/src/sgml/pltcl.sgml +++ b/doc/src/sgml/pltcl.sgml @@ -1,9 +1,9 @@ - PL/Tcl - TCL Procedural Language + PL/Tcl - Tcl Procedural Language PL/Tcl @@ -37,7 +37,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/pltcl.sgml,v 2.12 2001/08/02 15:45:55 momji The good restriction is that everything is executed in a safe Tcl interpreter. In addition to the limited command set of safe Tcl, only a few commands are available to access the database via SPI and to raise - messages via elog(). There is no way to access internals of the + messages via elog(). There is no way to access internals of the database backend or to gain OS-level access under the permissions of the Postgres user ID, as a C function can do. Thus, any unprivileged database user may be @@ -157,7 +157,7 @@ CREATE FUNCTION overpaid_2 (EMP) RETURNS bool AS ' To help protect PL/Tcl procedures from unwanted side effects, - an array is made available to each procedure via the upvar + an array is made available to each procedure via the upvar command. The global name of this variable is the procedure's internal name and the local name is GD. It is recommended that GD be used for private status data of a procedure. Use regular Tcl global variables @@ -210,7 +210,7 @@ CREATE FUNCTION overpaid_2 (EMP) RETURNS bool AS ' A Tcl list of the tables field names prefixed with an empty list element. - So looking up an element name in the list with the lsearch Tcl command + So looking up an element name in the list with the lsearch Tcl command returns the same positive number starting from 1 as the fields are numbered in the pg_attribute system catalog. 
@@ -344,7 +344,7 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab elog - elog level msg + elog level msg Fire a log message. Possible levels are NOTICE, ERROR, @@ -355,7 +355,7 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab - quote string + quote string Duplicates all occurrences of single quote and backslash characters. @@ -398,7 +398,7 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab spi_lastoid - spi_lastoid + spi_lastoid Returns the OID of the last query if it was an INSERT. @@ -407,7 +407,7 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab - spi_exec ?-count n? ?-array name? query ?loop-body? + spi_exec ?-count n? ?-array name? query ?loop-body? Call parser/planner/optimizer/executor for query. @@ -446,45 +446,45 @@ spi_exec -array C "SELECT * FROM pg_class" { - spi_prepare query typelist + spi_prepare query typelist Prepares AND SAVES a query plan for later execution. It is a bit different from the C level SPI_prepare in that the plan is automatically copied to the - toplevel memory context. Thus, there is currently no way of preparing a + top-level memory context. Thus, there is currently no way of preparing a plan without saving it. If the query references arguments, the type names must be given as a Tcl - list. The return value from spi_prepare is a query ID to be used in - subsequent calls to spi_execp. See spi_execp for a sample. + list. The return value from spi_prepare is a query ID to be used in + subsequent calls to spi_execp. See spi_execp for a sample. - spi_exec ?-count n? ?-arrayname? ?-nullsstring? queryid ?value-list? ?loop-body? + spi_exec ?-count n? ?-arrayname? ?-nullsstring? queryid ?value-list? ?loop-body? - Execute a prepared plan from spi_prepare with variable substitution. - The optional -count value tells spi_execp the maximum number of rows + Execute a prepared plan from spi_prepare with variable substitution. 
+ The optional -count value tells spi_execp the maximum number of rows to be processed by the query. - The optional value for -nulls is a string of spaces and 'n' characters - telling spi_execp which of the values are NULL's. If given, it must + The optional value for -nulls is a string of spaces and 'n' characters + telling spi_execp which of the values are NULL's. If given, it must have exactly the length of the number of values. - The queryid is the ID returned by the spi_prepare call. + The queryid is the ID returned by the spi_prepare call. - If there was a typelist given to spi_prepare, a Tcl list of values of + If there was a typelist given to spi_prepare, a Tcl list of values of exactly the same length must be given to spi_execp after the query. If the type list on spi_prepare was empty, this argument must be omitted. - If the query is a SELECT statement, the same as described for spi_exec + If the query is a SELECT statement, the same as described for spi_exec happens for the loop-body and the variables for the fields selected. @@ -506,7 +506,7 @@ CREATE FUNCTION t1_count(int4, int4) RETURNS int4 AS ' Note that each backslash that Tcl should see must be doubled in the query creating the function, since the main parser processes backslashes too on CREATE FUNCTION. - Inside the query string given to spi_prepare should + Inside the query string given to spi_prepare should really be dollar signs to mark the parameter positions and to not let $1 be substituted by the value given in the first function call. @@ -515,18 +515,20 @@ CREATE FUNCTION t1_count(int4, int4) RETURNS int4 AS ' - Modules and the unknown command + Modules and the unknown command - PL/Tcl has a special support for things often used. It recognizes two - magic tables, pltcl_modules and pltcl_modfuncs. - If these exist, the module 'unknown' is loaded into the interpreter - right after creation. 
Whenever an unknown Tcl procedure is called, - the unknown proc is asked to check if the procedure is defined in one - of the modules. If this is true, the module is loaded on demand. - To enable this behavior, the PL/Tcl call handler must be compiled - with -DPLTCL_UNKNOWN_SUPPORT set. + PL/Tcl has a special support for things often used. It + recognizes two magic tables, pltcl_modules and + pltcl_modfuncs. If these exist, the module + 'unknown' is loaded into the interpreter right after + creation. Whenever an unknown Tcl procedure is called, the + unknown proc is asked to check if the procedure is defined in + one of the modules. If this is true, the module is loaded on + demand. To enable this behavior, the + PostgreSQL must be configured with the option + . There are support scripts to maintain these tables in the modules diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml index 512f5f266b..eafc2b29af 100644 --- a/doc/src/sgml/rules.sgml +++ b/doc/src/sgml/rules.sgml @@ -1,4 +1,4 @@ - + The <ProductName>Postgres</ProductName> Rule System @@ -43,7 +43,7 @@ []. -What is a Querytree? +What is a Query Tree? To understand how the rule system works it is necessary to know @@ -52,32 +52,32 @@ The rule system is located between the query parser and the planner. - It takes the output of the parser, one querytree, and the rewrite + It takes the output of the parser, one query tree, and the rewrite rules from the pg_rewrite catalog, which are - querytrees too with some extra information, and creates zero or many - querytrees as result. So its input and output are always things + query trees too with some extra information, and creates zero or many + query trees as result. So its input and output are always things the parser itself could have produced and thus, anything it sees is basically representable as an SQL statement. - Now what is a querytree? It is an internal representation of an + Now what is a query tree? 
It is an internal representation of an SQL statement where the single parts that built - it are stored separately. These querytrees are visible when starting - the Postgres backend with debuglevel 4 + it are stored separately. These query trees are visible when starting + the Postgres backend with debug level 4 and typing queries into the interactive backend interface. The rule actions in the pg_rewrite system catalog are - also stored as querytrees. They are not formatted like the debug + also stored as query trees. They are not formatted like the debug output, but they contain exactly the same information. - Reading a querytree requires some experience and it was a hard + Reading a query tree requires some experience and it was a hard time when I started to work on the rule system. I can remember that I was standing at the coffee machine and I saw the cup - in a targetlist, water and coffee powder in a rangetable and all + in a target list, water and coffee powder in a range table and all the buttons in a qualification expression. Since - SQL representations of querytrees are + SQL representations of query trees are sufficient to understand the rule system, this document will not teach how to read them. It might help to learn it and the naming conventions are required in the later following @@ -85,47 +85,45 @@ -The Parts of a Querytree +The Parts of a Query tree When reading the SQL representations of the - querytrees in this document it is necessary to be able to identify - the parts the statement is broken into when it is in the querytree - structure. The parts of a querytree are - + query trees in this document it is necessary to be able to identify + the parts the statement is broken into when it is in the query tree + structure. The parts of a query tree are - - the commandtype + the command type This is a simple value telling which command - (SELECT, INSERT, UPDATE, DELETE) produced the parsetree. + (SELECT, INSERT, UPDATE, DELETE) produced the parse tree. 
- the rangetable + the range table - The rangetable is a list of relations that are used in the query. + The range table is a list of relations that are used in the query. In a SELECT statement these are the relations given after the FROM keyword. - Every rangetable entry identifies a table or view and tells + Every range table entry identifies a table or view and tells by which name it is called in the other parts of the query. - In the querytree the rangetable entries are referenced by + In the query tree the range table entries are referenced by index rather than by name, so here it doesn't matter if there are duplicate names as it would in an SQL - statement. This can happen after the rangetables of rules + statement. This can happen after the range tables of rules have been merged in. The examples in this document will not have this situation. @@ -134,11 +132,11 @@ - the resultrelation + the result relation - This is an index into the rangetable that identifies the + This is an index into the range table that identifies the relation where the results of the query go. @@ -151,7 +149,7 @@ - On INSERT, UPDATE and DELETE queries the resultrelation + On INSERT, UPDATE and DELETE queries the result relation is the table (or view!) where the changes take effect. @@ -159,11 +157,11 @@ - the targetlist + the target list - The targetlist is a list of expressions that define the result + The target list is a list of expressions that define the result of the query. In the case of a SELECT, the expressions are what builds the final output of the query. They are the expressions between the SELECT and the FROM keywords. (* is just an @@ -173,23 +171,23 @@ - DELETE queries don't need a targetlist because they don't + DELETE queries don't need a target list because they don't produce any result. In fact the planner will add a special CTID - entry to the empty targetlist. But this is after the rule + entry to the empty target list. 
But this is after the rule system and will be discussed later. For the rule system the - targetlist is empty. + target list is empty. - In INSERT queries the targetlist describes the new rows that - should go into the resultrelation. It is the expressions in the VALUES + In INSERT queries the target list describes the new rows that + should go into the result relation. It is the expressions in the VALUES clause or the ones from the SELECT clause in INSERT ... SELECT. - Missing columns of the resultrelation will be filled in by the + Missing columns of the result relation will be filled in by the planner with a constant NULL expression. - In UPDATE queries, the targetlist describes the new rows that should + In UPDATE queries, the target list describes the new rows that should replace the old ones. In the rule system, it contains just the expressions from the SET attribute = expression part of the query. The planner will add missing columns by inserting expressions that @@ -198,9 +196,9 @@ - Every entry in the targetlist contains an expression that can + Every entry in the target list contains an expression that can be a constant value, a variable pointing to an attribute of one - of the relations in the rangetable, a parameter, or an expression + of the relations in the range table, a parameter, or an expression tree made of function calls, constants, variables, operators etc. @@ -213,8 +211,8 @@ The query's qualification is an expression much like one of those - contained in the targetlist entries. The result value of this - expression is a boolean that tells if the operation + contained in the target list entries. The result value of this + expression is a Boolean that tells if the operation (INSERT, UPDATE, DELETE or SELECT) for the final result row should be executed or not. It is the WHERE clause of an SQL statement. 
@@ -232,7 +230,7 @@ For a simple query like SELECT FROM a, b, c the join tree is just a list of the FROM items, because we are allowed to join them in any order. But when JOIN expressions --- particularly outer joins - --- are used, we have to join in the order shown by the JOINs. + --- are used, we have to join in the order shown by the joins. The join tree shows the structure of the JOIN expressions. The restrictions associated with particular JOIN clauses (from ON or USING expressions) are stored as qualification expressions attached @@ -250,7 +248,7 @@ - The other parts of the querytree like the ORDER BY + The other parts of the query tree like the ORDER BY clause aren't of interest here. The rule system substitutes entries there while applying rules, but that doesn't have much to do with the fundamentals of the rule @@ -304,7 +302,7 @@ Rules ON SELECT are applied to all queries as the last step, even if the command given is an INSERT, UPDATE or DELETE. And they have different - semantics from the others in that they modify the parsetree in + semantics from the others in that they modify the parse tree in place instead of creating a new one. So SELECT rules are described first. @@ -329,9 +327,9 @@ - The database needed to play with the examples is named al_bundy. + The database needed to play with the examples is named al_bundy. You'll see soon why this is the database name. And it needs the - procedural language PL/pgSQL installed, because + procedural language PL/pgSQL installed, because we need a little min() function returning the lower of 2 integer values. We create that as @@ -424,7 +422,7 @@ will create a relation shoelace and an entry in pg_rewrite that tells that there is a rewrite rule that must be applied - whenever the relation shoelace is referenced in a query's rangetable. + whenever the relation shoelace is referenced in a query's range table. 
The rule has no rule qualification (discussed later, with the non SELECT rules, since SELECT rules currently cannot have them) and it is INSTEAD. Note that rule qualifications are not the same as @@ -432,7 +430,7 @@ - The rule's action is one querytree that is a copy of the + The rule's action is one query tree that is a copy of the SELECT statement in the view creation command. @@ -440,7 +438,7 @@ The two extra range table entries for NEW and OLD (named *NEW* and *CURRENT* for - historical reasons in the printed querytree) you can see in + historical reasons in the printed query tree) you can see in the pg_rewrite entry aren't of interest for SELECT rules. @@ -508,10 +506,10 @@ and this is given to the rule system. The rule system walks through the - rangetable and checks if there are rules in pg_rewrite - for any relation. When processing the rangetable entry for + range table and checks if there are rules in pg_rewrite + for any relation. When processing the range table entry for shoelace (the only one up to now) it finds the - rule '_RETshoelace' with the parsetree + rule _RETshoelace with the parse tree SELECT s.sl_name, s.sl_avail, @@ -530,8 +528,8 @@ To expand the view, the rewriter simply creates a subselect rangetable entry containing the rule's action parsetree, and substitutes this - rangetable entry for the original one that referenced the view. The - resulting rewritten parsetree is almost the same as if Al had typed + range table entry for the original one that referenced the view. The + resulting rewritten parse tree is almost the same as if Al had typed SELECT shoelace.sl_name, shoelace.sl_avail, @@ -547,10 +545,10 @@ WHERE s.sl_unit = u.un_name) shoelace; - There is one difference however: the sub-query's rangetable has two + There is one difference however: the sub-query's range table has two extra entries shoelace *OLD*, shoelace *NEW*. 
These entries don't participate directly in the query, since they aren't referenced by - the sub-query's join tree or targetlist. The rewriter uses them + the sub-query's join tree or target list. The rewriter uses them to store the access permission check info that was originally present in the rangetable entry that referenced the view. In this way, the executor will still check that the user has proper permissions to access @@ -598,7 +596,7 @@ - The output of the parser this time is the parsetree + The output of the parser this time is the parse tree SELECT shoe_ready.shoename, shoe_ready.sh_avail, @@ -610,7 +608,7 @@ The first rule applied will be the one for the shoe_ready view and it results in the - parsetree + parse tree SELECT shoe_ready.shoename, shoe_ready.sh_avail, @@ -629,8 +627,8 @@ Similarly, the rules for shoe and - shoelace are substituted into the rangetable of - the sub-query, leading to a three-level final querytree: + shoelace are substituted into the range table of + the sub-query, leading to a three-level final query tree: SELECT shoe_ready.shoename, shoe_ready.sh_avail, @@ -666,7 +664,7 @@ It turns out that the planner will collapse this tree into a two-level - querytree: the bottommost selects will be "pulled up" into the middle + query tree: the bottommost selects will be "pulled up" into the middle select since there's no need to process them separately. But the middle select will remain separate from the top, because it contains aggregate functions. If we pulled those up it would change the behavior @@ -698,18 +696,18 @@ View Rules in Non-SELECT Statements - Two details of the parsetree aren't touched in the description of - view rules above. These are the commandtype and the resultrelation. + Two details of the parse tree aren't touched in the description of + view rules above. These are the command type and the result relation. In fact, view rules don't need this information. 
- There are only a few differences between a parsetree for a SELECT - and one for any other command. Obviously they have another commandtype - and this time the resultrelation points to the rangetable entry where + There are only a few differences between a parse tree for a SELECT + and one for any other command. Obviously they have another command type + and this time the result relation points to the range table entry where the result should go. Everything else is absolutely the same. So having two tables t1 and t2 with attributes - a and b, the parsetrees for the two statements + a and b, the parse trees for the two statements SELECT t2.b FROM t1, t2 WHERE t1.a = t2.a; @@ -722,14 +720,14 @@ - The rangetables contain entries for the tables t1 and t2. + The range tables contain entries for the tables t1 and t2. - The targetlists contain one variable that points to attribute - b of the rangetable entry for table t2. + The target lists contain one variable that points to attribute + b of the range table entry for table t2. @@ -742,15 +740,15 @@ - The jointrees show a simple join between t1 and t2. + The join trees show a simple join between t1 and t2. - The consequence is, that both parsetrees result in similar execution + The consequence is, that both parse trees result in similar execution plans. They are both joins over the two tables. For the UPDATE - the missing columns from t1 are added to the targetlist by the planner - and the final parsetree will read as + the missing columns from t1 are added to the target list by the planner + and the final parse tree will read as UPDATE t1 SET a = t1.a, b = t2.b WHERE t1.a = t2.a; @@ -768,18 +766,18 @@ for. It just produces a result set of rows. The difference that one is a SELECT command and the other is an UPDATE is handled in the caller of the executor. 
The caller still knows (looking at the - parsetree) that this is an UPDATE, and he knows that this result + parse tree) that this is an UPDATE, and he knows that this result should go into table t1. But which of the rows that are there has to be replaced by the new row? - To resolve this problem, another entry is added to the targetlist + To resolve this problem, another entry is added to the target list in UPDATE (and also in DELETE) statements: the current tuple ID (ctid). This is a system attribute containing the file block number and position in the block for the row. Knowing the table, the ctid can be used to retrieve the original t1 row to be updated. - After adding the ctid to the targetlist, the query actually looks like + After adding the ctid to the target list, the query actually looks like SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; @@ -791,7 +789,7 @@ into the table (after stripping ctid) and in the tuple header of the row that ctid pointed to the cmax and xmax entries are set to the current command counter and current transaction ID. Thus the old row is hidden - and after the transaction commited the vacuum cleaner can really move + and after the transaction committed the vacuum cleaner can really move it out. @@ -806,8 +804,8 @@ The above demonstrates how the rule system incorporates - view definitions into the original parsetree. In the second example - a simple SELECT from one view created a final parsetree that is + view definitions into the original parse tree. In the second example + a simple SELECT from one view created a final parse tree that is a join of 4 tables (unit is used twice with different names). @@ -821,7 +819,7 @@ relationships between these tables plus the restrictive qualifications from the views plus the qualifications from the original query - in one single parsetree. And this is still the situation + in one single parse tree. And this is still the situation when the original query is already a join over views. 
Now the planner has to decide which is the best path to execute the query. The more information @@ -839,13 +837,13 @@ What happens if a view is named as the target relation for an INSERT, UPDATE, or DELETE? After doing the substitutions described above, - we will have a querytree in which the resultrelation points at a - subquery rangetable entry. This will not work, so the rewriter throws + we will have a query tree in which the result relation points at a + subquery range table entry. This will not work, so the rewriter throws an error if it sees it has produced such a thing. - To change this we can define rules that modify the behaviour + To change this we can define rules that modify the behavior of non-SELECT queries. This is the topic of the next section. @@ -896,8 +894,8 @@ - Second, they don't modify the parsetree in place. Instead they - create zero or many new parsetrees and can throw away the + Second, they don't modify the parse tree in place. Instead they + create zero or many new parse trees and can throw away the original one. @@ -920,10 +918,10 @@ Update rules get applied by the rule system when the result - relation and the commandtype of a parsetree are equal to the + relation and the command type of a parse tree are equal to the object and event given in the CREATE RULE command. - For update rules, the rule system creates a list of parsetrees. - Initially the parsetree list is empty. + For update rules, the rule system creates a list of parse trees. + Initially the parse tree list is empty. There can be zero (NOTHING keyword), one or multiple actions. To simplify, we look at a rule with one action. This rule can have a qualification or not and it can be INSTEAD or not. @@ -938,7 +936,7 @@ - So we have four cases that produce the following parsetrees for + So we have four cases that produce the following parse trees for a one-action rule. 
@@ -949,8 +947,8 @@ - The parsetree from the rule action where the - original parsetree's qualification has been added. + The parse tree from the rule action where the + original parse tree's qualification has been added. @@ -963,8 +961,8 @@ - The parsetree from the rule action where the - original parsetree's qualification has been added. + The parse tree from the rule action where the + original parse tree's qualification has been added. @@ -977,8 +975,8 @@ - The parsetree from the rule action where the rule - qualification and the original parsetree's + The parse tree from the rule action where the rule + qualification and the original parse tree's qualification have been added. @@ -992,15 +990,15 @@ - The parsetree from the rule action where the rule - qualification and the original parsetree's + The parse tree from the rule action where the rule + qualification and the original parse tree's qualification have been added. - The original parsetree where the negated rule + The original parse tree where the negated rule qualification has been added. @@ -1010,9 +1008,9 @@ - Finally, if the rule is not INSTEAD, the unchanged original parsetree is + Finally, if the rule is not INSTEAD, the unchanged original parse tree is added to the list. Since only qualified INSTEAD rules already add the - original parsetree, we end up with either one or two output parsetrees + original parse tree, we end up with either one or two output parse trees for a rule with one action. @@ -1027,11 +1025,11 @@ - The parsetrees generated from rule actions are thrown into the + The parse trees generated from rule actions are thrown into the rewrite system again and maybe more rules get applied resulting - in more or less parsetrees. - So the parsetrees in the rule actions must have either another commandtype - or another resultrelation. Otherwise this recursive process will end up in a loop. + in more or less parse trees. 
+ So the parse trees in the rule actions must have either another command type + or another result relation. Otherwise this recursive process will end up in a loop. There is a compiled in recursion limit of currently 10 iterations. If after 10 iterations there are still update rules to apply the rule system assumes a loop over multiple rule definitions and reports @@ -1138,9 +1136,9 @@ The rule is a qualified non-INSTEAD rule, so the rule system - has to return two parsetrees: the modified rule action and the original - parsetree. In the first step the rangetable of the original query is - incorporated into the rule's action parsetree. This results in + has to return two parse trees: the modified rule action and the original + parsetree. In the first step the range table of the original query is + incorporated into the rule's action parse tree. This results in INSERT INTO shoelace_log VALUES( @@ -1167,9 +1165,9 @@ difficulty with it. They need to support this same functionality anyway for INSERT ... SELECT. - In step 3 the original parsetree's qualification is added, - restricting the resultset further to only the rows touched - by the original parsetree. + In step 3 the original parse tree's qualification is added, + restricting the result set further to only the rows touched + by the original parse tree. INSERT INTO shoelace_log VALUES( @@ -1181,8 +1179,8 @@ AND bpchareq(shoelace_data.sl_name, 'sl7'); - Step 4 substitutes NEW references by the targetlist entries from the - original parsetree or with the matching variable references + Step 4 substitutes NEW references by the target list entries from the + original parse tree or with the matching variable references from the result relation. @@ -1195,7 +1193,7 @@ AND bpchareq(shoelace_data.sl_name, 'sl7'); - Step 5 changes OLD references into resultrelation references. + Step 5 changes OLD references into result relation references. INSERT INTO shoelace_log VALUES( @@ -1208,8 +1206,8 @@ That's it. 
Since the rule is not INSTEAD, we also output the - original parsetree. In short, the output from the rule system - is a list of two parsetrees that are the same as the statements: + original parse tree. In short, the output from the rule system + is a list of two parse trees that are the same as the statements: INSERT INTO shoelace_log VALUES( @@ -1233,7 +1231,7 @@ no log entry would get written. This - time the original parsetree does not contain a targetlist + time the original parse tree does not contain a target list entry for sl_avail, so NEW.sl_avail will get replaced by shoelace_data.sl_avail resulting in the extra query @@ -1257,8 +1255,8 @@ four rows in fact get updated (sl1, sl2, sl3 and sl4). But sl3 already has sl_avail = 0. This time, the original - parsetrees qualification is different and that results - in the extra parsetree + parse trees qualification is different and that results + in the extra parse tree INSERT INTO shoelace_log SELECT @@ -1269,12 +1267,12 @@ AND shoelace_data.sl_color = 'black'; - This parsetree will surely insert three new log entries. And + This parse tree will surely insert three new log entries. And that's absolutely correct. - Here we can see why it is important that the original parsetree is + Here we can see why it is important that the original parse tree is executed last. If the UPDATE would have been executed first, all the rows are already set to zero, so the logging INSERT @@ -1290,7 +1288,7 @@ A simple way to protect view relations from the mentioned possibility that someone can try to INSERT, UPDATE and DELETE - on them is to let those parsetrees get + on them is to let those parse trees get thrown away. We create the rules @@ -1306,7 +1304,7 @@ relation shoe, the rule system will apply the rules. 
Since the rules have no actions and are INSTEAD, the resulting list of - parsetrees will be empty and the whole query will become + parse trees will be empty and the whole query will become nothing because there is nothing left to be optimized or executed after the rule system is done with it. @@ -1316,8 +1314,8 @@ This way might irritate frontend applications because absolutely nothing happened on the database and thus, the backend will not return anything for the query. Not - even a PGRES_EMPTY_QUERY will be available in libpq. - In psql, nothing happens. This might change in the future. + even a PGRES_EMPTY_QUERY will be available in libpq. + In psql, nothing happens. This might change in the future. @@ -1325,7 +1323,7 @@ A more sophisticated way to use the rule system is to - create rules that rewrite the parsetree into one that + create rules that rewrite the parse tree into one that does the right operation on the real tables. To do that on the shoelace view, we create the following rules: @@ -1357,10 +1355,10 @@ Now there is a pack of shoelaces arriving in Al's shop and it has - a big partlist. Al is not that good in calculating and so + a big part list. Al is not that good in calculating and so we don't want him to manually update the shoelace view. Instead we setup two little tables, one where he can - insert the items from the partlist and one with a special + insert the items from the part list and one with a special trick. The create commands for these are: @@ -1445,7 +1443,7 @@ It's a long way from the one INSERT ... SELECT to these results. And its description will be the last in this - document (but not the last example :-). First there was the parsers output + document (but not the last example :-). First there was the parser's output INSERT INTO shoelace_ok SELECT @@ -1467,7 +1465,7 @@ and throws away the original INSERT on shoelace_ok. 
This rewritten query is passed to the rule system again and - the second applied rule 'shoelace_upd' produced + the second applied rule shoelace_upd produced UPDATE shoelace_data SET @@ -1484,10 +1482,10 @@ AND bpchareq(shoelace_data.sl_name, shoelace.sl_name); - Again it's an INSTEAD rule and the previous parsetree is trashed. + Again it's an INSTEAD rule and the previous parse tree is trashed. Note that this query still uses the view shoelace. But the rule system isn't finished with this loop so it continues - and applies the rule '_RETshoelace' on it and we get + and applies the rule _RETshoelace on it and we get UPDATE shoelace_data SET @@ -1508,8 +1506,8 @@ Again an update rule has been applied and so the wheel turns on and we are in rewrite round 3. This time rule - 'log_shoelace' gets applied what produces the extra - parsetree + log_shoelace gets applied what produces the extra + parse tree INSERT INTO shoelace_log SELECT @@ -1532,8 +1530,8 @@ After that the rule system runs out of rules and returns the - generated parsetrees. - So we end up with two final parsetrees that are equal to the + generated parse trees. + So we end up with two final parse trees that are equal to the SQL statements @@ -1566,7 +1564,7 @@ There is a little detail that's a bit ugly. Looking at the two queries turns out, that the shoelace_data - relation appears twice in the rangetable where it could definitely + relation appears twice in the range table where it could definitely be reduced to one. 
The planner does not
 handle it and so the execution plan for the rule systems output
 of the INSERT will be
 
@@ -1582,7 +1580,7 @@
 Nested Loop
   ->  Seq Scan on shoelace_data
 
-    while omitting the extra rangetable entry would result in a
+    while omitting the extra range table entry would result in a
 
 Merge Join
@@ -1659,7 +1657,7 @@ Merge Join
        WHERE sl_name = shoelace.sl_name);
 
 
-    Voila:
+    Voilà:
 
 al_bundy=> SELECT * FROM shoelace;
@@ -1682,7 +1680,7 @@ Merge Join
     itself has a subselect qualification containing a view
     and where calculated view columns are used,
     gets rewritten into
-    one single parsetree that deletes the requested data
+    one single parse tree that deletes the requested data
     from a real table.
 
 
@@ -1718,7 +1716,7 @@ Merge Join
     a relation (table or view) is automatically the owner of
     the rewrite
     rules that are defined for it. The Postgres rule system changes the
-    behaviour of the default access control system. Relations that
+    behavior of the default access control system. Relations that
     are used due to rules get checked against the
     permissions of the rule owner, not the user invoking the rule.
     This means, that a user does only need the required permissions
@@ -1822,7 +1820,7 @@ Merge Join
     For the things that can be implemented by both,
     it depends on the usage of the database, which is the best.
     A trigger is fired for any row affected once. A rule manipulates
-    the parsetree or generates an additional one. So if many
+    the parse tree or generates an additional one. So if many
     rows are affected in one statement, a rule issuing one extra
     query would usually do a better job than a trigger that is
     called for any single row and must execute his operations
@@ -1845,8 +1843,8 @@ Merge Join
     Both tables have many
-    thousands of rows and the index on hostname is unique.
-    The hostname column contains the full qualified domain
+    thousands of rows and the index on hostname is unique.
+    The hostname column contains the fully qualified domain
The rule/trigger should constraint delete rows from software that reference the deleted host. Since the trigger is called for each individual row @@ -1856,7 +1854,7 @@ Merge Join DELETE FROM software WHERE hostname = $1; - in a prepared and saved plan and pass the hostname in + in a prepared and saved plan and pass the hostname in the parameter. The rule would be written as @@ -1890,7 +1888,7 @@ Merge Join So there would be not that much difference in speed between the trigger and the rule implementation. With the next delete - we want to get rid of all the 2000 computers where the hostname starts + we want to get rid of all the 2000 computers where the hostname starts with 'old'. There are two possible queries to do that. One is @@ -1922,7 +1920,7 @@ Merge Join This shows, that the planner does not realize that the - qualification for the hostname on computer could also be + qualification for the hostname on computer could also be used for an index scan on software when there are multiple qualification expressions combined with AND, what he does in the regexp version of the query. The trigger will @@ -1931,7 +1929,7 @@ Merge Join over computer and 2000 index scans for the software. The rule implementation will do it with two queries over indexes. And it depends on the overall size of the software table if - the rule will still be faster in the seqscan situation. 2000 + the rule will still be faster in the sequential scan situation. 2000 query executions over the SPI manager take some time, even if all the index blocks to look them up will soon appear in the cache. @@ -1946,8 +1944,8 @@ Merge Join Again this could result in many rows to be deleted from computer. So the trigger will again fire many queries into - the executor. But the rule plan will again be the Nestloop over - two IndexScan's. Only using another index on computer: + the executor. But the rule plan will again be the nested loop over + two index scans. 
Only using another index on computer: Nestloop @@ -1977,7 +1975,7 @@ Merge Join create a rule as in the shoelace_log example is to do it with a rule qualification. That results in an extra query that is performed always, even if the attribute of interest cannot - change at all because it does not appear in the targetlist + change at all because it does not appear in the target list of the initial query. When this is enabled again, it will be one more advantage of rules over triggers. Optimization of a trigger must fail by definition in this case, because the @@ -1986,7 +1984,7 @@ Merge Join a trigger only allows to specify it on row level, so whenever a row is touched, the trigger must be called to make its decision. The rule system will know it by looking up the - targetlist and will suppress the additional query completely + target list and will suppress the additional query completely if the attribute isn't touched. So the rule, qualified or not, will only do its scans if there ever could be something to do. diff --git a/doc/src/sgml/xaggr.sgml b/doc/src/sgml/xaggr.sgml index 9451d536b4..6b92ed01c5 100644 --- a/doc/src/sgml/xaggr.sgml +++ b/doc/src/sgml/xaggr.sgml @@ -1,5 +1,5 @@ @@ -17,7 +17,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xaggr.sgml,v 1.12 2001/05/12 22:51:36 peter That is, an aggregate can be defined in terms of state that is modified whenever an input item is processed. To define a new aggregate - function, one selects a datatype for the state value, + function, one selects a data type for the state value, an initial value for the state, and a state transition function. 
The state transition function is just an ordinary function that could also be used outside the @@ -28,8 +28,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xaggr.sgml,v 1.12 2001/05/12 22:51:36 peter - Thus, in addition to the input and result datatypes seen by a user - of the aggregate, there is an internal state-value datatype that + Thus, in addition to the input and result data types seen by a user + of the aggregate, there is an internal state-value data type that may be different from both the input and result types. @@ -40,8 +40,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xaggr.sgml,v 1.12 2001/05/12 22:51:36 peter example of this kind of aggregate. "Sum" starts at zero and always adds the current row's value to its running total. For example, if we want to make a Sum - aggregate to work on a datatype for complex numbers, - we only need the addition function for that datatype. + aggregate to work on a data type for complex numbers, + we only need the addition function for that data type. The aggregate definition is: @@ -71,8 +71,8 @@ SELECT complex_sum(a) FROM test_complex; state condition) if there are no non-null input values. Perhaps we want to return NULL in that case instead --- SQL92 expects "Sum" to behave that way. We can do this simply by - omitting the "initcond" phrase, so that the initial state - condition is NULL. Ordinarily this would mean that the sfunc + omitting the initcond phrase, so that the initial state + condition is NULL. Ordinarily this would mean that the sfunc would need to check for a NULL state-condition input, but for "Sum" and some other simple aggregates like "Max" and "Min", it's sufficient to insert the first non-null input value into diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml index 1e79981f70..4f79dc317a 100644 --- a/doc/src/sgml/xfunc.sgml +++ b/doc/src/sgml/xfunc.sgml @@ -1,5 +1,5 @@ @@ -1427,7 +1427,7 @@ LANGUAGE 'c'; in the prosrc attribute of the pg_proc table entry. 
This may be the source text in the procedural language itself (like for PL/Tcl), a - pathname to a file, or anything else that tells the call handler + path name to a file, or anything else that tells the call handler what to do in detail. diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml index 2c49d46bab..29568422a9 100644 --- a/doc/src/sgml/xindex.sgml +++ b/doc/src/sgml/xindex.sgml @@ -1,5 +1,5 @@ @@ -70,7 +70,7 @@ Postgres documentation amcanmulticol - does AM support multi-column indexes? + does AM support multicolumn indexes? amindexnulls @@ -288,7 +288,7 @@ SELECT oid, * The above example assumes that you want to make this new opclass the - default B-tree opclass for the complex datatype. + default B-tree opclass for the complex data type. If you don't, just set opcdefault to false instead. opckeytype is not described here; it should always be zero for B-tree opclasses. @@ -354,24 +354,24 @@ CREATE FUNCTION complex_abs_eq(complex, complex) are being defined. We can only have one operator named, say, = and taking type complex for both operands. In this case we don't have any other operator = for complex, - but if we were building a practical datatype we'd probably want = to + but if we were building a practical data type we'd probably want = to be the ordinary equality operation for complex numbers. In that case, we'd need to use some other operator name for complex_abs_eq. Second, although Postgres can cope with operators having - the same name as long as they have different input datatypes, C can only + the same name as long as they have different input data types, C can only cope with one global routine having a given name, period. So we shouldn't name the C function something simple like abs_eq. - Usually it's a good practice to include the datatype name in the C - function name, so as not to conflict with functions for other datatypes. 
+ Usually it's a good practice to include the data type name in the C + function name, so as not to conflict with functions for other data types. Third, we could have made the Postgres name of the function abs_eq, relying on Postgres to distinguish it - by input datatypes from any other Postgres function of the same name. + by input data types from any other Postgres function of the same name. To keep the example simple, we make the function have the same names at the C level and Postgres level. diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml index 57d8bb79c2..baf36e27a1 100644 --- a/doc/src/sgml/xoper.sgml +++ b/doc/src/sgml/xoper.sgml @@ -1,5 +1,5 @@ @@ -113,7 +113,7 @@ SELECT (a + b) AS c FROM test_complex; commutator of the operator being defined. We say that operator A is the commutator of operator B if (x A y) equals (y B x) for all possible input values x,y. Notice that B is also the commutator of A. For example, - operators '<' and '>' for a particular datatype are usually each others' + operators '<' and '>' for a particular data type are usually each others' commutators, and operator '+' is usually commutative with itself. But operator '-' is usually not commutative with anything. @@ -176,7 +176,7 @@ SELECT (a + b) AS c FROM test_complex; is the negator of operator B if both return boolean results and (x A y) equals NOT (x B y) for all possible inputs x,y. Notice that B is also the negator of A. - For example, '<' and '>=' are a negator pair for most datatypes. + For example, '<' and '>=' are a negator pair for most data types. An operator can never be validly be its own negator. @@ -260,11 +260,11 @@ SELECT (a + b) AS c FROM test_complex; - You can use scalarltsel and scalargtsel for comparisons on datatypes that + You can use scalarltsel and scalargtsel for comparisons on data types that have some sensible means of being converted into numeric scalars for - range comparisons. 
If possible, add the datatype to those understood - by the routine convert_to_scalar() in src/backend/utils/adt/selfuncs.c. - (Eventually, this routine should be replaced by per-datatype functions + range comparisons. If possible, add the data type to those understood + by the routine convert_to_scalar() in src/backend/utils/adt/selfuncs.c. + (Eventually, this routine should be replaced by per-data-type functions identified through a column of the pg_type table; but that hasn't happened yet.) If you do not do this, things will still work, but the optimizer's estimates won't be as good as they could be. @@ -272,7 +272,7 @@ SELECT (a + b) AS c FROM test_complex; There are additional selectivity functions designed for geometric - operators in src/backend/utils/adt/geo_selfuncs.c: areasel, positionsel, + operators in src/backend/utils/adt/geo_selfuncs.c: areasel, positionsel, and contsel. At this writing these are just stubs, but you may want to use them (or even better, improve them) anyway. @@ -351,12 +351,12 @@ SELECT (a + b) AS c FROM test_complex; There are also machine-dependent ways in which a hash join might fail - to do the right thing. For example, if your datatype + to do the right thing. For example, if your data type is a structure in which there may be uninteresting pad bits, it's unsafe to mark the equality operator HASHES. (Unless, perhaps, you write your other operators to ensure that the unused bits are always zero.) - Another example is that the FLOAT datatypes are unsafe for hash - joins. On machines that meet the IEEE floating point standard, minus + Another example is that the FLOAT data types are unsafe for hash + joins. On machines that meet the IEEE floating point standard, minus zero and plus zero are different values (different bit patterns) but they are defined to compare equal. 
So, if float equality were marked HASHES, a minus zero and a plus zero would probably not be matched up @@ -365,7 +365,7 @@ SELECT (a + b) AS c FROM test_complex; The bottom line is that you should probably only use HASHES for - equality operators that are (or could be) implemented by memcmp(). + equality operators that are (or could be) implemented by memcmp(). @@ -393,16 +393,16 @@ SELECT (a + b) AS c FROM test_complex; it is possible to mergejoin two distinct data types so long as they are logically compatible. For example, the int2-versus-int4 equality operator is mergejoinable. - We only need sorting operators that will bring both datatypes into a + We only need sorting operators that will bring both data types into a logically compatible sequence. When specifying merge sort operators, the current operator and both referenced operators must return boolean; the SORT1 operator must have - both input datatypes equal to the current operator's left argument type, + both input data types equal to the current operator's left argument type, and the SORT2 operator must have - both input datatypes equal to the current operator's right argument type. + both input data types equal to the current operator's right argument type. (As with COMMUTATOR and NEGATOR, this means that the operator name is sufficient to specify the operator, and the system is able to make dummy operator entries if you happen to define the equality operator before @@ -434,7 +434,7 @@ SELECT (a + b) AS c FROM test_complex; There must be '<' and '>' ordering operators having the same left and - right input datatypes as the mergejoinable operator itself. These + right input data types as the mergejoinable operator itself. These operators must be named '<' and '>'; you do not have any choice in the matter, since there is no provision for specifying them explicitly. 
Note that if the left and right data types diff --git a/doc/src/sgml/xplang.sgml b/doc/src/sgml/xplang.sgml index 6118836c54..bcb7dd0180 100644 --- a/doc/src/sgml/xplang.sgml +++ b/doc/src/sgml/xplang.sgml @@ -1,5 +1,5 @@ @@ -44,7 +44,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xplang.sgml,v 1.14 2001/08/13 21:34:51 pete For the languages supplied with the standard distribution, the shell script createlang may be used instead - of carrying out the details by hand. For example, to install PL/pgSQL + of carrying out the details by hand. For example, to install PL/pgSQL into the template1 database, use createlang plpgsql template1 @@ -102,8 +102,8 @@ CREATE TRUSTED PROCEDURAL LANGUAGE TRUSTED flag should only be given for languages that do not allow access to database backends - internals or the filesystem. The languages PL/pgSQL, - PL/Tcl, and PL/Perl are known to be trusted; the language PL/TclU + internals or the file system. The languages PL/pgSQL, + PL/Tcl, and PL/Perl are known to be trusted; the language PL/TclU should not be marked trusted. @@ -111,7 +111,7 @@ CREATE TRUSTED PROCEDURAL LANGUAGE In a default Postgres installation, the - handler for the PL/pgSQL language is built and installed into the + handler for the PL/pgSQL language is built and installed into the library directory. If Tcl/Tk support is configured in, the handlers for PL/Tcl and PL/TclU are also built and installed in the same location. Likewise, the PL/Perl handler is built and installed @@ -125,7 +125,7 @@ CREATE TRUSTED PROCEDURAL LANGUAGE The following command tells the database where to find the - shared object for the PL/pgSQL language's call handler function. + shared object for the PL/pgSQL language's call handler function. 
CREATE FUNCTION plpgsql_call_handler () RETURNS OPAQUE AS @@ -143,7 +143,7 @@ CREATE TRUSTED PROCEDURAL LANGUAGE plpgsql then defines that the previously declared call handler function should be invoked for functions and trigger procedures where the - language attribute is 'plpgsql'. + language attribute is plpgsql.