postgresql/src/interfaces/libpq/fe-misc.c

/*-------------------------------------------------------------------------
*
* FILE
* fe-misc.c
*
* DESCRIPTION
* miscellaneous useful functions
*
* The communication routines here are analogous to the ones in
* backend/libpq/pqcomm.c and backend/libpq/pqcomprim.c, but operate
* in the considerably different environment of the frontend libpq.
* In particular, we work with a bare nonblock-mode socket, rather than
* a stdio stream, so that we can avoid unwanted blocking of the application.
*
* XXX: MOVE DEBUG PRINTOUT TO HIGHER LEVEL. As is, block and restart
* will cause repeat printouts.
*
* We must speak the same transmitted data representations as the backend
* routines. Note that this module supports *only* network byte order
* for transmitted ints, whereas the backend modules (as of this writing)
* still handle either network or little-endian byte order.
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/interfaces/libpq/fe-misc.c,v 1.83 2002/10/14 18:11:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
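/*
 * Illustrative sketch only (hypothetical application-side code, not part
 * of this file): because the socket is kept in non-blocking mode, an
 * application using the asynchronous query functions can wait for data
 * itself and let libpq consume it only when it is actually available, e.g.
 *
 *		FD_ZERO(&input_mask);
 *		FD_SET(PQsocket(conn), &input_mask);
 *		select(PQsocket(conn) + 1, &input_mask, NULL, NULL, NULL);
 *		if (!PQconsumeInput(conn))
 *			... report PQerrorMessage(conn) ...
 *		else if (!PQisBusy(conn))
 *			result = PQgetResult(conn);		(NULL means query complete)
 *
 * so it is never forced to block inside libpq waiting for the backend.
 */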
#include "postgres_fe.h"
#include <errno.h>
#include <signal.h>
#include <time.h>
#ifdef WIN32
#include "win32.h"
#else
#include <unistd.h>
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include "libpq-fe.h"
#include "libpq-int.h"
#include "pqsignal.h"
#include "mb/pg_wchar.h"
#define DONOTICE(conn,message) \
((*(conn)->noticeHook) ((conn)->noticeArg, (message)))
static int pqPutBytes(const char *s, size_t nbytes, PGconn *conn);
/*
* pqGetc:
* get a character from the connection
*
* All these routines return 0 on success, EOF on error.
* Note that for the Get routines, EOF only means there is not enough
* data in the buffer, not that there is necessarily a hard error.
*/
int
pqGetc(char *result, PGconn *conn)
{
if (conn->inCursor >= conn->inEnd)
return EOF;
*result = conn->inBuffer[conn->inCursor++];
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "From backend> %c\n", *result);
return 0;
}
/*
* write 1 char to the connection
*/
int
pqPutc(char c, PGconn *conn)
{
if (pqPutBytes(&c, 1, conn) == EOF)
return EOF;
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "To backend> %c\n", c);
return 0;
}
/*
* pqPutBytes: local routine to write N bytes to the connection,
* with buffering
*/
static int
pqPutBytes(const char *s, size_t nbytes, PGconn *conn)
{
/*
* Strategy to handle blocking and non-blocking connections: Fill the
* output buffer and flush it repeatedly until either all data has
* been sent or is at least queued in the buffer.
*
* For non-blocking connections, grow the buffer if not all data fits
* into it and the buffer can't be sent because the socket would
* block.
*/
while (nbytes)
{
size_t avail,
remaining;
/* fill the output buffer */
avail = Max(conn->outBufSize - conn->outCount, 0);
remaining = Min(avail, nbytes);
memcpy(conn->outBuffer + conn->outCount, s, remaining);
conn->outCount += remaining;
s += remaining;
nbytes -= remaining;
/*
* if the data didn't fit completely into the buffer, try to flush
* the buffer
*/
if (nbytes)
{
int send_result = pqSendSome(conn);
/* if there were errors, report them */
if (send_result < 0)
return EOF;
/*
* if not all data could be sent, increase the output buffer,
* put the rest of s into it and return successfully. This
* case will only happen in a non-blocking connection
*/
if (send_result > 0)
{
/*
* try to grow the buffer. FIXME: The new size could be
* chosen more intelligently.
*/
size_t buflen = (size_t) conn->outCount + nbytes;
if (buflen > (size_t) conn->outBufSize)
{
char *newbuf = realloc(conn->outBuffer, buflen);
if (!newbuf)
{
/* realloc failed. Probably out of memory */
printfPQExpBuffer(&conn->errorMessage,
"cannot allocate memory for output buffer\n");
return EOF;
}
conn->outBuffer = newbuf;
conn->outBufSize = buflen;
}
/* put the data into it */
memcpy(conn->outBuffer + conn->outCount, s, nbytes);
conn->outCount += nbytes;
/* report success. */
return 0;
}
}
/*
* pqSendSome was able to send all data. Continue with the next
* chunk of s.
*/
} /* while */
return 0;
}
/*
* pqGets:
* get a null-terminated string from the connection,
* and store it in an expansible PQExpBuffer.
* If we run out of memory, all of the string is still read,
* but the excess characters are silently discarded.
*/
int
pqGets(PQExpBuffer buf, PGconn *conn)
{
/* Copy conn data to locals for faster search loop */
char *inBuffer = conn->inBuffer;
int inCursor = conn->inCursor;
int inEnd = conn->inEnd;
int slen;
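/* scan ahead for the null terminator within the data we have buffered */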
while (inCursor < inEnd && inBuffer[inCursor])
inCursor++;
if (inCursor >= inEnd)
return EOF;
slen = inCursor - conn->inCursor;
resetPQExpBuffer(buf);
appendBinaryPQExpBuffer(buf, inBuffer + conn->inCursor, slen);
conn->inCursor = ++inCursor;
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "From backend> \"%s\"\n",
buf->data);
return 0;
}
int
pqPuts(const char *s, PGconn *conn)
{
if (pqPutBytes(s, strlen(s) + 1, conn))
return EOF;
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "To backend> %s\n", s);
return 0;
}
/*
* pqGetnchar:
* get a string of exactly len bytes in buffer s, no null termination
*/
int
pqGetnchar(char *s, size_t len, PGconn *conn)
{
if (len < 0 || len > (size_t) (conn->inEnd - conn->inCursor))
return EOF;
memcpy(s, conn->inBuffer + conn->inCursor, len);
/* no terminating null */
conn->inCursor += len;
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "From backend (%lu)> %.*s\n", (unsigned long) len, (int) len, s);
return 0;
}
/*
* pqPutnchar:
* send a string of exactly len bytes, no null termination needed
*/
int
pqPutnchar(const char *s, size_t len, PGconn *conn)
{
if (pqPutBytes(s, len, conn))
return EOF;
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "To backend> %.*s\n", (int) len, s);
return 0;
}
/*
* pqGetInt
* read a 2 or 4 byte integer and convert from network byte order
* to local byte order
*/
int
pqGetInt(int *result, size_t bytes, PGconn *conn)
{
uint16 tmp2;
uint32 tmp4;
char noticeBuf[64];
switch (bytes)
{
case 2:
if (conn->inCursor + 2 > conn->inEnd)
return EOF;
memcpy(&tmp2, conn->inBuffer + conn->inCursor, 2);
conn->inCursor += 2;
*result = (int) ntohs(tmp2);
break;
case 4:
if (conn->inCursor + 4 > conn->inEnd)
return EOF;
memcpy(&tmp4, conn->inBuffer + conn->inCursor, 4);
conn->inCursor += 4;
*result = (int) ntohl(tmp4);
break;
default:
snprintf(noticeBuf, sizeof(noticeBuf),
libpq_gettext("integer of size %lu not supported by pqGetInt\n"),
(unsigned long) bytes);
DONOTICE(conn, noticeBuf);
return EOF;
}
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "From backend (#%lu)> %d\n", (unsigned long) bytes, *result);
return 0;
}
/*
* pqPutInt
* send an integer of 2 or 4 bytes, converting from host byte order
* to network byte order.
*/
int
pqPutInt(int value, size_t bytes, PGconn *conn)
{
uint16 tmp2;
uint32 tmp4;
char noticeBuf[64];
switch (bytes)
{
case 2:
tmp2 = htons((uint16) value);
if (pqPutBytes((const char *) &tmp2, 2, conn))
return EOF;
break;
case 4:
tmp4 = htonl((uint32) value);
if (pqPutBytes((const char *) &tmp4, 4, conn))
return EOF;
break;
default:
snprintf(noticeBuf, sizeof(noticeBuf),
libpq_gettext("integer of size %lu not supported by pqPutInt\n"),
(unsigned long) bytes);
DONOTICE(conn, noticeBuf);
return EOF;
}
if (conn->Pfdebug)
fprintf(conn->Pfdebug, "To backend (%lu#)> %d\n", (unsigned long) bytes, value);
return 0;
}
/*
* pqReadReady: is select() saying the file is ready to read?
* JAB: -or- if SSL is enabled and used, is it buffering bytes?
* Returns -1 on failure, 0 if not ready, 1 if ready.
*/
int
pqReadReady(PGconn *conn)
{
fd_set input_mask;
struct timeval timeout;
if (!conn || conn->sock < 0)
return -1;
/* JAB: Check for SSL library buffering read bytes */
#ifdef USE_SSL
if (conn->ssl && SSL_pending(conn->ssl) > 0)
{
/* short-circuit the select */
return 1;
}
#endif
retry1:
FD_ZERO(&input_mask);
FD_SET(conn->sock, &input_mask);
timeout.tv_sec = 0;
timeout.tv_usec = 0;
if (select(conn->sock + 1, &input_mask, (fd_set *) NULL, (fd_set *) NULL,
&timeout) < 0)
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry1;
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("select() failed: %s\n"),
SOCK_STRERROR(SOCK_ERRNO));
return -1;
}
return FD_ISSET(conn->sock, &input_mask) ? 1 : 0;
}
/*
* pqWriteReady: is select() saying the file is ready to write?
* Returns -1 on failure, 0 if not ready, 1 if ready.
*/
int
pqWriteReady(PGconn *conn)
{
fd_set input_mask;
struct timeval timeout;
if (!conn || conn->sock < 0)
return -1;
retry2:
FD_ZERO(&input_mask);
FD_SET(conn->sock, &input_mask);
timeout.tv_sec = 0;
timeout.tv_usec = 0;
if (select(conn->sock + 1, (fd_set *) NULL, &input_mask, (fd_set *) NULL,
&timeout) < 0)
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry2;
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("select() failed: %s\n"),
SOCK_STRERROR(SOCK_ERRNO));
return -1;
}
return FD_ISSET(conn->sock, &input_mask) ? 1 : 0;
}
/* ----------
* pqReadData: read more data, if any is available
* Possible return values:
* 1: successfully loaded at least one more byte
* 0: no data is presently available, but no error detected
* -1: error detected (including EOF = connection closure);
* conn->errorMessage set
* NOTE: callers must not assume that pointers or indexes into conn->inBuffer
* remain valid across this call!
* ----------
*/
int
pqReadData(PGconn *conn)
{
int someread = 0;
int nread;
if (conn->sock < 0)
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("connection not open\n"));
return -1;
}
/* Left-justify any data in the buffer to make room */
if (conn->inStart < conn->inEnd)
{
if (conn->inStart > 0)
{
memmove(conn->inBuffer, conn->inBuffer + conn->inStart,
conn->inEnd - conn->inStart);
conn->inEnd -= conn->inStart;
conn->inCursor -= conn->inStart;
conn->inStart = 0;
}
}
else
{
/* buffer is logically empty, reset it */
conn->inStart = conn->inCursor = conn->inEnd = 0;
}
/*
* If the buffer is fairly full, enlarge it. We need to be able to
* enlarge the buffer in case a single message exceeds the initial
* buffer size. We enlarge before filling the buffer entirely so as
* to avoid asking the kernel for a partial packet. The magic constant
* here should be large enough for a TCP packet or Unix pipe
* bufferload. 8K is the usual pipe buffer size, so...
*/
if (conn->inBufSize - conn->inEnd < 8192)
{
int newSize = conn->inBufSize * 2;
char *newBuf = (char *) realloc(conn->inBuffer, newSize);
if (newBuf)
{
conn->inBuffer = newBuf;
conn->inBufSize = newSize;
}
}
/* OK, try to read some data */
retry3:
nread = pqsecure_read(conn, conn->inBuffer + conn->inEnd,
conn->inBufSize - conn->inEnd);
if (nread < 0)
{
if (SOCK_ERRNO == EINTR)
goto retry3;
/* Some systems return EAGAIN/EWOULDBLOCK for no data */
#ifdef EAGAIN
if (SOCK_ERRNO == EAGAIN)
return someread;
#endif
#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN))
if (SOCK_ERRNO == EWOULDBLOCK)
return someread;
#endif
/* We might get ECONNRESET here if using TCP and backend died */
#ifdef ECONNRESET
if (SOCK_ERRNO == ECONNRESET)
goto definitelyFailed;
#endif
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not receive data from server: %s\n"),
SOCK_STRERROR(SOCK_ERRNO));
return -1;
}
if (nread > 0)
{
conn->inEnd += nread;
/*
* Hack to deal with the fact that some kernels will only give us
* back 1 packet per recv() call, even if we asked for more and
* there is more available. If it looks like we are reading a
* long message, loop back to recv() again immediately, until we
* run out of data or buffer space. Without this, the
* block-and-restart behavior of libpq's higher levels leads to
* O(N^2) performance on long messages.
*
* Since we left-justified the data above, conn->inEnd gives the
* amount of data already read in the current message. We
* consider the message "long" once we have acquired 32k ...
*/
if (conn->inEnd > 32768 &&
(conn->inBufSize - conn->inEnd) >= 8192)
{
someread = 1;
goto retry3;
}
return 1;
}
if (someread)
return 1; /* got a zero read after successful tries */
/*
* A return value of 0 could mean just that no data is now available,
* or it could mean EOF --- that is, the server has closed the
* connection. Since we have the socket in nonblock mode, the only way
* to tell the difference is to see if select() is saying that the
* file is ready. Grumble. Fortunately, we don't expect this path to
* be taken much, since in normal practice we should not be trying to
* read data unless the file selected for reading already.
*/
switch (pqReadReady(conn))
{
case 0:
/* definitely no data available */
return 0;
case 1:
/* ready for read */
break;
default:
goto definitelyFailed;
}
/*
* Still not sure that it's EOF, because some data could have just
* arrived.
*/
retry4:
nread = pqsecure_read(conn, conn->inBuffer + conn->inEnd,
conn->inBufSize - conn->inEnd);
if (nread < 0)
{
if (SOCK_ERRNO == EINTR)
goto retry4;
/* Some systems return EAGAIN/EWOULDBLOCK for no data */
#ifdef EAGAIN
if (SOCK_ERRNO == EAGAIN)
return 0;
#endif
#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN))
if (SOCK_ERRNO == EWOULDBLOCK)
return 0;
#endif
/* We might get ECONNRESET here if using TCP and backend died */
#ifdef ECONNRESET
if (SOCK_ERRNO == ECONNRESET)
goto definitelyFailed;
#endif
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not receive data from server: %s\n"),
SOCK_STRERROR(SOCK_ERRNO));
return -1;
}
if (nread > 0)
{
conn->inEnd += nread;
return 1;
}
/*
* OK, we are getting a zero read even though select() says ready.
* This means the connection has been closed. Cope.
*/
definitelyFailed:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"server closed the connection unexpectedly\n"
"\tThis probably means the server terminated abnormally\n"
"\tbefore or while processing the request.\n"));
conn->status = CONNECTION_BAD; /* No more connection to backend */
pqsecure_close(conn);
#ifdef WIN32
closesocket(conn->sock);
#else
close(conn->sock);
#endif
conn->sock = -1;
return -1;
}
/*
* pqSendSome: send any data waiting in the output buffer.
*
* Return 0 on success, -1 on failure, and 1 when data remains because the
* socket would block and the connection is non-blocking.
*/
int
pqSendSome(PGconn *conn)
{
char *ptr = conn->outBuffer;
int len = conn->outCount;
if (conn->sock < 0)
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("connection not open\n"));
return -1;
}
/*
* don't try to send zero data; this allows us to use this function
* without much worry about overhead
*/
if (len == 0)
return (0);
/* while there's still data to send */
while (len > 0)
{
int sent;
sent = pqsecure_write(conn, ptr, len);
if (sent < 0)
{
/*
* Anything except EAGAIN or EWOULDBLOCK is trouble. If it's
* EPIPE or ECONNRESET, assume we've lost the backend
* connection permanently.
*/
switch (SOCK_ERRNO)
{
#ifdef EAGAIN
case EAGAIN:
break;
#endif
#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN))
case EWOULDBLOCK:
break;
#endif
case EINTR:
continue;
case EPIPE:
#ifdef ECONNRESET
case ECONNRESET:
#endif
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"server closed the connection unexpectedly\n"
"\tThis probably means the server terminated abnormally\n"
"\tbefore or while processing the request.\n"));
/*
* We used to close the socket here, but that's a bad
* idea since there might be unread data waiting
* (typically, a WARNING message from the backend
* telling us it's committing hara-kiri...). Leave
* the socket open until pqReadData finds no more data
* can be read.
*/
return -1;
default:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send data to server: %s\n"),
SOCK_STRERROR(SOCK_ERRNO));
/* We don't assume it's a fatal error... */
return -1;
}
}
else
{
ptr += sent;
len -= sent;
}
if (len > 0)
{
/* We didn't send it all, wait till we can send more */
/*
* if the socket is in non-blocking mode we may need to abort
* here and return 1 to indicate that data is still pending.
*/
#ifdef USE_SSL
/* can't do anything for our SSL users yet */
if (conn->ssl == NULL)
{
#endif
if (pqIsnonblocking(conn))
{
/* shift the contents of the buffer */
memmove(conn->outBuffer, ptr, len);
conn->outCount = len;
return 1;
}
#ifdef USE_SSL
}
#endif
if (pqWait(FALSE, TRUE, conn))
return -1;
}
}
conn->outCount = 0;
if (conn->Pfdebug)
fflush(conn->Pfdebug);
return 0;
}
/*
* pqFlush: send any data waiting in the output buffer
*
* Implemented in terms of pqSendSome to recreate the old behavior, which
* returned 0 if all data was sent and EOF otherwise.  EOF was returned
* regardless of whether an error occurred or merely not all data could
* be sent on a non-blocking socket.
*/
int
pqFlush(PGconn *conn)
{
if (pqSendSome(conn))
return EOF;
return 0;
}
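/*
 * Illustrative sketch (a hypothetical caller, not code from this file):
 * a caller that must drain the whole output buffer could loop on
 * pqSendSome, waiting for write-readiness between attempts:
 *
 *		int		r;
 *
 *		while ((r = pqSendSome(conn)) > 0)
 *		{
 *			if (pqWait(FALSE, TRUE, conn))
 *				return EOF;			error while waiting
 *		}
 *		if (r < 0)
 *			return EOF;				send error, errorMessage already set
 *		return 0;					output buffer is now empty
 *
 * A truly asynchronous application would instead stop after the first
 * return value of 1, go back to its own event loop, and retry once
 * select() reports the socket writable again.
 */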
/*
* pqWait: wait until we can read or write the connection socket
*
* JAB: If SSL enabled and used and forRead, buffered bytes short-circuit the
* call to select().
*
* We also stop waiting and return if the kernel flags an exception condition
* on the socket. The actual error condition will be detected and reported
* when the caller tries to read or write the socket.
*/
int
pqWait(int forRead, int forWrite, PGconn *conn)
{
return pqWaitTimed(forRead, forWrite, conn, (const struct timeval *) NULL);
}
int
pqWaitTimed(int forRead, int forWrite, PGconn *conn, const struct timeval *timeout)
{
fd_set input_mask;
fd_set output_mask;
fd_set except_mask;
struct timeval tmp_timeout;
struct timeval *ptmp_timeout = NULL;
if (conn->sock < 0)
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("connection not open\n"));
return EOF;
}
/* JAB: Check for SSL library buffering read bytes */
#ifdef USE_SSL
if (forRead && conn->ssl && SSL_pending(conn->ssl) > 0)
{
/* short-circuit the select */
return 0;
}
#endif
if (forRead || forWrite)
{
retry5:
FD_ZERO(&input_mask);
FD_ZERO(&output_mask);
FD_ZERO(&except_mask);
if (forRead)
FD_SET(conn->sock, &input_mask);
if (forWrite)
FD_SET(conn->sock, &output_mask);
FD_SET(conn->sock, &except_mask);
if (NULL != timeout)
{
/*
* select() may modify timeout argument on some platforms so
* use copy.
* XXX Do we really want to do that? If select() returns
* the number of seconds remaining, we are resetting
* the timeout to its original value. This will yield
* incorrect timings when select() is interrupted.
* bjm 2002-10-14
*/
tmp_timeout = *timeout;
ptmp_timeout = &tmp_timeout;
}
if (select(conn->sock + 1, &input_mask, &output_mask,
&except_mask, ptmp_timeout) < 0)
{
if (SOCK_ERRNO == EINTR)
goto retry5;
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("select() failed: %s\n"),
SOCK_STRERROR(SOCK_ERRNO));
return EOF;
}
}
return 0;
}
/*
* A couple of "miscellaneous" multibyte related functions. They used
* to be in fe-print.c but that file is doomed.
*/
/*
* returns the byte length of the character beginning at s, using the
* specified encoding.
*/
int
PQmblen(const unsigned char *s, int encoding)
{
return (pg_encoding_mblen(encoding, s));
}
/*
* Get encoding id from environment variable PGCLIENTENCODING.
*/
int
PQenv2encoding(void)
{
char *str;
int encoding = PG_SQL_ASCII;
str = getenv("PGCLIENTENCODING");
if (str && *str != '\0')
encoding = pg_char_to_encoding(str);
return (encoding);
}
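/*
 * Illustrative only: with the environment variable set to, e.g.,
 *		PGCLIENTENCODING=LATIN1		(a sample value, not a default)
 * PQenv2encoding() returns whatever id pg_char_to_encoding() maps that
 * name to; when the variable is unset or empty it returns PG_SQL_ASCII.
 */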
#ifdef ENABLE_NLS
char *
libpq_gettext(const char *msgid)
{
static int already_bound = 0;
if (!already_bound)
{
already_bound = 1;
bindtextdomain("libpq", LOCALEDIR);
}
return dgettext("libpq", msgid);
}
#endif /* ENABLE_NLS */