missed a few files

PostgreSQL Daemon 2004-01-19 20:08:27 +00:00
parent 2a9bf5b33d
commit b195c10df7
12 changed files with 0 additions and 2271 deletions

View File

@@ -1,6 +0,0 @@
.classpath
.project
.externalToolBuilders
build
build.properties
jars

View File

@@ -1 +0,0 @@
Driver.java

View File

@@ -1,265 +0,0 @@
package org.postgresql.jdbc2.optional;
import javax.naming.*;
import java.io.PrintWriter;
import java.sql.*;
/**
* Base class for data sources and related classes.
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
* @version $Revision: 1.3 $
*/
public abstract class BaseDataSource implements Referenceable
{
// Load the normal driver, since we'll use it to actually connect to the
// database. That way we don't have to maintain the connecting code in
// multiple places.
static {
try
{
Class.forName("org.postgresql.Driver");
}
catch (ClassNotFoundException e)
{
System.err.println("PostgreSQL DataSource unable to load PostgreSQL JDBC Driver");
}
}
// Needed to implement the DataSource/ConnectionPoolDataSource interfaces
private transient PrintWriter logger;
// Don't track loginTimeout, since we'd just ignore it anyway
// Standard properties, defined in the JDBC 2.0 Optional Package spec
private String serverName = "localhost";
private String databaseName;
private String user;
private String password;
private int portNumber;
/**
* Gets a connection to the PostgreSQL database. The database is identified by the
* DataSource properties serverName, databaseName, and portNumber. The user to
* connect as is identified by the DataSource properties user and password.
*
* @return A valid database connection.
* @throws SQLException
* Occurs when the database connection cannot be established.
*/
public Connection getConnection() throws SQLException
{
return getConnection(user, password);
}
/**
* Gets a connection to the PostgreSQL database. The database is identified by the
* DataSource properties serverName, databaseName, and portNumber. The user to
* connect as is identified by the arguments user and password, which override
* the DataSource properties by the same name.
*
* @return A valid database connection.
* @throws SQLException
* Occurs when the database connection cannot be established.
*/
public Connection getConnection(String user, String password) throws SQLException
{
try
{
Connection con = DriverManager.getConnection(getUrl(), user, password);
if (logger != null)
{
logger.println("Created a non-pooled connection for " + user + " at " + getUrl());
}
return con;
}
catch (SQLException e)
{
if (logger != null)
{
logger.println("Failed to create a non-pooled connection for " + user + " at " + getUrl() + ": " + e);
}
throw e;
}
}
/**
* This DataSource does not support a configurable login timeout.
* @return 0
*/
public int getLoginTimeout() throws SQLException
{
return 0;
}
/**
* This DataSource does not support a configurable login timeout. Any value
* provided here will be ignored.
*/
public void setLoginTimeout(int i) throws SQLException
{}
/**
* Gets the log writer used to log connections opened.
*/
public PrintWriter getLogWriter() throws SQLException
{
return logger;
}
/**
* The DataSource will note every connection opened to the provided log writer.
*/
public void setLogWriter(PrintWriter printWriter) throws SQLException
{
logger = printWriter;
}
/**
* Gets the name of the host the PostgreSQL database is running on.
*/
public String getServerName()
{
return serverName;
}
/**
* Sets the name of the host the PostgreSQL database is running on. If this
* is changed, it will only affect future calls to getConnection. The default
* value is <tt>localhost</tt>.
*/
public void setServerName(String serverName)
{
if (serverName == null || serverName.equals(""))
{
this.serverName = "localhost";
}
else
{
this.serverName = serverName;
}
}
/**
* Gets the name of the PostgreSQL database, running on the server identified
* by the serverName property.
*/
public String getDatabaseName()
{
return databaseName;
}
/**
* Sets the name of the PostgreSQL database, running on the server identified
* by the serverName property. If this is changed, it will only affect
* future calls to getConnection.
*/
public void setDatabaseName(String databaseName)
{
this.databaseName = databaseName;
}
/**
* Gets a description of this DataSource-ish thing. Must be customized by
* subclasses.
*/
public abstract String getDescription();
/**
* Gets the user to connect as by default. If this is not specified, you must
* use the getConnection method which takes a user and password as parameters.
*/
public String getUser()
{
return user;
}
/**
* Sets the user to connect as by default. If this is not specified, you must
* use the getConnection method which takes a user and password as parameters.
* If this is changed, it will only affect future calls to getConnection.
*/
public void setUser(String user)
{
this.user = user;
}
/**
* Gets the password to connect with by default. If this is not specified but a
* password is needed to log in, you must use the getConnection method which takes
* a user and password as parameters.
*/
public String getPassword()
{
return password;
}
/**
* Sets the password to connect with by default. If this is not specified but a
* password is needed to log in, you must use the getConnection method which takes
* a user and password as parameters. If this is changed, it will only affect
* future calls to getConnection.
*/
public void setPassword(String password)
{
this.password = password;
}
/**
* Gets the port which the PostgreSQL server is listening on for TCP/IP
* connections.
*
* @return The port, or 0 if the default port will be used.
*/
public int getPortNumber()
{
return portNumber;
}
/**
* Sets the port which the PostgreSQL server is listening on for TCP/IP
* connections. Be sure the -i flag is passed to postmaster when PostgreSQL
* is started. If this is not set, or set to 0, the default port will be used.
*/
public void setPortNumber(int portNumber)
{
this.portNumber = portNumber;
}
/**
* Generates a DriverManager URL from the other properties supplied.
*/
private String getUrl()
{
return "jdbc:postgresql://" + serverName + (portNumber == 0 ? "" : ":" + portNumber) + "/" + databaseName;
}
/**
* Generates a reference using the appropriate object factory. This
* implementation uses the JDBC 2 optional package object factory.
*/
protected Reference createReference()
{
return new Reference(getClass().getName(), PGObjectFactory.class.getName(), null);
}
public Reference getReference() throws NamingException
{
Reference ref = createReference();
ref.add(new StringRefAddr("serverName", serverName));
if (portNumber != 0)
{
ref.add(new StringRefAddr("portNumber", Integer.toString(portNumber)));
}
ref.add(new StringRefAddr("databaseName", databaseName));
if (user != null)
{
ref.add(new StringRefAddr("user", user));
}
if (password != null)
{
ref.add(new StringRefAddr("password", password));
}
return ref;
}
}
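
For orientation, a minimal usage sketch of the API defined above (hypothetical application code, not part of this commit; the host, database, user, and password values are placeholders). Any concrete subclass, such as SimpleDataSource below, inherits these properties unchanged:

import java.sql.Connection;
import org.postgresql.jdbc2.optional.SimpleDataSource;

public class BaseDataSourceExample
{
    public static void main(String[] args) throws Exception
    {
        SimpleDataSource ds = new SimpleDataSource();
        ds.setServerName("db.example.com");   // defaults to "localhost" when unset or ""
        ds.setPortNumber(5432);               // 0 means "use the default port"
        ds.setDatabaseName("test");
        ds.setUser("testuser");
        ds.setPassword("secret");

        // Uses the default user/password set above.
        Connection con = ds.getConnection();
        con.close();

        // Overrides user and password for this call only.
        Connection other = ds.getConnection("otheruser", "otherpw");
        other.close();
    }
}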

View File

@@ -1,82 +0,0 @@
package org.postgresql.jdbc2.optional;
import javax.sql.ConnectionPoolDataSource;
import javax.sql.PooledConnection;
import java.sql.SQLException;
import java.io.Serializable;
/**
* PostgreSQL implementation of ConnectionPoolDataSource. The app server or
* middleware vendor should provide a DataSource implementation that takes advantage
* of this ConnectionPoolDataSource. If not, you can use the PostgreSQL implementation
* known as PoolingDataSource, but that should only be used if your server or middleware
* vendor does not provide their own. Why? The server may want to reuse the same
* Connection across all EJBs requesting a Connection within the same Transaction, or
* provide other similar advanced features.
*
* <p>In any case, in order to use this ConnectionPoolDataSource, you must set the property
* databaseName. The settings for serverName, portNumber, user, and password are
* optional. Note: these properties are declared in the superclass.</p>
*
* <p>This implementation supports JDK 1.3 and higher.</p>
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
* @version $Revision: 1.2 $
*/
public class ConnectionPool extends BaseDataSource implements Serializable, ConnectionPoolDataSource
{
private boolean defaultAutoCommit = false;
/**
* Gets a description of this DataSource.
*/
public String getDescription()
{
return "ConnectionPoolDataSource from " + org.postgresql.Driver.getVersion();
}
/**
* Gets a connection which may be pooled by the app server or middleware
* implementation of DataSource.
*
* @throws java.sql.SQLException
* Occurs when the physical database connection cannot be established.
*/
public PooledConnection getPooledConnection() throws SQLException
{
return new PooledConnectionImpl(getConnection(), defaultAutoCommit);
}
/**
* Gets a connection which may be pooled by the app server or middleware
* implementation of DataSource.
*
* @throws java.sql.SQLException
* Occurs when the physical database connection cannot be established.
*/
public PooledConnection getPooledConnection(String user, String password) throws SQLException
{
return new PooledConnectionImpl(getConnection(user, password), defaultAutoCommit);
}
/**
* Gets whether connections supplied by this pool will have autoCommit
* turned on by default. The default value is <tt>false</tt>, so that
* autoCommit will be turned off by default.
*/
public boolean isDefaultAutoCommit()
{
return defaultAutoCommit;
}
/**
* Sets whether connections supplied by this pool will have autoCommit
* turned on by default. The default value is <tt>false</tt>, so that
* autoCommit will be turned off by default.
*/
public void setDefaultAutoCommit(boolean defaultAutoCommit)
{
this.defaultAutoCommit = defaultAutoCommit;
}
}
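
A sketch of the contract from the middleware side (hypothetical pooling code; in practice the app server, or the PoolingDataSource elsewhere in this commit, plays this role): the pool asks the ConnectionPoolDataSource for PooledConnections and hands logical handles to clients.

import java.sql.Connection;
import javax.sql.PooledConnection;
import org.postgresql.jdbc2.optional.ConnectionPool;

public class ConnectionPoolExample
{
    public static void main(String[] args) throws Exception
    {
        ConnectionPool cpds = new ConnectionPool();
        cpds.setDatabaseName("test");
        cpds.setUser("pooluser");
        cpds.setPassword("secret");

        // One physical connection, wrapped for pooling.
        PooledConnection pooled = cpds.getPooledConnection();

        // The handle a client would actually see.
        Connection handle = pooled.getConnection();
        // ... client work; autoCommit is off by default (defaultAutoCommit = false) ...
        handle.close();      // returns the handle; the physical connection stays open

        pooled.close();      // closes the physical connection when the pool discards it
    }
}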

View File

@@ -1,108 +0,0 @@
package org.postgresql.jdbc2.optional;
import javax.naming.spi.ObjectFactory;
import javax.naming.*;
import java.util.Hashtable;
/**
* Returns a DataSource-ish thing based on a JNDI reference. In the case of a
* SimpleDataSource or ConnectionPool, a new instance is created each time, as
* there is no connection state to maintain. In the case of a PoolingDataSource,
* the same DataSource will be returned for every invocation within the same
* VM/ClassLoader, so that the state of the connections in the pool will be
* consistent.
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
* @version $Revision: 1.3 $
*/
public class PGObjectFactory implements ObjectFactory
{
/**
* Dereferences a PostgreSQL DataSource. Other types of references are
* ignored.
*/
public Object getObjectInstance(Object obj, Name name, Context nameCtx,
Hashtable environment) throws Exception
{
Reference ref = (Reference)obj;
if (ref.getClassName().equals(SimpleDataSource.class.getName()))
{
return loadSimpleDataSource(ref);
}
else if (ref.getClassName().equals(ConnectionPool.class.getName()))
{
return loadConnectionPool(ref);
}
else if (ref.getClassName().equals(PoolingDataSource.class.getName()))
{
return loadPoolingDataSource(ref);
}
else
{
return null;
}
}
private Object loadPoolingDataSource(Reference ref)
{
// If DataSource exists, return it
String name = getProperty(ref, "dataSourceName");
PoolingDataSource pds = PoolingDataSource.getDataSource(name);
if (pds != null)
{
return pds;
}
// Otherwise, create a new one
pds = new PoolingDataSource();
pds.setDataSourceName(name);
loadBaseDataSource(pds, ref);
String min = getProperty(ref, "initialConnections");
if (min != null)
{
pds.setInitialConnections(Integer.parseInt(min));
}
String max = getProperty(ref, "maxConnections");
if (max != null)
{
pds.setMaxConnections(Integer.parseInt(max));
}
return pds;
}
private Object loadSimpleDataSource(Reference ref)
{
SimpleDataSource ds = new SimpleDataSource();
return loadBaseDataSource(ds, ref);
}
private Object loadConnectionPool(Reference ref)
{
ConnectionPool cp = new ConnectionPool();
return loadBaseDataSource(cp, ref);
}
protected Object loadBaseDataSource(BaseDataSource ds, Reference ref)
{
ds.setDatabaseName(getProperty(ref, "databaseName"));
ds.setPassword(getProperty(ref, "password"));
String port = getProperty(ref, "portNumber");
if (port != null)
{
ds.setPortNumber(Integer.parseInt(port));
}
ds.setServerName(getProperty(ref, "serverName"));
ds.setUser(getProperty(ref, "user"));
return ds;
}
protected String getProperty(Reference ref, String s)
{
RefAddr addr = ref.get(s);
if (addr == null)
{
return null;
}
return (String)addr.getContent();
}
}
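
A sketch of the JNDI round trip this factory supports (hypothetical; the InitialContext provider and environment are set up elsewhere and omitted here). Binding stores the Reference produced by BaseDataSource.getReference(); lookup resolves it through getObjectInstance above:

import java.sql.Connection;
import javax.naming.Context;
import javax.naming.InitialContext;
import javax.sql.DataSource;
import org.postgresql.jdbc2.optional.SimpleDataSource;

public class PGObjectFactoryExample
{
    public static void main(String[] args) throws Exception
    {
        SimpleDataSource ds = new SimpleDataSource();
        ds.setDatabaseName("test");
        ds.setUser("testuser");
        ds.setPassword("secret");

        Context ctx = new InitialContext();    // naming provider configured elsewhere
        ctx.bind("DataSource", ds);            // stored as a Reference

        // Possibly in another component: the Reference is rebuilt into a
        // fresh SimpleDataSource by PGObjectFactory.getObjectInstance().
        DataSource bound = (DataSource) ctx.lookup("DataSource");
        Connection con = bound.getConnection();
        con.close();
    }
}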

View File

@@ -1,392 +0,0 @@
package org.postgresql.jdbc2.optional;
import javax.sql.*;
import java.sql.*;
import java.util.*;
import java.lang.reflect.*;
import org.postgresql.PGConnection;
/**
* PostgreSQL implementation of the PooledConnection interface. This shouldn't
* be used directly, as the pooling client should just interact with the
* ConnectionPool instead.
* @see ConnectionPool
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
* @author Csaba Nagy (ncsaba@yahoo.com)
* @version $Revision: 1.8 $
*/
public class PooledConnectionImpl implements PooledConnection
{
private List listeners = new LinkedList();
private Connection con;
private ConnectionHandler last;
private boolean autoCommit;
/**
* Creates a new PooledConnection representing the specified physical
* connection.
*/
protected PooledConnectionImpl(Connection con, boolean autoCommit)
{
this.con = con;
this.autoCommit = autoCommit;
}
/**
* Adds a listener for close or fatal error events on the connection
* handed out to a client.
*/
public void addConnectionEventListener(ConnectionEventListener connectionEventListener)
{
listeners.add(connectionEventListener);
}
/**
* Removes a listener for close or fatal error events on the connection
* handed out to a client.
*/
public void removeConnectionEventListener(ConnectionEventListener connectionEventListener)
{
listeners.remove(connectionEventListener);
}
/**
* Closes the physical database connection represented by this
* PooledConnection. If any client has a connection based on
* this PooledConnection, it is forcibly closed as well.
*/
public void close() throws SQLException
{
if (last != null)
{
last.close();
if (!con.getAutoCommit())
{
try
{
con.rollback();
}
catch (SQLException e)
{}
}
}
try
{
con.close();
}
finally
{
con = null;
}
}
/**
* Gets a handle for a client to use. This is a wrapper around the
* physical connection, so the client can call close and it will just
* return the connection to the pool without really closing the
* physical connection.
*
* <p>According to the JDBC 2.0 Optional Package spec (6.2.3), only one
* client may have an active handle to the connection at a time, so if
* there is a previous handle active when this is called, the previous
* one is forcibly closed and its work rolled back.</p>
*/
public Connection getConnection() throws SQLException
{
if (con == null)
{
// Before throwing the exception, let's notify the registered listeners about the error
final SQLException sqlException = new SQLException("This PooledConnection has already been closed!");
fireConnectionFatalError(sqlException);
throw sqlException;
}
// If any error occurs while opening a new connection, the listeners
// have to be notified. This gives connection pools a chance to
// eliminate bad pooled connections.
try
{
// Only one connection can be open at a time from this PooledConnection. See JDBC 2.0 Optional Package spec section 6.2.3
if (last != null)
{
last.close();
if (!con.getAutoCommit())
{
try
{
con.rollback();
}
catch (SQLException e)
{}
}
con.clearWarnings();
}
con.setAutoCommit(autoCommit);
}
catch (SQLException sqlException)
{
fireConnectionFatalError(sqlException);
throw (SQLException)sqlException.fillInStackTrace();
}
ConnectionHandler handler = new ConnectionHandler(con);
last = handler;
Connection con = (Connection)Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{Connection.class, PGConnection.class}, handler);
last.setProxy(con);
return con;
}
/**
* Used to fire a connection closed event to all listeners.
*/
void fireConnectionClosed()
{
ConnectionEvent evt = null;
// Copy the listener list so the listener can remove itself during this method call
ConnectionEventListener[] local = (ConnectionEventListener[]) listeners.toArray(new ConnectionEventListener[listeners.size()]);
for (int i = 0; i < local.length; i++)
{
ConnectionEventListener listener = local[i];
if (evt == null)
{
evt = new ConnectionEvent(this);
}
listener.connectionClosed(evt);
}
}
/**
* Used to fire a connection error event to all listeners.
*/
void fireConnectionFatalError(SQLException e)
{
ConnectionEvent evt = null;
// Copy the listener list so the listener can remove itself during this method call
ConnectionEventListener[] local = (ConnectionEventListener[])listeners.toArray(new ConnectionEventListener[listeners.size()]);
for (int i = 0; i < local.length; i++)
{
ConnectionEventListener listener = local[i];
if (evt == null)
{
evt = new ConnectionEvent(this, e);
}
listener.connectionErrorOccurred(evt);
}
}
/**
* Instead of declaring a class implementing Connection, which would have
* to be updated for every JDK rev, use a dynamic proxy to handle all
* calls through the Connection interface. This is the part that
* requires JDK 1.3 or higher, though JDK 1.2 could be supported with a
* 3rd-party proxy package.
*/
private class ConnectionHandler implements InvocationHandler
{
private Connection con;
private Connection proxy; // the Connection the client is currently using, which is a proxy
private boolean automatic = false;
public ConnectionHandler(Connection con)
{
this.con = con;
}
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable
{
// From Object
if (method.getDeclaringClass().getName().equals("java.lang.Object"))
{
if (method.getName().equals("toString"))
{
return "Pooled connection wrapping physical connection " + con;
}
if (method.getName().equals("hashCode"))
{
return new Integer(con.hashCode());
}
if (method.getName().equals("equals"))
{
if (args[0] == null)
{
return Boolean.FALSE;
}
try
{
return Proxy.isProxyClass(args[0].getClass()) && ((ConnectionHandler) Proxy.getInvocationHandler(args[0])).con == con ? Boolean.TRUE : Boolean.FALSE;
}
catch (ClassCastException e)
{
return Boolean.FALSE;
}
}
try
{
return method.invoke(con, args);
}
catch (InvocationTargetException e)
{
throw e.getTargetException();
}
}
// All the rest is from the Connection or PGConnection interface
if (method.getName().equals("isClosed"))
{
return con == null ? Boolean.TRUE : Boolean.FALSE;
}
if (con == null)
{
throw new SQLException(automatic ? "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed" : "Connection has been closed");
}
if (method.getName().equals("close"))
{
SQLException ex = null;
if (!con.getAutoCommit())
{
try
{
con.rollback();
}
catch (SQLException e)
{
ex = e;
}
}
con.clearWarnings();
con = null;
proxy = null;
last = null;
fireConnectionClosed();
if (ex != null)
{
throw ex;
}
return null;
}
else if(method.getName().equals("createStatement"))
{
Statement st = (Statement)method.invoke(con, args);
return Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{Statement.class, org.postgresql.PGStatement.class}, new StatementHandler(this, st));
}
else if(method.getName().equals("prepareCall"))
{
Statement st = (Statement)method.invoke(con, args);
return Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{CallableStatement.class, org.postgresql.PGStatement.class}, new StatementHandler(this, st));
}
else if(method.getName().equals("prepareStatement"))
{
Statement st = (Statement)method.invoke(con, args);
return Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{PreparedStatement.class, org.postgresql.PGStatement.class}, new StatementHandler(this, st));
}
else
{
return method.invoke(con, args);
}
}
Connection getProxy() {
return proxy;
}
void setProxy(Connection proxy) {
this.proxy = proxy;
}
public void close()
{
if (con != null)
{
automatic = true;
}
con = null;
proxy = null;
// No close event fired here: see JDBC 2.0 Optional Package spec section 6.3
}
public boolean isClosed() {
return con == null;
}
}
/**
* Instead of declaring classes implementing Statement, PreparedStatement,
* and CallableStatement, which would have to be updated for every JDK rev,
* use a dynamic proxy to handle all calls through the Statement
* interfaces. This is the part that requires JDK 1.3 or higher, though
* JDK 1.2 could be supported with a 3rd-party proxy package.
*
* The StatementHandler is required in order to return the proper
* Connection proxy for the getConnection method.
*/
private static class StatementHandler implements InvocationHandler {
private ConnectionHandler con;
private Statement st;
public StatementHandler(ConnectionHandler con, Statement st) {
this.con = con;
this.st = st;
}
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable
{
// From Object
if (method.getDeclaringClass().getName().equals("java.lang.Object"))
{
if (method.getName().equals("toString"))
{
return "Pooled statement wrapping physical statement " + st;
}
if (method.getName().equals("hashCode"))
{
return new Integer(st.hashCode());
}
if (method.getName().equals("equals"))
{
if (args[0] == null)
{
return Boolean.FALSE;
}
try
{
return Proxy.isProxyClass(args[0].getClass()) && ((StatementHandler) Proxy.getInvocationHandler(args[0])).st == st ? Boolean.TRUE : Boolean.FALSE;
}
catch (ClassCastException e)
{
return Boolean.FALSE;
}
}
return method.invoke(st, args);
}
// All the rest is from the Statement interface
if (st == null || con.isClosed())
{
throw new SQLException("Statement has been closed");
}
if (method.getName().equals("close"))
{
try {
st.close();
} finally {
con = null;
st = null;
return null;
}
}
else if (method.getName().equals("getConnection"))
{
return con.getProxy(); // the proxied connection, not a physical connection
}
else
{
try
{
return method.invoke(st, args);
}
catch (InvocationTargetException e)
{
throw e.getTargetException();
}
}
}
}
}
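
To illustrate the event mechanism above (a hypothetical fragment of pool bookkeeping; the PoolingDataSource in this commit does the real version): closing the proxy handle fires connectionClosed rather than closing the physical connection.

import java.sql.Connection;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;
import org.postgresql.jdbc2.optional.ConnectionPool;

public class PooledConnectionEventExample
{
    public static void main(String[] args) throws Exception
    {
        ConnectionPool cpds = new ConnectionPool();
        cpds.setDatabaseName("test");
        cpds.setUser("pooluser");
        cpds.setPassword("secret");

        PooledConnection pc = cpds.getPooledConnection();
        pc.addConnectionEventListener(new ConnectionEventListener()
        {
            public void connectionClosed(ConnectionEvent event)
            {
                // the client closed its handle; return this PooledConnection to the pool
            }
            public void connectionErrorOccurred(ConnectionEvent event)
            {
                // fatal error; discard this PooledConnection
            }
        });

        Connection handle = pc.getConnection();   // dynamic proxy over the physical connection
        handle.close();                           // fires connectionClosed; the socket stays open
    }
}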

View File

@@ -1,494 +0,0 @@
package org.postgresql.jdbc2.optional;
import javax.sql.*;
import javax.naming.*;
import java.util.*;
import java.sql.Connection;
import java.sql.SQLException;
/**
* DataSource which uses connection pooling. <font color="red">Don't use this if
* your server/middleware vendor provides a connection pooling implementation
* which interfaces with the PostgreSQL ConnectionPoolDataSource implementation!</font>
* This class is provided as a convenience, but the JDBC Driver is really not
* supposed to handle the connection pooling algorithm. Instead, the server or
* middleware product is supposed to handle the mechanics of connection pooling,
* and use the PostgreSQL implementation of ConnectionPoolDataSource to provide
* the connections to pool.
*
* <p>If you're sure you want to use this, then you must set the properties
* dataSourceName, databaseName, user, and password (if required for the user).
* The settings for serverName, portNumber, initialConnections, and
* maxConnections are optional. Note that <i>only connections
* for the default user will be pooled!</i> Connections for other users will
* be normal non-pooled connections, and will not count against the maximum pool
* size limit.</p>
*
* <p>If you put this DataSource in JNDI, and access it from different JVMs (or
* otherwise load this class from different ClassLoaders), you'll end up with one
* pool per ClassLoader or VM. This is another area where a server-specific
* implementation may provide advanced features, such as using a single pool
* across all VMs in a cluster.</p>
*
* <p>This implementation supports JDK 1.3 and higher.</p>
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
* @version $Revision: 1.3 $
*/
public class PoolingDataSource extends BaseDataSource implements DataSource
{
private static Map dataSources = new HashMap();
static PoolingDataSource getDataSource(String name)
{
return (PoolingDataSource)dataSources.get(name);
}
// Additional Data Source properties
protected String dataSourceName; // Must be protected for subclasses to sync updates to it
private int initialConnections = 0;
private int maxConnections = 0;
// State variables
private boolean initialized = false;
private Stack available = new Stack();
private Stack used = new Stack();
private Object lock = new Object();
private ConnectionPool source;
/**
* Gets a description of this DataSource.
*/
public String getDescription()
{
return "Pooling DataSource '" + dataSourceName + " from " + org.postgresql.Driver.getVersion();
}
/**
* Ensures the DataSource properties are not changed after the DataSource has
* been used.
*
* @throws java.lang.IllegalStateException
* The Server Name cannot be changed after the DataSource has been
* used.
*/
public void setServerName(String serverName)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
super.setServerName(serverName);
}
/**
* Ensures the DataSource properties are not changed after the DataSource has
* been used.
*
* @throws java.lang.IllegalStateException
* The Database Name cannot be changed after the DataSource has been
* used.
*/
public void setDatabaseName(String databaseName)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
super.setDatabaseName(databaseName);
}
/**
* Ensures the DataSource properties are not changed after the DataSource has
* been used.
*
* @throws java.lang.IllegalStateException
* The User cannot be changed after the DataSource has been
* used.
*/
public void setUser(String user)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
super.setUser(user);
}
/**
* Ensures the DataSource properties are not changed after the DataSource has
* been used.
*
* @throws java.lang.IllegalStateException
* The Password cannot be changed after the DataSource has been
* used.
*/
public void setPassword(String password)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
super.setPassword(password);
}
/**
* Ensures the DataSource properties are not changed after the DataSource has
* been used.
*
* @throws java.lang.IllegalStateException
* The Port Number cannot be changed after the DataSource has been
* used.
*/
public void setPortNumber(int portNumber)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
super.setPortNumber(portNumber);
}
/**
* Gets the number of connections that will be created when this DataSource
* is initialized. If you do not call initialize explicitly, it will be
* initialized the first time a connection is drawn from it.
*/
public int getInitialConnections()
{
return initialConnections;
}
/**
* Sets the number of connections that will be created when this DataSource
* is initialized. If you do not call initialize explicitly, it will be
* initialized the first time a connection is drawn from it.
*
* @throws java.lang.IllegalStateException
* The Initial Connections cannot be changed after the DataSource has been
* used.
*/
public void setInitialConnections(int initialConnections)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
this.initialConnections = initialConnections;
}
/**
* Gets the maximum number of connections that the pool will allow. If a request
* comes in and this many connections are in use, the request will block until a
* connection is available. Note that connections for a user other than the
* default user will not be pooled and don't count against this limit.
*
* @return The maximum number of pooled connection allowed, or 0 for no maximum.
*/
public int getMaxConnections()
{
return maxConnections;
}
/**
* Sets the maximum number of connections that the pool will allow. If a request
* comes in and this many connections are in use, the request will block until a
* connection is available. Note that connections for a user other than the
* default user will not be pooled and don't count against this limit.
*
* @param maxConnections The maximum number of pooled connection to allow, or
* 0 for no maximum.
*
* @throws java.lang.IllegalStateException
* The Maximum Connections cannot be changed after the DataSource has been
* used.
*/
public void setMaxConnections(int maxConnections)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
this.maxConnections = maxConnections;
}
/**
* Gets the name of this DataSource. This uniquely identifies the DataSource.
* You cannot use more than one DataSource in the same VM with the same name.
*/
public String getDataSourceName()
{
return dataSourceName;
}
/**
* Sets the name of this DataSource. This is required, and uniquely identifies
* the DataSource. You cannot create or use more than one DataSource in the
* same VM with the same name.
*
* @throws java.lang.IllegalStateException
* The Data Source Name cannot be changed after the DataSource has been
* used.
* @throws java.lang.IllegalArgumentException
* Another PoolingDataSource with the same dataSourceName already
* exists.
*/
public void setDataSourceName(String dataSourceName)
{
if (initialized)
{
throw new IllegalStateException("Cannot set Data Source properties after DataSource has been used");
}
if (this.dataSourceName != null && dataSourceName != null && dataSourceName.equals(this.dataSourceName))
{
return ;
}
synchronized (dataSources)
{
if (getDataSource(dataSourceName) != null)
{
throw new IllegalArgumentException("DataSource with name '" + dataSourceName + "' already exists!");
}
if (this.dataSourceName != null)
{
dataSources.remove(this.dataSourceName);
}
this.dataSourceName = dataSourceName;
dataSources.put(dataSourceName, this);
}
}
/**
* Initializes this DataSource. If the initialConnections is greater than zero,
* that number of connections will be created. After this method is called,
* the DataSource properties cannot be changed. If you do not call this
* explicitly, it will be called the first time you get a connection from the
* DataSource.
* @throws java.sql.SQLException
* Occurs when the initialConnections is greater than zero, but the
* DataSource is not able to create enough physical connections.
*/
public void initialize() throws SQLException
{
synchronized (lock)
{
source = createConnectionPool();
source.setDatabaseName(getDatabaseName());
source.setPassword(getPassword());
source.setPortNumber(getPortNumber());
source.setServerName(getServerName());
source.setUser(getUser());
while (available.size() < initialConnections)
{
available.push(source.getPooledConnection());
}
initialized = true;
}
}
protected boolean isInitialized() {
return initialized;
}
/**
* Creates the appropriate ConnectionPool to use for this DataSource.
*/
protected ConnectionPool createConnectionPool() {
return new ConnectionPool();
}
/**
* Gets a <b>non-pooled</b> connection, unless the user and password are the
* same as the default values for this connection pool.
*
* @return A database connection, pooled only if it is for the default user and password.
* @throws SQLException
* Occurs when no pooled connection is available, and a new physical
* connection cannot be created.
*/
public Connection getConnection(String user, String password) throws SQLException
{
// If this is for the default user/password, use a pooled connection
if (user == null ||
(user.equals(getUser()) && ((password == null && getPassword() == null) || (password != null && password.equals(getPassword())))))
{
return getConnection();
}
// Otherwise, use a non-pooled connection
if (!initialized)
{
initialize();
}
return super.getConnection(user, password);
}
/**
* Gets a connection from the connection pool.
*
* @return A pooled connection.
* @throws SQLException
* Occurs when no pooled connection is available, and a new physical
* connection cannot be created.
*/
public Connection getConnection() throws SQLException
{
if (!initialized)
{
initialize();
}
return getPooledConnection();
}
/**
* Closes this DataSource, and all the pooled connections, whether in use or not.
*/
public void close()
{
synchronized (lock)
{
while (available.size() > 0)
{
PooledConnectionImpl pci = (PooledConnectionImpl)available.pop();
try
{
pci.close();
}
catch (SQLException e)
{}
}
available = null;
while (used.size() > 0)
{
PooledConnectionImpl pci = (PooledConnectionImpl)used.pop();
pci.removeConnectionEventListener(connectionEventListener);
try
{
pci.close();
}
catch (SQLException e)
{}
}
used = null;
}
removeStoredDataSource();
}
protected void removeStoredDataSource() {
synchronized (dataSources)
{
dataSources.remove(dataSourceName);
}
}
/**
* Gets a connection from the pool. Will get an available one if
* present, or create a new one if under the max limit. Will
* block if all used and a new one would exceed the max.
*/
private Connection getPooledConnection() throws SQLException
{
PooledConnection pc = null;
synchronized (lock)
{
if (available == null)
{
throw new SQLException("DataSource has been closed.");
}
while (true)
{
if (available.size() > 0)
{
pc = (PooledConnection)available.pop();
used.push(pc);
break;
}
if (maxConnections == 0 || used.size() < maxConnections)
{
pc = source.getPooledConnection();
used.push(pc);
break;
}
else
{
try
{
// Wake up every second at a minimum
lock.wait(1000L);
}
catch (InterruptedException e)
{}
}
}
}
pc.addConnectionEventListener(connectionEventListener);
return pc.getConnection();
}
/**
* Notified when a pooled connection is closed, or a fatal error occurs
* on a pooled connection. This is the only way connections are marked
* as unused.
*/
private ConnectionEventListener connectionEventListener = new ConnectionEventListener()
{
public void connectionClosed(ConnectionEvent event)
{
((PooledConnection)event.getSource()).removeConnectionEventListener(this);
synchronized (lock)
{
if (available == null)
{
return ; // DataSource has been closed
}
boolean removed = used.remove(event.getSource());
if (removed)
{
available.push(event.getSource());
// There's now a new connection available
lock.notify();
}
else
{
// a connection error occurred
}
}
}
/**
* This is only called for fatal errors, where the physical connection is
* useless afterward and should be removed from the pool.
*/
public void connectionErrorOccurred(ConnectionEvent event)
{
((PooledConnection) event.getSource()).removeConnectionEventListener(this);
synchronized (lock)
{
if (available == null)
{
return ; // DataSource has been closed
}
used.remove(event.getSource());
// We're now at least 1 connection under the max
lock.notify();
}
}
};
/**
* Adds custom properties for this DataSource to the properties defined in
* the superclass.
*/
public Reference getReference() throws NamingException
{
Reference ref = super.getReference();
ref.add(new StringRefAddr("dataSourceName", dataSourceName));
if (initialConnections > 0)
{
ref.add(new StringRefAddr("initialConnections", Integer.toString(initialConnections)));
}
if (maxConnections > 0)
{
ref.add(new StringRefAddr("maxConnections", Integer.toString(maxConnections)));
}
return ref;
}
}
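
A configuration sketch (hypothetical values) tying together the properties and lifecycle described above:

import java.sql.Connection;
import org.postgresql.jdbc2.optional.PoolingDataSource;

public class PoolingDataSourceExample
{
    public static void main(String[] args) throws Exception
    {
        PoolingDataSource pool = new PoolingDataSource();
        pool.setDataSourceName("ExamplePool");   // required; must be unique per VM/ClassLoader
        pool.setDatabaseName("test");
        pool.setUser("pooluser");
        pool.setPassword("secret");
        pool.setInitialConnections(2);
        pool.setMaxConnections(10);
        pool.initialize();                       // optional; otherwise done on first getConnection()

        Connection con = pool.getConnection();   // pooled, since it is for the default user
        // ... work ...
        con.close();                             // returns the connection to the pool

        pool.close();                            // closes every pooled physical connection
    }
}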

View File

@@ -1,24 +0,0 @@
package org.postgresql.jdbc2.optional;
import javax.sql.DataSource;
import java.io.Serializable;
/**
* Simple DataSource which does not perform connection pooling. In order to use
* the DataSource, you must set the property databaseName. The settings for
* serverName, portNumber, user, and password are optional. Note: these properties
* are declared in the superclass.
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
* @version $Revision: 1.2 $
*/
public class SimpleDataSource extends BaseDataSource implements Serializable, DataSource
{
/**
* Gets a description of this DataSource.
*/
public String getDescription()
{
return "Non-Pooling DataSource from " + org.postgresql.Driver.getVersion();
}
}

View File

@@ -1,194 +0,0 @@
/*-------------------------------------------------------------------------
*
* BlobInputStream.java
* This is an implementation of an InputStream from a large object.
*
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/interfaces/jdbc/org/postgresql/largeobject/BlobInputStream.java,v 1.6 2003/11/29 19:52:11 pgsql Exp $
*
*-------------------------------------------------------------------------
*/
package org.postgresql.largeobject;
import java.io.InputStream;
import java.io.IOException;
import java.sql.SQLException;
public class BlobInputStream extends InputStream
{
/*
* The parent LargeObject
*/
private LargeObject lo;
/*
* Buffer used to improve performance
*/
private byte[] buffer;
/*
* Position within buffer
*/
private int bpos;
/*
* The buffer size
*/
private int bsize;
/*
* The mark position
*/
private int mpos = 0;
/*
* @param lo LargeObject to read from
*/
public BlobInputStream(LargeObject lo)
{
this(lo, 1024);
}
/*
* @param lo LargeObject to read from
* @param bsize buffer size
*/
public BlobInputStream(LargeObject lo, int bsize)
{
this.lo = lo;
buffer = null;
bpos = 0;
this.bsize = bsize;
}
/*
* The minimum required to implement input stream
*/
public int read() throws java.io.IOException
{
try
{
if (buffer == null || bpos >= buffer.length)
{
buffer = lo.read(bsize);
bpos = 0;
}
// Handle EOF
if (bpos >= buffer.length)
{
return -1;
}
int ret = (buffer[bpos] & 0x7F);
if ((buffer[bpos] & 0x80) == 0x80)
{
ret |= 0x80;
}
bpos++;
return ret;
}
catch (SQLException se)
{
throw new IOException(se.toString());
}
}
/*
* Closes this input stream and releases any system resources associated
* with the stream.
*
* <p> The <code>close</code> method of <code>InputStream</code> does
* nothing.
*
* @exception IOException if an I/O error occurs.
*/
public void close() throws IOException
{
try
{
lo.close();
lo = null;
}
catch (SQLException se)
{
throw new IOException(se.toString());
}
}
/*
* Marks the current position in this input stream. A subsequent call to
* the <code>reset</code> method repositions this stream at the last marked
* position so that subsequent reads re-read the same bytes.
*
* <p> The <code>readlimit</code> argument tells this input stream to
* allow that many bytes to be read before the mark position gets
* invalidated.
*
* <p> The general contract of <code>mark</code> is that, if the method
* <code>markSupported</code> returns <code>true</code>, the stream somehow
* remembers all the bytes read after the call to <code>mark</code> and
* stands ready to supply those same bytes again if and whenever the method
* <code>reset</code> is called. However, the stream is not required to
* remember any data at all if more than <code>readlimit</code> bytes are
* read from the stream before <code>reset</code> is called.
*
* <p> The <code>mark</code> method of <code>InputStream</code> does
* nothing.
*
* @param readlimit the maximum limit of bytes that can be read before
* the mark position becomes invalid.
* @see java.io.InputStream#reset()
*/
public synchronized void mark(int readlimit)
{
try
{
mpos = lo.tell();
}
catch (SQLException se)
{
//throw new IOException(se.toString());
}
}
/*
* Repositions this stream to the position at the time the
* <code>mark</code> method was last called on this input stream.
* NB: If mark has not been called, we move to the beginning.
* @see java.io.InputStream#mark(int)
* @see java.io.IOException
*/
public synchronized void reset()
throws IOException
{
try
{
lo.seek(mpos);
}
catch (SQLException se)
{
throw new IOException(se.toString());
}
}
/*
* Tests if this input stream supports the <code>mark</code> and
* <code>reset</code> methods. The <code>markSupported</code> method of
* <code>InputStream</code> returns <code>false</code>.
*
* @return <code>true</code> if this stream type supports the mark and reset
* methods; <code>false</code> otherwise.
* @see java.io.InputStream#mark(int)
* @see java.io.InputStream#reset()
*/
public boolean markSupported()
{
return true;
}
}
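
A reading sketch (hypothetical helper; oid is assumed to identify an existing large object, and large object calls must run inside a transaction):

import java.io.InputStream;
import java.sql.Connection;
import org.postgresql.largeobject.LargeObject;
import org.postgresql.largeobject.LargeObjectManager;

public class BlobInputStreamExample
{
    static int countBytes(Connection conn, int oid) throws Exception
    {
        conn.setAutoCommit(false);
        LargeObjectManager lom = ((org.postgresql.PGConnection) conn).getLargeObjectAPI();
        LargeObject lo = lom.open(oid, LargeObjectManager.READ);

        InputStream in = lo.getInputStream();   // a BlobInputStream with a 4k buffer
        int count = 0;
        while (in.read() != -1)
        {
            count++;
        }
        in.close();                             // also closes the underlying LargeObject
        conn.commit();
        return count;
    }
}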

View File

@@ -1,148 +0,0 @@
/*-------------------------------------------------------------------------
*
* BlobOutputStream.java
* This implements a basic output stream that writes to a LargeObject
*
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/interfaces/jdbc/org/postgresql/largeobject/BlobOutputStream.java,v 1.7 2003/11/29 19:52:11 pgsql Exp $
*
*-------------------------------------------------------------------------
*/
package org.postgresql.largeobject;
import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;
public class BlobOutputStream extends OutputStream
{
/*
* The parent LargeObject
*/
private LargeObject lo;
/*
* Buffer
*/
private byte buf[];
/*
* Size of the buffer (default 1K)
*/
private int bsize;
/*
* Position within the buffer
*/
private int bpos;
/*
* Create an OutputStream to a large object
* @param lo LargeObject
*/
public BlobOutputStream(LargeObject lo)
{
this(lo, 1024);
}
/*
* Create an OutputStream to a large object
* @param lo LargeObject
* @param bsize The size of the buffer used to improve performance
*/
public BlobOutputStream(LargeObject lo, int bsize)
{
this.lo = lo;
this.bsize = bsize;
buf = new byte[bsize];
bpos = 0;
}
public void write(int b) throws java.io.IOException
{
try
{
if (bpos >= bsize)
{
lo.write(buf);
bpos = 0;
}
buf[bpos++] = (byte)b;
}
catch (SQLException se)
{
throw new IOException(se.toString());
}
}
public void write(byte[] buf, int off, int len) throws java.io.IOException
{
try
{
// If we have any internally buffered data, send it first
if ( bpos > 0 )
flush();
if ( off == 0 && len == buf.length )
lo.write(buf); // save a buffer creation and copy since full buffer written
else
lo.write(buf,off,len);
}
catch (SQLException se)
{
throw new IOException(se.toString());
}
}
/*
* Flushes this output stream and forces any buffered output bytes
* to be written out. The general contract of <code>flush</code> is
* that calling it is an indication that, if any bytes previously
* written have been buffered by the implementation of the output
* stream, such bytes should immediately be written to their
* intended destination.
*
* @exception IOException if an I/O error occurs.
*/
public void flush() throws IOException
{
try
{
if (bpos > 0)
lo.write(buf, 0, bpos);
bpos = 0;
}
catch (SQLException se)
{
throw new IOException(se.toString());
}
}
/*
* Closes this output stream and releases any system resources
* associated with this stream. The general contract of <code>close</code>
* is that it closes the output stream. A closed stream cannot perform
* output operations and cannot be reopened.
* <p>
* The <code>close</code> method of <code>OutputStream</code> does nothing.
*
* @exception IOException if an I/O error occurs.
*/
public void close() throws IOException
{
try
{
flush();
lo.close();
lo = null;
}
catch (SQLException se)
{
throw new IOException(se.toString());
}
}
}
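
A writing sketch, the mirror image of the read side (hypothetical helper; the connection must have autoCommit off):

import java.io.OutputStream;
import java.sql.Connection;
import org.postgresql.largeobject.LargeObject;
import org.postgresql.largeobject.LargeObjectManager;

public class BlobOutputStreamExample
{
    static int store(Connection conn, byte[] data) throws Exception
    {
        conn.setAutoCommit(false);
        LargeObjectManager lom = ((org.postgresql.PGConnection) conn).getLargeObjectAPI();

        int oid = lom.create();                       // new large object, READWRITE by default
        LargeObject lo = lom.open(oid, LargeObjectManager.WRITE);

        OutputStream out = lo.getOutputStream();      // a BlobOutputStream with a 4k buffer
        out.write(data);
        out.close();                                  // flushes the buffer and closes the LargeObject
        conn.commit();
        return oid;                                   // the caller stores this OID in a table column
    }
}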

View File

@ -1,328 +0,0 @@
/*-------------------------------------------------------------------------
*
* LargeObject.java
* This class implements the large object interface to org.postgresql.
*
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/interfaces/jdbc/org/postgresql/largeobject/LargeObject.java,v 1.11 2003/11/29 19:52:11 pgsql Exp $
*
*-------------------------------------------------------------------------
*/
package org.postgresql.largeobject;
import java.io.InputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;
import org.postgresql.fastpath.Fastpath;
import org.postgresql.fastpath.FastpathArg;
/*
* This class provides the basic methods required to run the interface, plus
* a pair of methods that provide InputStream and OutputStream classes
* for this object.
*
* <p>Normally, client code would use the getAsciiStream, getBinaryStream,
* or getUnicodeStream methods in ResultSet, or setAsciiStream,
* setBinaryStream, or setUnicodeStream methods in PreparedStatement to
* access Large Objects.
*
* <p>However, sometimes lower-level access to Large Objects is required
* that is not supported by the JDBC specification.
*
* <p>Refer to org.postgresql.largeobject.LargeObjectManager on how to gain access
* to a Large Object, or how to create one.
*
* @see org.postgresql.largeobject.LargeObjectManager
* @see java.sql.ResultSet#getAsciiStream
* @see java.sql.ResultSet#getBinaryStream
* @see java.sql.ResultSet#getUnicodeStream
* @see java.sql.PreparedStatement#setAsciiStream
* @see java.sql.PreparedStatement#setBinaryStream
* @see java.sql.PreparedStatement#setUnicodeStream
*
*/
public class LargeObject
{
/*
* Indicates a seek from the beginning of a file
*/
public static final int SEEK_SET = 0;
/*
* Indicates a seek from the current position
*/
public static final int SEEK_CUR = 1;
/*
* Indicates a seek from the end of a file
*/
public static final int SEEK_END = 2;
private Fastpath fp; // Fastpath API to use
private int oid; // OID of this object
private int fd; // the descriptor of the open large object
private BlobOutputStream os; // The current output stream
private boolean closed = false; // true when we are closed
/*
* This opens a large object.
*
* <p>If the object does not exist, then an SQLException is thrown.
*
* @param fp FastPath API for the connection to use
* @param oid of the Large Object to open
* @param mode Mode of opening the large object
* (defined in LargeObjectManager)
* @exception SQLException if a database-access error occurs.
* @see org.postgresql.largeobject.LargeObjectManager
*/
protected LargeObject(Fastpath fp, int oid, int mode) throws SQLException
{
this.fp = fp;
this.oid = oid;
FastpathArg args[] = new FastpathArg[2];
args[0] = new FastpathArg(oid);
args[1] = new FastpathArg(mode);
this.fd = fp.getInteger("lo_open", args);
}
/* Release large object resources during garbage cleanup */
protected void finalize() throws SQLException
{
//This code used to call close() however that was problematic
//because the scope of the fd is a transaction, thus if commit
//or rollback was called before garbage collection ran then
//the call to close would error out with an invalid large object
//handle. So this method now does nothing and lets the server
//handle cleanup when it ends the transaction.
}
/*
* @return the OID of this LargeObject
*/
public int getOID()
{
return oid;
}
/*
* This method closes the object. You must not call methods in this
* object after this is called.
* @exception SQLException if a database-access error occurs.
*/
public void close() throws SQLException
{
if (!closed)
{
// flush any open output streams
if (os != null)
{
try
{
// we can't call os.close() otherwise we go into an infinite loop!
os.flush();
}
catch (IOException ioe)
{
throw new SQLException(ioe.getMessage());
}
finally
{
os = null;
}
}
// finally close
FastpathArg args[] = new FastpathArg[1];
args[0] = new FastpathArg(fd);
fp.fastpath("lo_close", false, args); // true here as we dont care!!
closed = true;
}
}
/*
* Reads some data from the object, and returns it as a byte[] array
*
* @param len number of bytes to read
* @return byte[] array containing data read
* @exception SQLException if a database-access error occurs.
*/
public byte[] read(int len) throws SQLException
{
// This is the original method, where the entire block (len bytes)
// is retrieved in one go.
FastpathArg args[] = new FastpathArg[2];
args[0] = new FastpathArg(fd);
args[1] = new FastpathArg(len);
return fp.getData("loread", args);
// This version allows us to break this down into 4k blocks
//if (len<=4048) {
//// handle as before, return the whole block in one go
//FastpathArg args[] = new FastpathArg[2];
//args[0] = new FastpathArg(fd);
//args[1] = new FastpathArg(len);
//return fp.getData("loread",args);
//} else {
//// return in 4k blocks
//byte[] buf=new byte[len];
//int off=0;
//while (len>0) {
//int bs=4048;
//len-=bs;
//if (len<0) {
//bs+=len;
//len=0;
//}
//read(buf,off,bs);
//off+=bs;
//}
//return buf;
//}
}
/*
* Reads some data from the object into an existing array
*
* @param buf destination array
* @param off offset within array
* @param len number of bytes to read
* @return the number of bytes actually read
* @exception SQLException if a database-access error occurs.
*/
public int read(byte buf[], int off, int len) throws SQLException
{
byte b[] = read(len);
if (b.length < len)
len = b.length;
System.arraycopy(b, 0, buf, off, len);
return len;
}
/*
* Writes an array to the object
*
* @param buf array to write
* @exception SQLException if a database-access error occurs.
*/
public void write(byte buf[]) throws SQLException
{
FastpathArg args[] = new FastpathArg[2];
args[0] = new FastpathArg(fd);
args[1] = new FastpathArg(buf);
fp.fastpath("lowrite", false, args);
}
/*
* Writes some data from an array to the object
*
* @param buf source array
* @param off offset within array
* @param len number of bytes to write
* @exception SQLException if a database-access error occurs.
*/
public void write(byte buf[], int off, int len) throws SQLException
{
byte data[] = new byte[len];
System.arraycopy(buf, off, data, 0, len);
write(data);
}
/*
* Sets the current position within the object.
*
* <p>This is similar to the fseek() call in the standard C library. It
* allows you to have random access to the large object.
*
* @param pos position within object
* @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
* @exception SQLException if a database-access error occurs.
*/
public void seek(int pos, int ref) throws SQLException
{
FastpathArg args[] = new FastpathArg[3];
args[0] = new FastpathArg(fd);
args[1] = new FastpathArg(pos);
args[2] = new FastpathArg(ref);
fp.fastpath("lo_lseek", false, args);
}
/*
* Sets the current position within the object.
*
* <p>This is similar to the fseek() call in the standard C library. It
* allows you to have random access to the large object.
*
* @param pos position within object from the beginning
* @exception SQLException if a database-access error occurs.
*/
public void seek(int pos) throws SQLException
{
seek(pos, SEEK_SET);
}
/*
* @return the current position within the object
* @exception SQLException if a database-access error occurs.
*/
public int tell() throws SQLException
{
FastpathArg args[] = new FastpathArg[1];
args[0] = new FastpathArg(fd);
return fp.getInteger("lo_tell", args);
}
/*
* This method is inefficient, as the only way to find out the size of
* the object is to seek to the end, record the current position, then
* return to the original position.
*
* <p>A better method will be found in the future.
*
* @return the size of the large object
* @exception SQLException if a database-access error occurs.
*/
public int size() throws SQLException
{
int cp = tell();
seek(0, SEEK_END);
int sz = tell();
seek(cp, SEEK_SET);
return sz;
}
/*
* Returns an InputStream from this object.
*
* <p>This InputStream can then be used in any method that requires an
* InputStream.
*
* @exception SQLException if a database-access error occurs.
*/
public InputStream getInputStream() throws SQLException
{
return new BlobInputStream(this, 4096);
}
/*
* Returns an OutputStream to this object
*
* <p>This OutputStream can then be used in any method that requires an
* OutputStream.
*
* @exception SQLException if a database-access error occurs.
*/
public OutputStream getOutputStream() throws SQLException
{
if (os == null)
os = new BlobOutputStream(this, 4096);
return os;
}
}
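
A random-access sketch using the methods above directly (hypothetical helper; oid identifies an existing large object, and transaction handling is assumed to be done by the caller):

import org.postgresql.largeobject.LargeObject;
import org.postgresql.largeobject.LargeObjectManager;

public class LargeObjectExample
{
    static byte[] readTail(LargeObjectManager lom, int oid, int n) throws Exception
    {
        LargeObject lo = lom.open(oid, LargeObjectManager.READ);
        int size = lo.size();                    // seek to end, tell, seek back
        if (n > size)
        {
            n = size;
        }
        lo.seek(size - n, LargeObject.SEEK_SET); // position n bytes before the end
        byte[] tail = lo.read(n);                // may be shorter if the server returns less
        lo.close();
        return tail;
    }
}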

View File

@ -1,229 +0,0 @@
/*-------------------------------------------------------------------------
*
* LargeObjectManager.java
* This class implements the large object interface to org.postgresql.
*
* It provides methods that allow client code to create, open and delete
* large objects from the database. When opening an object, an instance of
* org.postgresql.largeobject.LargeObject is returned, and its methods
* then allow access to the object.
*
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/interfaces/jdbc/org/postgresql/largeobject/LargeObjectManager.java,v 1.12 2003/12/17 15:38:42 davec Exp $
*
*-------------------------------------------------------------------------
*/
package org.postgresql.largeobject;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.postgresql.Driver;
import org.postgresql.core.BaseConnection;
import org.postgresql.fastpath.Fastpath;
import org.postgresql.fastpath.FastpathArg;
import org.postgresql.util.PSQLException;
/*
* This class implements the large object interface to org.postgresql.
*
* <p>It provides methods that allow client code to create, open and delete
* large objects from the database. When opening an object, an instance of
* org.postgresql.largeobject.LargeObject is returned, and its methods then allow
* access to the object.
*
* <p>This class can only be created by org.postgresql.Connection
*
* <p>To get access to this class, use the following segment of code:
* <br><pre>
* import org.postgresql.largeobject.*;
*
* Connection conn;
* LargeObjectManager lobj;
*
* ... code that opens a connection ...
*
* lobj = ((org.postgresql.PGConnection)myconn).getLargeObjectAPI();
* </pre>
*
* <p>Normally, client code would use the getAsciiStream, getBinaryStream,
* or getUnicodeStream methods in ResultSet, or setAsciiStream,
* setBinaryStream, or setUnicodeStream methods in PreparedStatement to
* access Large Objects.
*
* <p>However, sometimes lower-level access to Large Objects is required
* that is not supported by the JDBC specification.
*
* <p>Refer to org.postgresql.largeobject.LargeObject on how to manipulate the
* contents of a Large Object.
*
* @see java.sql.ResultSet#getAsciiStream
* @see java.sql.ResultSet#getBinaryStream
* @see java.sql.ResultSet#getUnicodeStream
* @see java.sql.PreparedStatement#setAsciiStream
* @see java.sql.PreparedStatement#setBinaryStream
* @see java.sql.PreparedStatement#setUnicodeStream
*/
public class LargeObjectManager
{
// the fastpath api for this connection
private Fastpath fp;
/*
* This mode indicates we want to write to an object
*/
public static final int WRITE = 0x00020000;
/*
* This mode indicates we want to read an object
*/
public static final int READ = 0x00040000;
/*
* This mode is the default. It indicates we want read and write access to
* a large object
*/
public static final int READWRITE = READ | WRITE;
/*
* This prevents us being created by mere mortals
*/
private LargeObjectManager()
{}
/*
* Constructs the LargeObject API.
*
* <p><b>Important Notice</b>
* <br>This method should only be called by org.postgresql.Connection
*
* <p>There should only be one LargeObjectManager per Connection. The
* org.postgresql.Connection class keeps track of the various extension APIs,
* and it's advised you use those to gain access rather than going direct.
*/
public LargeObjectManager(BaseConnection conn) throws SQLException
{
// We need Fastpath to do anything
this.fp = conn.getFastpathAPI();
// Now get the function oid's for the api
//
// This is an example of Fastpath.addFunctions();
//
String sql;
if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
sql = "SELECT p.proname,p.oid "+
" FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n "+
" WHERE p.pronamespace=n.oid AND n.nspname='pg_catalog' AND (";
} else {
sql = "SELECT proname,oid FROM pg_proc WHERE ";
}
sql += " proname = 'lo_open'" +
" or proname = 'lo_close'" +
" or proname = 'lo_creat'" +
" or proname = 'lo_unlink'" +
" or proname = 'lo_lseek'" +
" or proname = 'lo_tell'" +
" or proname = 'loread'" +
" or proname = 'lowrite'";
if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
sql += ")";
}
ResultSet res = conn.createStatement().executeQuery(sql);
if (res == null)
throw new PSQLException("postgresql.lo.init");
fp.addFunctions(res);
res.close();
if (Driver.logDebug)
Driver.debug("Large Object initialised");
}
/*
* This opens an existing large object, based on its OID. This method
* assumes that READ and WRITE access is required (the default).
*
* @param oid of large object
* @return LargeObject instance providing access to the object
* @exception SQLException on error
*/
public LargeObject open(int oid) throws SQLException
{
return new LargeObject(fp, oid, READWRITE);
}
/*
* This opens an existing large object, based on its OID
*
* @param oid of large object
* @param mode mode of open
* @return LargeObject instance providing access to the object
* @exception SQLException on error
*/
public LargeObject open(int oid, int mode) throws SQLException
{
return new LargeObject(fp, oid, mode);
}
/*
* This creates a large object, returning its OID.
*
* <p>It defaults to READWRITE for the new object's attributes.
*
* @return oid of new object
* @exception SQLException on error
*/
public int create() throws SQLException
{
FastpathArg args[] = new FastpathArg[1];
args[0] = new FastpathArg(READWRITE);
return fp.getInteger("lo_creat", args);
}
/*
* This creates a large object, returning its OID
*
* @param mode a bitmask describing different attributes of the new object
* @return oid of new object
* @exception SQLException on error
*/
public int create(int mode) throws SQLException
{
FastpathArg args[] = new FastpathArg[1];
args[0] = new FastpathArg(mode);
return fp.getInteger("lo_creat", args);
}
/*
* This deletes a large object.
*
* @param oid describing object to delete
* @exception SQLException on error
*/
public void delete(int oid) throws SQLException
{
FastpathArg args[] = new FastpathArg[1];
args[0] = new FastpathArg(oid);
fp.fastpath("lo_unlink", false, args);
}
/*
* This deletes a large object.
*
* <p>It is identical to the delete method, and is supplied as the C API uses
* unlink.
*
* @param oid describing object to delete
* @exception SQLException on error
*/
public void unlink(int oid) throws SQLException
{
delete(oid);
}
}
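
Finally, a lifecycle sketch that pulls the manager methods together (hypothetical helper; it mirrors the access pattern shown in the class comment above):

import java.sql.Connection;
import org.postgresql.largeobject.LargeObject;
import org.postgresql.largeobject.LargeObjectManager;

public class LargeObjectManagerExample
{
    static void createUseDelete(Connection conn) throws Exception
    {
        conn.setAutoCommit(false);
        LargeObjectManager lom = ((org.postgresql.PGConnection) conn).getLargeObjectAPI();

        int oid = lom.create(LargeObjectManager.READWRITE);   // new object, returns its OID
        LargeObject lo = lom.open(oid);                       // READ | WRITE by default
        lo.write("payload".getBytes());
        lo.close();

        // ... later, when the object is no longer needed ...
        lom.delete(oid);   // identical to lom.unlink(oid)
        conn.commit();
    }
}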