abstractjdbc1resultset.java

来自「PostgreSQL7.4.6 for Linux」· Java 代码 · 共 1,260 行 · 第 1/3 页

JAVA
1,260
字号
/*-------------------------------------------------------------------------
 *
 * AbstractJdbc1ResultSet.java
 *     This class defines methods of the jdbc1 specification.  This class is
 *     extended by org.postgresql.jdbc2.AbstractJdbc2ResultSet which adds the
 *     jdbc2 methods.  The real ResultSet class (for jdbc1) is
 *     org.postgresql.jdbc1.Jdbc1ResultSet
 *
 * Copyright (c) 2003, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/interfaces/jdbc/org/postgresql/jdbc1/Attic/AbstractJdbc1ResultSet.java,v 1.22.2.4 2004/06/21 03:11:37 jurka Exp $
 *
 *-------------------------------------------------------------------------
 */
package org.postgresql.jdbc1;

import java.math.BigDecimal;
import java.io.*;
import java.sql.*;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Vector;
import org.postgresql.Driver;
import org.postgresql.core.BaseConnection;
import org.postgresql.core.BaseResultSet;
import org.postgresql.core.BaseStatement;
import org.postgresql.core.Field;
import org.postgresql.core.Encoding;
import org.postgresql.core.QueryExecutor;
import org.postgresql.largeobject.*;
import org.postgresql.util.PGbytea;
import org.postgresql.util.PGtokenizer;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

public abstract class AbstractJdbc1ResultSet implements BaseResultSet
{
	protected Vector rows;			// The results: one byte[][] tuple per row
	protected BaseStatement statement;	// The statement that produced this result set
	protected Field fields[];		// The field descriptions
	protected String status;		// Status of the result
	protected int updateCount;		// How many rows did we get back?
	protected long insertOID;		// The oid of an inserted row
	protected int current_row;		// Our pointer to where we are at (-1 = before first row)
	protected byte[][] this_row;		// the current row result
	protected BaseConnection connection;	// the connection which we returned from
	protected SQLWarning warnings = null;	// The warning chain
	protected boolean wasNullFlag = false;	// the flag for wasNull()

	// We can chain multiple resultSets together - this points to
	// next resultSet in the chain.
	protected BaseResultSet next = null;

	private StringBuffer sbuf = null;	// shared scratch buffer, exposed via getStringBuffer()

	// Copy of this_row, refreshed on every next(); presumably consumed by the
	// updatable-result-set support in the jdbc2 subclass — confirm against caller.
	public byte[][] rowBuffer = null;

	// Cached date/time parsers. NOTE(review): SimpleDateFormat is not
	// thread-safe; this assumes the result set is used from a single thread.
	private SimpleDateFormat m_tsFormat = null;
	private SimpleDateFormat m_tstzFormat = null;
	private SimpleDateFormat m_dateFormat = null;

	private int fetchSize;      // Fetch size for next read (might be 0).
	private int lastFetchSize;  // Fetch size of last read (might be 0).

	public abstract ResultSetMetaData getMetaData() throws SQLException;

	/*
	 * Build a result set around the tuples returned by a query.
	 *
	 * @param statement the statement that produced this result set
	 * @param fields column descriptions for each result column
	 * @param tuples the rows (Vector of byte[][]); may be only a window of the
	 *	  full result when a fetching cursor is in use (see next())
	 * @param status backend status string of the result
	 * @param updateCount number of rows affected, for update-style results
	 * @param insertOID oid of an inserted row, if any
	 */
	public AbstractJdbc1ResultSet(BaseStatement statement,
				      Field[] fields,
				      Vector tuples,
				      String status,
				      int updateCount,
				      long insertOID)
	{
		this.connection = statement.getPGConnection();
		this.statement = statement;
		this.fields = fields;
		this.rows = tuples;
		this.status = status;
		this.updateCount = updateCount;
		this.insertOID = insertOID;
		this.this_row = null;
		this.current_row = -1;	// positioned before the first row, per JDBC
		// Remember the statement's fetch size; lastFetchSize records the size
		// used for the most recent read so next() can detect end-of-query.
		// NOTE(review): statement is already dereferenced above, so the
		// null check here appears vestigial.
		this.lastFetchSize = this.fetchSize = (statement == null ? 0 : statement.getFetchSize());
	}

	// Accessor for the statement that produced this result set.
	public BaseStatement getPGStatement() {
		return statement;
	}

	// Accessor for the shared scratch buffer (may be null until first use).
	public StringBuffer getStringBuffer() {
		return sbuf;
	}

	//This is implemented in jdbc2
	public void setStatement(BaseStatement statement) {
	}

	//method to reinitialize a result set with more data
	// Called (via QueryExecutor) when a cursor-based fetch pulls the next
	// window of rows; resets the cursor position to before-first.
	public void reInit (Field[] fields, Vector tuples, String status,
			  int updateCount, long insertOID)
	{
		this.fields = fields;
		// on a reinit the size of this indicates how many we pulled
		// back. If it's 0 then the res set has ended.
		this.rows = tuples;
		this.status = status;
		this.updateCount = updateCount;
		this.insertOID = insertOID;
		this.this_row = null;
		this.current_row = -1;
	}

	//
	// Part of the JDBC2 support, but convenient to implement here.
	//
	public void setFetchSize(int rows) throws SQLException
	{
		fetchSize = rows;
	}

	public int getFetchSize() throws SQLException
	{
		return fetchSize;
	}

	/*
	 * Advance to the next row. When a fetching cursor is in use and the
	 * in-memory window is exhausted, issues another FETCH FORWARD to pull
	 * the next window (which repopulates this object through reInit()).
	 *
	 * @return true if positioned on a row, false after the last row
	 * @exception SQLException if the result set is closed or the fetch fails
	 */
	public boolean next() throws SQLException
	{
		if (rows == null)
			throw new PSQLException("postgresql.con.closed", PSQLState.CONNECTION_DOES_NOT_EXIST);

		if (current_row+1 >= rows.size())
		{
			String cursorName = statement.getFetchingCursorName();
			if (cursorName == null || lastFetchSize == 0 || rows.size() < lastFetchSize) {
				current_row = rows.size();
				this_row = null;
				rowBuffer = null;
				return false;  // Not doing a cursor-based fetch or the last fetch was the end of the query
			}

			// Use the ref to the statement to get
			// the details we need to do another cursor
			// query - it will use reinit() to repopulate this
			// with the right data.
			// NB: We can reach this point with fetchSize == 0
			// if the fetch size is changed halfway through reading results.
			// Use "FETCH FORWARD ALL" in that case to complete the query.
			String[] sql = new String[] {
				fetchSize == 0 ? ("FETCH FORWARD ALL FROM " + cursorName) :
				("FETCH FORWARD " + fetchSize + " FROM " + cursorName)
			};

			QueryExecutor.execute(sql,
								  new String[0],
								  this);

			// Test the new rows array.
			lastFetchSize = fetchSize;
			if (rows.size() == 0) {
				this_row = null;
				rowBuffer = null;
				return false;
			}
			// Otherwise reset the counter and let it go on...
			current_row = 0;
		} else {
			current_row++;
		}

		this_row = (byte [][])rows.elementAt(current_row);

		// Keep a private copy of the tuple for the row-update machinery.
		rowBuffer = new byte[this_row.length][];
		System.arraycopy(this_row, 0, rowBuffer, 0, this_row.length);
		return true;
	}

	public void close() throws SQLException
	{
		//release resources held (memory for tuples)
		if (rows != null)
		{
			rows = null;
		}
	}

	public boolean wasNull() throws SQLException
	{
		return wasNullFlag;
	}

	/*
	 * Get the column as a String, decoded with the connection's encoding.
	 * checkResultSet/trimString are helpers defined elsewhere in this class
	 * (not shown in this chunk).
	 */
	public String getString(int columnIndex) throws SQLException
	{
		checkResultSet( columnIndex );
		wasNullFlag = (this_row[columnIndex - 1] == null);
		if (wasNullFlag)
			return null;

		Encoding encoding = connection.getEncoding();
		return trimString(columnIndex, encoding.decode(this_row[columnIndex-1]));
	}

	public boolean getBoolean(int columnIndex) throws SQLException
	{
		return toBoolean( getString(columnIndex) );
	}

	/*
	 * Get the column as a byte. For numeric types a trailing all-zero
	 * fractional part (e.g. "1.000") is stripped before parsing; CHAR
	 * values are trimmed. Returns 0 for SQL NULL.
	 * NOTE(review): unlike getShort(), this uses getString() rather than
	 * getFixedString() — confirm whether the asymmetry is intentional.
	 */
	public byte getByte(int columnIndex) throws SQLException
	{
		String s = getString(columnIndex);

		if (s != null)
		{
			try
			{
				switch(fields[columnIndex-1].getSQLType())
				{
					case Types.NUMERIC:
					case Types.REAL:
					case Types.DOUBLE:
					case Types.FLOAT:
					case Types.DECIMAL:
						// Drop the fraction only when it is all zeros.
						// NOTE(review): Integer.parseInt on the fraction
						// can overflow for very long fractional parts.
						int loc = s.indexOf(".");
						if (loc!=-1 && Integer.parseInt(s.substring(loc+1,s.length()))==0)
						{
							s = s.substring(0,loc);
						}
						break;
					case Types.CHAR:
						s = s.trim();
						break;
				}
				return Byte.parseByte(s);
			}
			catch (NumberFormatException e)
			{
				throw new PSQLException("postgresql.res.badbyte", PSQLState.NUMERIC_VALUE_OUT_OF_RANGE, s);
			}
		}
		return 0; // SQL NULL
	}

	/*
	 * Get the column as a short; same fraction-stripping rules as getByte().
	 * Returns 0 for SQL NULL.
	 */
	public short getShort(int columnIndex) throws SQLException
	{
		String s = getFixedString(columnIndex);

		if (s != null)
		{
			try
			{
				switch(fields[columnIndex-1].getSQLType())
				{
					case Types.NUMERIC:
					case Types.REAL:
					case Types.DOUBLE:
					case Types.FLOAT:
					case Types.DECIMAL:
						int loc = s.indexOf(".");
						if (loc!=-1 && Integer.parseInt(s.substring(loc+1,s.length()))==0)
						{
							s = s.substring(0,loc);
						}
						break;
					case Types.CHAR:
						s = s.trim();
						break;
				}
				return Short.parseShort(s);
			}
			catch (NumberFormatException e)
			{
				throw new PSQLException("postgresql.res.badshort", PSQLState.NUMERIC_VALUE_OUT_OF_RANGE, s);
			}
		}
		return 0; // SQL NULL
	}

	// The to*() conversion helpers below are defined elsewhere in this class
	// (not shown in this chunk); getFixedString() presumably normalizes
	// money-style values before parsing — confirm against its definition.
	public int getInt(int columnIndex) throws SQLException
	{
		return toInt( getFixedString(columnIndex) );
	}

	public long getLong(int columnIndex) throws SQLException
	{
		return toLong( getFixedString(columnIndex) );
	}

	public float getFloat(int columnIndex) throws SQLException
	{
		return toFloat( getFixedString(columnIndex) );
	}

	public double getDouble(int columnIndex) throws SQLException
	{
		return toDouble( getFixedString(columnIndex) );
	}

	public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException
	{
		return toBigDecimal( getFixedString(columnIndex), scale );
	}

	/*
	 * Get the value of a column in the current row as a Java byte array.
	 *
	 * <p>In normal use, the bytes represent the raw values returned by the
	 * backend. However, if the column is an OID, then it is assumed to
	 * refer to a Large Object, and that object is returned as a byte array.
	 *
	 * <p><b>Be warned</b> If the large object is huge, then you may run out
	 * of memory.
	 *
	 * @param columnIndex the first column is 1, the second is 2, ...
	 * @return the column value; if the value is SQL NULL, the result
	 *	is null
	 * @exception SQLException if a database access error occurs
	 */
	public byte[] getBytes(int columnIndex) throws SQLException
	{
		checkResultSet( columnIndex );
		wasNullFlag = (this_row[columnIndex - 1] == null);
		if (!wasNullFlag)
		{
			if (fields[columnIndex -1].getFormat() == Field.BINARY_FORMAT)
			{
				//If the data is already binary then just return it
				return this_row[columnIndex - 1];
			}
			else if (connection.haveMinimumCompatibleVersion("7.2"))
			{
				//Version 7.2 supports the bytea datatype for byte arrays
				if (fields[columnIndex - 1].getPGType().equals("bytea"))
				{
					return trimBytes(columnIndex, PGbytea.toBytes(this_row[columnIndex - 1]));
				}
				else
				{
					return trimBytes(columnIndex, this_row[columnIndex - 1]);
				}
			}
			else
			{
				//Version 7.1 and earlier supports LargeObjects for byte arrays
				// Handle OID's as BLOBS
				// (26 is hard-coded here; presumably the pg_type oid of the
				// "oid" type — confirm against the server catalogs.)
				if ( fields[columnIndex - 1].getOID() == 26)
				{
					LargeObjectManager lom = connection.getLargeObjectAPI();
					LargeObject lob = lom.open(getInt(columnIndex));
					byte buf[] = lob.read(lob.size());
					lob.close();
					return trimBytes(columnIndex, buf);
				}
				else
				{
					return trimBytes(columnIndex, this_row[columnIndex - 1]);
				}
			}
		}
		return null;
	}

	// Date/time getters delegate to static conversion helpers defined
	// elsewhere in this class (not shown in this chunk).
	public java.sql.Date getDate(int columnIndex) throws SQLException
	{
		return toDate( getString(columnIndex) );
	}

	public Time getTime(int columnIndex) throws SQLException
	{
		return toTime( getString(columnIndex), this, fields[columnIndex - 1].getPGType() );
	}

	public Timestamp getTimestamp(int columnIndex) throws SQLException
	{
		return toTimestamp( getString(columnIndex), this, fields[columnIndex - 1].getPGType() );
	}

	public InputStream getAsciiStream(int columnIndex) throws SQLException
	{
		checkResultSet( columnIndex );
		wasNullFlag = (this_row[columnIndex - 1] == null);
		if (wasNullFlag)
			return null;

		if (connection.haveMinimumCompatibleVersion("7.2"))
		{
			//Version 7.2 supports AsciiStream for all the PG text types
			//As the spec/javadoc for this method indicate this is to be used for
			//large text values (i.e. LONGVARCHAR)	PG doesn't have a separate
			//long string datatype, but with toast the text datatype is capable of
			//handling very large values.  Thus the implementation ends up calling
			//getString() since there is no current way to stream the value from the server
			try
			{
				return new ByteArrayInputStream(getString(columnIndex).getBytes("ASCII"));
			}
			catch (UnsupportedEncodingException l_uee)
			{
				throw new PSQLException("postgresql.unusual", PSQLState.UNEXPECTED_ERROR, l_uee);
			}
		}
		else
		{
			// In 7.1 Handle as BLOBS so return the LargeObject input stream
			return getBinaryStream(columnIndex);
		}
	}

	public InputStream getUnicodeStream(int columnIndex) throws SQLException
	{
		checkResultSet( columnIndex );
		wasNullFlag = (this_row[columnIndex - 1] == null);
		if (wasNullFlag)
			return null;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?