
📄 InsertResultSet.java

📁 Derby database source code. Good for you.
💻 JAVA
📖 Page 1 of 5
/*

   Derby - Class org.apache.derby.impl.sql.execute.InsertResultSet

   Copyright 1997, 2004 The Apache Software Foundation or its licensors, as applicable.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

package org.apache.derby.impl.sql.execute;

import org.apache.derby.iapi.services.loader.GeneratedMethod;
import org.apache.derby.iapi.services.context.ContextManager;
import org.apache.derby.iapi.services.monitor.Monitor;
import org.apache.derby.iapi.services.sanity.SanityManager;
import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
import org.apache.derby.iapi.services.stream.InfoStreams;
import org.apache.derby.iapi.services.io.StreamStorable;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.sql.StatementUtil;
import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.TypeId;
import org.apache.derby.iapi.types.RowLocation;
import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.DataDictionaryContext;
import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator;
import org.apache.derby.iapi.sql.dictionary.StatisticsDescriptor;
import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor;
import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
import org.apache.derby.iapi.sql.depend.DependencyManager;
import org.apache.derby.iapi.sql.ResultColumnDescriptor;
import org.apache.derby.iapi.reference.SQLState;
import org.apache.derby.iapi.sql.execute.ConstantAction;
import org.apache.derby.iapi.sql.execute.CursorResultSet;
import org.apache.derby.iapi.sql.execute.ExecIndexRow;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.sql.execute.RowChanger;
import org.apache.derby.iapi.sql.execute.NoPutResultSet;
import org.apache.derby.iapi.sql.execute.TargetResultSet;
import org.apache.derby.iapi.types.NumberDataValue;
import org.apache.derby.iapi.sql.Activation;
import org.apache.derby.iapi.sql.LanguageProperties;
import org.apache.derby.iapi.sql.ResultDescription;
import org.apache.derby.iapi.sql.ResultSet;
import org.apache.derby.iapi.store.access.ColumnOrdering;
import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.store.access.DynamicCompiledOpenConglomInfo;
import org.apache.derby.iapi.store.access.GroupFetchScanController;
import org.apache.derby.iapi.store.access.Qualifier;
import org.apache.derby.iapi.store.access.RowLocationRetRowSource;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.store.access.SortObserver;
import org.apache.derby.iapi.store.access.SortController;
import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;
import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.impl.sql.execute.AutoincrementCounter;
import org.apache.derby.impl.sql.execute.InternalTriggerExecutionContext;
import org.apache.derby.catalog.UUID;
import org.apache.derby.catalog.types.StatisticsImpl;
import org.apache.derby.iapi.db.TriggerExecutionContext;
import org.apache.derby.iapi.services.io.FormatableBitSet;
import org.apache.derby.iapi.util.StringUtil;

import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Properties;
import java.util.Vector;

/**
 * Insert the rows from the source into the specified
 * base table. This will cause constraints to be checked
 * and triggers to be executed based on the constraints
 * and triggers compiled into the insert plan.
 */
public class InsertResultSet extends DMLWriteResultSet implements TargetResultSet
{
    // RESOLVE. Embarrassingly large public state. If we could move the Replication
    // code into the same package, then these variables could be protected.

    // passed in at construction time
    private NoPutResultSet sourceResultSet;
    public NoPutResultSet savedSource;
    public InsertConstantAction constants;
    private GeneratedMethod checkGM;
    private long heapConglom;

    // following is for the JDBC 3.0 auto-generated keys result set feature
    public ResultSet autoGeneratedKeysResultSet;
    private TemporaryRowHolderImpl autoGeneratedKeysRowsHolder;

    // divined at run time
    public ResultDescription resultDescription;
    private RowChanger rowChanger;

    public TransactionController tc;
    public ExecRow row;

    public LanguageConnectionContext lcc;

    public boolean userSpecifiedBulkInsert;
    public boolean bulkInsertPerformed;

    // bulkInsert
    protected boolean bulkInsert;
    private boolean bulkInsertReplace;
    private boolean firstRow = true;
    private boolean[] needToDropSort;

    /*
    ** This hashtable is used to convert an index conglomerate
    ** from its old conglom number to the new one.  It is
    ** bulk insert specific.
    */
    private Hashtable indexConversionTable;

    // indexedCols is 1-based
    private FormatableBitSet indexedCols;
    private ConglomerateController bulkHeapCC;

    protected DataDictionary dd;
    protected TableDescriptor td;

    private ExecIndexRow[] indexRows;
    private ExecRow fullTemplate;
    private long[] sortIds;
    private RowLocationRetRowSource[] rowSources;
    private ScanController bulkHeapSC;
    private ColumnOrdering[][] ordering;
    private SortController[] sorters;
    private TemporaryRowHolderImpl rowHolder;
    private RowLocation rl;

    private boolean hasBeforeStatementTrigger;
    private boolean hasBeforeRowTrigger;
    private BulkTableScanResultSet tableScan;

    private int numOpens;
    private boolean firstExecute;

    // cached across open()s
    private FKInfo[] fkInfoArray;
    private TriggerInfo triggerInfo;
    private RISetChecker fkChecker;
    private TriggerEventActivator triggerActivator;

    /**
     * keeps track of autoincrement values that are generated by
     * getSetAutoincrementValues.
     */
    private NumberDataValue aiCache[];

    /**
     * If set to true, implies that this (rep)insertresultset has generated
     * autoincrement values. During refresh for example, the autoincrement
     * values are not generated but sent from the source to target or
     * vice-versa.
     */
    protected boolean autoincrementGenerated;
    private long identityVal;  // support of the IDENTITY_VAL_LOCAL function
    private boolean setIdentity;

    /**
     * Returns the description of the inserted rows.
     * REVISIT: Do we want this to return NULL instead?
     */
    public ResultDescription getResultDescription()
    {
        return resultDescription;
    }

    // TargetResultSet interface

    /**
     * @see TargetResultSet#changedRow
     *
     * @exception StandardException thrown if cursor finished.
     */
    public void changedRow(ExecRow execRow, RowLocation rowLocation)
        throws StandardException
    {
        if (SanityManager.DEBUG)
        {
            SanityManager.ASSERT(bulkInsert,
                "bulkInsert expected to be true");
        }

        /* Set up sorters, etc. if 1st row and there are indexes */
        if (constants.irgs.length > 0)
        {
            RowLocation rlClone = (RowLocation) rowLocation.cloneObject();

            // Objectify any of the streaming columns that are indexed.
            for (int i = 0; i < execRow.getRowArray().length; i++)
            {
                if (! constants.indexedCols[i])
                {
                    continue;
                }

                if (execRow.getRowArray()[i] instanceof StreamStorable)
                    ((DataValueDescriptor) execRow.getRowArray()[i]).getObject();
            }

            // Every index row will share the same row location, etc.
            if (firstRow)
            {
                firstRow = false;
                indexRows = new ExecIndexRow[constants.irgs.length];
                setUpAllSorts(execRow.getNewNullRow(), rlClone);
            }

            // Put the row into the indexes
            for (int index = 0; index < constants.irgs.length; index++)
            {
                // Get a new object array for the index
                indexRows[index].getNewObjectArray();

                // Associate the index row with the source row
                constants.irgs[index].getIndexRow(execRow,
                                                  rlClone,
                                                  indexRows[index],
                                                  (FormatableBitSet) null);

                // Insert the index row into the matching sorter
                sorters[index].insert(indexRows[index].getRowArray());
            }
        }
    }

    /**
     * Preprocess the source row.  Apply any check constraints here.
     * Do an in-place cloning of all key columns.  For triggers, if
     * we have a before row trigger, we fire it here if we can.
     * This is useful for bulk insert where the store stands between
     * the source and us.
     *
     * @param execRow	The source row.
     *
     * @return The preprocessed source row.
     * @exception StandardException thrown on error
     */
    public ExecRow preprocessSourceRow(ExecRow execRow)
        throws StandardException
    {
        //System.out.println("preprocessrow is called ");
        /*
        ** We can process before row triggers now.  All other
        ** triggers can only be fired after we have inserted
        ** all our rows.
        */
        if (hasBeforeRowTrigger)
        {
            // RESOLVE
            // Possibly dead code -- if there are triggers we don't do bulk insert.
            rowHolder.truncate();
            rowHolder.insert(execRow);
            triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT,
                                         (CursorResultSet) null,
                                         rowHolder.getResultSet());
        }

        if (checkGM != null && !hasBeforeStatementTrigger)
        {
            evaluateCheckConstraints();
        }
        // RESOLVE - optimize the cloning
        if (constants.irgs.length > 0)
        {
            /* Do in-place cloning of all of the key columns */
            return execRow.getClone(indexedCols);
        }
        else
        {
            return execRow;
        }
    }

    /**
     * Run the check constraints against the current row. Raise an error if
     * a check constraint is violated.
     *
     * @return Nothing.
     * @exception StandardException thrown on error
     */
    private void evaluateCheckConstraints()
        throws StandardException
    {
        if (checkGM != null)
        {
            // Evaluate the check constraints. The expression evaluation
            // will throw an exception if there is a violation, so there
            // is no need to check the result of the expression.
            checkGM.invoke(activation);
        }
    }

    /*
     * class interface
     *
     */
    /**
     *
     * @exception StandardException		Thrown on error
     */
    public InsertResultSet(NoPutResultSet source,
                           GeneratedMethod checkGM,
                           Activation activation)
        throws StandardException
    {
        super(activation);
        sourceResultSet = source;
        constants = (InsertConstantAction) constantAction;
        this.checkGM = checkGM;
        heapConglom = constants.conglomId;

        lcc = activation.getLanguageConnectionContext();
        tc = activation.getTransactionController();
        fkInfoArray = constants.getFKInfo(lcc.getExecutionContext());
        triggerInfo = constants.getTriggerInfo(lcc.getExecutionContext());

        /*
        ** If we have a before statement trigger, then
        ** we cannot check constraints inline.
        */
        hasBeforeStatementTrigger = (triggerInfo != null) ?
                triggerInfo.hasTrigger(true, false) :
                false;

        hasBeforeRowTrigger = (triggerInfo != null) ?
                triggerInfo.hasTrigger(true, true) :
                false;

        resultDescription = sourceResultSet.getResultDescription();

        // Is this a bulkInsert or regular insert?
        String insertMode = constants.getProperty("insertMode");

        RowLocation[] rla;

        if ((rla = constants.getAutoincRowLocation()) != null)
        {
            aiCache = new NumberDataValue[rla.length];
            for (int i = 0; i < resultDescription.getColumnCount(); i++)
            {
                if (rla[i] == null)
                    continue;
                ResultColumnDescriptor rcd =
                    resultDescription.getColumnDescriptor(i + 1);
                aiCache[i] = (NumberDataValue) rcd.getType().getNull();
            }
        }

        if (insertMode != null)
        {
            if (StringUtil.SQLEqualsIgnoreCase(insertMode, "BULKINSERT"))
            {
                userSpecifiedBulkInsert = true;
            }
            else if (StringUtil.SQLEqualsIgnoreCase(insertMode, "REPLACE"))
            {
                userSpecifiedBulkInsert = true;
                bulkInsertReplace = true;
                bulkInsert = true;

                /*
                ** For now, we don't allow bulk insert replace when
                ** there is a trigger.
                */
                if (triggerInfo != null)
                {
                    TriggerDescriptor td = triggerInfo.getTriggerArray()[0];
                    throw StandardException.newException(
                        SQLState.LANG_NO_BULK_INSERT_REPLACE_WITH_TRIGGER_DURING_EXECUTION,
                        constants.getTableName(), td.getName());
                }
            }
        }

        //System.out.println("new InsertResultSet " + sourceResultSet.getClass());
    }

    /**
        @exception StandardException Standard Cloudscape error policy
    */
    public void open() throws StandardException
    {
        // Remember if this is the 1st execution
        firstExecute = (rowChanger == null);

        autoincrementGenerated = false;

        dd = lcc.getDataDictionary();

        /*
        ** Verify the auto-generated key columns list (i.e. there are no invalid column
        ** names or positions). This is done at execution time because for a precompiled
        ** insert statement, the user can specify different column selections for
        ** auto-generated keys.
        */
        if (activation.getAutoGeneratedKeysResultsetMode())
        {
            if (activation.getAutoGeneratedKeysColumnIndexes() != null)
                verifyAutoGeneratedColumnsIndexes(activation.getAutoGeneratedKeysColumnIndexes());
            else if (activation.getAutoGeneratedKeysColumnNames() != null)
                verifyAutoGeneratedColumnsNames(activation.getAutoGeneratedKeysColumnNames());
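The autoGeneratedKeysResultSet field and the verifyAutoGeneratedColumnsIndexes/Names checks in open() are the engine side of the JDBC 3.0 auto-generated keys feature. A minimal client-side sketch of the path this class serves is below; the database name "demoDB" and the orders table are hypothetical, chosen only for illustration, while the JDBC calls (Statement.RETURN_GENERATED_KEYS, getGeneratedKeys) and the Derby embedded URL form are standard.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

public class GeneratedKeysExample {
    public static void main(String[] args) throws Exception {
        // Embedded Derby connection; "demoDB" is a made-up database name.
        Connection conn = DriverManager.getConnection("jdbc:derby:demoDB;create=true");

        Statement s = conn.createStatement();
        // Hypothetical table with a generated identity column.
        s.execute("CREATE TABLE orders (id INT GENERATED ALWAYS AS IDENTITY, item VARCHAR(32))");

        // Requesting generated keys puts the insert into auto-generated-keys
        // result set mode, which is what InsertResultSet checks for in open().
        PreparedStatement ps = conn.prepareStatement(
                "INSERT INTO orders (item) VALUES (?)",
                Statement.RETURN_GENERATED_KEYS);
        ps.setString(1, "widget");
        ps.executeUpdate();

        // Read back the identity value assigned during the insert.
        ResultSet keys = ps.getGeneratedKeys();
        while (keys.next()) {
            System.out.println("generated key: " + keys.getLong(1));
        }

        keys.close();
        ps.close();
        s.close();
        conn.close();
    }
}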

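The constructor's handling of the insertMode property ("BULKINSERT" and "REPLACE") corresponds to Derby's bulk-insert path, which applications typically reach through the SYSCS_UTIL.SYSCS_IMPORT_TABLE system procedure rather than a plain INSERT. The sketch below assumes that connection between import and bulk insert; the table ORDERS and file orders.del are hypothetical, while the procedure name and its seven-argument signature are documented Derby.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class BulkImportExample {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:derby:demoDB");

        // SYSCS_IMPORT_TABLE(schema, table, file, column delimiter,
        // character delimiter, codeset, replace). A non-zero final argument
        // asks for replace semantics; 0 appends to the existing rows.
        PreparedStatement cs = conn.prepareStatement(
                "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE(NULL, 'ORDERS', 'orders.del', NULL, NULL, NULL, 1)");
        cs.execute();

        cs.close();
        conn.close();
    }
}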