
📄 AlterTableConstantAction.java

📁 Apache Derby database source code; useful for studying the engine's internals.
💻 JAVA
📖 Page 1 of 5
        this.dd = lcc.getDataDictionary();
        this.dm = dd.getDependencyManager();
        this.tc = lcc.getTransactionExecute();
        this.activation = activation;

        if (SanityManager.DEBUG)
        {
            if (lockGranularity != '\0')
            {
                SanityManager.THROWASSERT(
                    "lockGranularity expected to be '\0', not " + lockGranularity);
            }
            SanityManager.ASSERT(columnInfo == null,
                "columnInfo expected to be null");
            SanityManager.ASSERT(constraintActions == null,
                "constraintActions expected to be null");
        }

        // Truncate table is not allowed if there are any tables referencing it,
        // except if it is self-referencing.
        ConstraintDescriptorList cdl = dd.getConstraintDescriptors(td);
        for (int index = 0; index < cdl.size(); index++)
        {
            ConstraintDescriptor cd = cdl.elementAt(index);
            if (cd instanceof ReferencedKeyConstraintDescriptor)
            {
                ReferencedKeyConstraintDescriptor rfcd =
                    (ReferencedKeyConstraintDescriptor) cd;
                if (rfcd.hasNonSelfReferencingFK(ConstraintDescriptor.ENABLED))
                {
                    throw StandardException.newException(
                        SQLState.LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE,
                        td.getName());
                }
            }
        }

        // Truncate is not allowed when there are enabled DELETE triggers.
        GenericDescriptorList tdl = dd.getTriggerDescriptors(td);
        Enumeration descs = tdl.elements();
        while (descs.hasMoreElements())
        {
            TriggerDescriptor trd = (TriggerDescriptor) descs.nextElement();
            if (trd.listensForEvent(TriggerDescriptor.TRIGGER_EVENT_DELETE) &&
                trd.isEnabled())
            {
                throw StandardException.newException(
                    SQLState.LANG_NO_TRUNCATE_ON_ENABLED_DELETE_TRIGGERS,
                    td.getName(), trd.getName());
            }
        }

        // Gather information from the existing conglomerate to create the new one.
        emptyHeapRow = td.getEmptyExecRow(lcc.getContextManager());
        compressHeapCC = tc.openConglomerate(
                                td.getHeapConglomerateId(),
                                false,
                                TransactionController.OPENMODE_FORUPDATE,
                                TransactionController.MODE_TABLE,
                                TransactionController.ISOLATION_SERIALIZABLE);

        // Invalidate any prepared statements that depended on this table
        // (including this one).  Bug 3653 has threads that start up and block
        // on our lock, but do not see that they have to recompile their plan.
        // We now invalidate earlier; however, they still might recompile using
        // the old conglomerate id before we commit our DD changes.
        dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);

        rl = compressHeapCC.newRowLocationTemplate();

        // Get the properties on the old heap.
        compressHeapCC.getInternalTablePropertySet(properties);
        compressHeapCC.close();
        compressHeapCC = null;

        // Create the new conglomerate.
        newHeapConglom = tc.createConglomerate(
                                "heap",
                                emptyHeapRow.getRowArray(),
                                null,   // column sort order - not required for heap
                                properties,
                                TransactionController.IS_DEFAULT);

        /* Set up index info to perform truncate on them. */
        getAffectedIndexes(activation);
        if (numIndexes > 0)
        {
            indexRows = new ExecIndexRow[numIndexes];
            ordering = new ColumnOrdering[numIndexes][];
            for (int index = 0; index < numIndexes; index++)
            {
                // Create a single index row template for each index.
                indexRows[index] = compressIRGs[index].getIndexRowTemplate();
                compressIRGs[index].getIndexRow(emptyHeapRow,
                                                rl,
                                                indexRows[index],
                                                (FormatableBitSet) null);
                /* For non-unique indexes, we order by all columns + the RID.
                 * For unique indexes, we just order by the columns.
                 * No need to try to enforce uniqueness here, as the
                 * index should be valid.
                 */
                int[] baseColumnPositions = compressIRGs[index].baseColumnPositions();
                boolean[] isAscending = compressIRGs[index].isAscending();
                int numColumnOrderings = baseColumnPositions.length + 1;
                ordering[index] = new ColumnOrdering[numColumnOrderings];
                for (int ii = 0; ii < numColumnOrderings - 1; ii++)
                {
                    ordering[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
                }
                ordering[index][numColumnOrderings - 1] =
                    new IndexColumnOrder(numColumnOrderings - 1);
            }
        }

        /*
        ** Inform the data dictionary that we are about to write to it.
        ** There are several calls to data dictionary "get" methods here
        ** that might be done in "read" mode in the data dictionary, but
        ** it seemed safer to do this whole operation in "write" mode.
        **
        ** We tell the data dictionary we're done writing at the end of
        ** the transaction.
        */
        dd.startWriting(lcc);

        // Truncate all indexes.
        if (numIndexes > 0)
        {
            long[] newIndexCongloms = new long[numIndexes];
            for (int index = 0; index < numIndexes; index++)
            {
                updateIndex(newHeapConglom, dd, index, newIndexCongloms);
            }
        }

        // Update the DataDictionary.
        // Get the ConglomerateDescriptor for the heap.
        long oldHeapConglom = td.getHeapConglomerateId();
        ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);

        // Update sys.sysconglomerates with the new conglomerate #.
        dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);

        // Drop the old conglomerate.
        tc.dropConglomerate(oldHeapConglom);

        cleanUp();
    }

    /**
     * Update all of the indexes on a table when doing a bulk insert
     * on an empty table.
     *
     * @exception StandardException    thrown on error
     */
    private void updateAllIndexes(long newHeapConglom,
                                  DataDictionary dd)
        throws StandardException
    {
        long[] newIndexCongloms = new long[numIndexes];

        /* Populate each index (one at a time or all at once). */
        if (sequential)
        {
            // First sorter populated during heap compression.
            if (numIndexes >= 1)
            {
                updateIndex(newHeapConglom, dd, 0, newIndexCongloms);
            }
            for (int index = 1; index < numIndexes; index++)
            {
                // Scan the heap and populate the next sorter.
                openBulkFetchScan(newHeapConglom);
                while (getNextRowFromRowSource() != null)
                {
                    objectifyStreamingColumns();
                    insertIntoSorter(index, compressRL[currentCompressRow - 1]);
                }
                updateIndex(newHeapConglom, dd, index, newIndexCongloms);
                closeBulkFetchScan();
            }
        }
        else
        {
            for (int index = 0; index < numIndexes; index++)
            {
                updateIndex(newHeapConglom, dd, index, newIndexCongloms);
            }
        }
    }

    private void updateIndex(long newHeapConglom, DataDictionary dd,
                             int index, long[] newIndexCongloms)
        throws StandardException
    {
        ConglomerateController indexCC;
        Properties properties = new Properties();
        ConglomerateDescriptor cd;

        // Get the ConglomerateDescriptor for the index.
        cd = td.getConglomerateDescriptor(indexConglomerateNumbers[index]);

        // Build the properties list for the new conglomerate.
        indexCC = tc.openConglomerate(
                        indexConglomerateNumbers[index],
                        false,
                        TransactionController.OPENMODE_FORUPDATE,
                        TransactionController.MODE_TABLE,
                        TransactionController.ISOLATION_SERIALIZABLE);

        // Get the properties on the old index.
        indexCC.getInternalTablePropertySet(properties);

        /* Create the properties that language supplies when creating the
         * index.  (The store doesn't preserve these.)
         */
        int indexRowLength = indexRows[index].nColumns();
        properties.put("baseConglomerateId", Long.toString(newHeapConglom));
        if (cd.getIndexDescriptor().isUnique())
        {
            properties.put("nUniqueColumns",
                           Integer.toString(indexRowLength - 1));
        }
        else
        {
            properties.put("nUniqueColumns",
                           Integer.toString(indexRowLength));
        }
        properties.put("rowLocationColumn",
                       Integer.toString(indexRowLength - 1));
        properties.put("nKeyFields", Integer.toString(indexRowLength));

        indexCC.close();

        // We can finally drain the sorter and rebuild the index.
        // RESOLVE - all indexes are btrees right now.
        // Populate the index.
        RowLocationRetRowSource cCount = null;
        boolean updateStatistics = false;
        if (!truncateTable)
        {
            sorters[index].close();
            sorters[index] = null;

            if (td.statisticsExist(cd))
            {
                cCount = new CardinalityCounter(
                                tc.openSortRowSource(sortIds[index]));
                updateStatistics = true;
            }
            else
            {
                cCount = tc.openSortRowSource(sortIds[index]);
            }

            newIndexCongloms[index] = tc.createAndLoadConglomerate(
                                            "BTREE",
                                            indexRows[index].getRowArray(),
                                            ordering[index],
                                            properties,
                                            TransactionController.IS_DEFAULT,
                                            cCount,
                                            (long[]) null);
        }
        else
        {
            newIndexCongloms[index] = tc.createConglomerate(
                                            "BTREE",
                                            indexRows[index].getRowArray(),
                                            ordering[index],
                                            properties,
                                            TransactionController.IS_DEFAULT);

            // On truncate, drop the statistics because we know for sure that
            // the row count is zero and the existing statistics will be invalid.
            if (td.statisticsExist(cd))
                dd.dropStatisticsDescriptors(td.getUUID(), cd.getUUID(), tc);
        }

        if (updateStatistics)
        {
            dd.dropStatisticsDescriptors(td.getUUID(), cd.getUUID(), tc);
            long numRows;
            if ((numRows = ((CardinalityCounter) cCount).getRowCount()) > 0)
            {
                long[] c = ((CardinalityCounter) cCount).getCardinality();
                for (int i = 0; i < c.length; i++)
                {
                    StatisticsDescriptor statDesc =
                        new StatisticsDescriptor(dd,
                                dd.getUUIDFactory().createUUID(),
                                cd.getUUID(), td.getUUID(), "I",
                                new StatisticsImpl(numRows, c[i]),
                                i + 1);
                    dd.addDescriptor(statDesc, null,    // no parent descriptor
                                     DataDictionary.SYSSTATISTICS_CATALOG_NUM,
                                     true, tc);         // no error on duplicate
                }
            }
        }

        /* Update the DataDictionary.
         * RESOLVE - this will change in 1.4 because we will get
         * back the same conglomerate number.
         *
         * Update sys.sysconglomerates with the new conglomerate #; we need to
         * update all (if any) duplicate index entries sharing this same
         * conglomerate.
         */
        dd.updateConglomerateDescriptor(
                td.getConglomerateDescriptors(indexConglomerateNumbers[index]),
                newIndexCongloms[index], tc);

        // Drop the old conglomerate.
        tc.dropConglomerate(indexConglomerateNumbers[index]);
    }

    /**
     * Get info on the indexes on the table being compressed.
     *
     * @exception StandardException    Thrown on error
     */
    private void getAffectedIndexes(Activation activation)
        throws StandardException
    {
        IndexLister indexLister = td.getIndexLister();

        /* We have to get non-distinct index row generators and conglom numbers
         * here and then compress them to distinct later, because drop column
         * will need to change the index descriptor directly on each index
         * entry in SYSCONGLOMERATES, on duplicate indexes too.
         */
        compressIRGs = indexLister.getIndexRowGenerators();
        numIndexes = compressIRGs.length;
        indexConglomerateNumbers = indexLister.getIndexConglomerateNumbers();

        if (! (compressTable || truncateTable))    // then it's drop column
        {
            for (int i = 0; i < compressIRGs.length; i++)
            {
                int[] baseColumnPositions = compressIRGs[i].baseColumnPositions();
                int j;
                for (j = 0; j < baseColumnPositions.length; j++)
                    if (baseColumnPositions[j] == columnPosition) break;
                if (j == baseColumnPositions.length)    // not related
                    continue;

                if (baseColumnPositions.length == 1 ||
                    (behavior == StatementType.DROP_CASCADE &&
                     compressIRGs[i].isUnique()))
                {
                    numIndexes--;
                    /* Get the first conglomerate with this conglom number each
                     * time; each duplicate one will eventually all be dropped.
                     */
                    ConglomerateDescriptor cd = td.getConglomerateDescriptor
                                                    (indexConglomerateNumbers[i]);
                    DropIndexConstantAction.dropIndex(dm, dd, tc, cd, td, activation);
                    compressIRGs[i] = null;        // mark it
                    continue;
                }

                // Give an error for a unique index on multiple columns including
                // the column we are to drop (restrict); such an index is not for
                // a constraint, because constraints have already been handled.
                if (compressIRGs[i].isUnique())
                {
                    ConglomerateDescriptor cd = td.getConglomerateDescriptor
                                                    (indexConglomerateNumbers[i]);
                    throw StandardException.newException(
                                SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
                                dm.getActionString(DependencyManager.DROP_COLUMN),
                                columnInfo[0].name, "UNIQUE INDEX",
                                cd.getConglomerateName());
                }
            }

            IndexRowGenerator[] newIRGs = new IndexRowGenerator[numIndexes];
            long[] newIndexConglomNumbers = new long[numIndexes];

            for (int i = 0, j = 0; i < numIndexes; i++, j++)
            {
                while (compressIRGs[j] == null)
                    j++;

                int[] baseColumnPositions = compressIRGs[j].baseColumnPositions();
                newIRGs[i] = compressIRGs[j];
                newIndexConglomNumbers[i] = indexConglomerateNumbers[j];

                boolean[] isAscending = compressIRGs[j].isAscending();
                boolean reMakeArrays = false;
                int size = baseColumnPositions.length;
                for (int k = 0; k < size; k++)
                {
                    if (baseColumnPositions[k] > columnPosition)
                        baseColumnPositions[k]--;
                    else if (baseColumnPositions[k] == columnPosition)
                    {
                        baseColumnPositions[k] = 0;        // mark it
                        reMakeArrays = true;
                    }
                }
                if (reMakeArrays)
                {
                    size--;
                    int[] newBCP = new int[size];
                    boolean[] newIsAscending = new boolean[size];
                    for (int k = 0, step = 0; k < size; k++)
                    {
                        if (step == 0 && baseColumnPositions[k + step] == 0)
                            step++;
                        newBCP[k] = baseColumnPositions[k + step];
                        newIsAscending[k] = isAscending[k + step];
                    }
                    IndexDescriptor id = compressIRGs[j].getIndexDescriptor();
                    id.setBaseColumnPositions(newBCP);
                    id.setIsAscending(newIsAscending);
                    id.setNumberOfOrderedColumns(id.numberOfOrderedColumns() - 1);
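The two guards at the top of the listing (no enabled foreign keys referencing the table from elsewhere, no enabled DELETE triggers) surface to applications as plain SQLExceptions. Below is a minimal JDBC sketch of the foreign-key case, assuming an embedded Derby build that accepts the TRUNCATE TABLE statement; the database name demoDB and the class name TruncateGuardDemo are made up for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

// Hypothetical standalone demo: exercises the foreign-key check that
// AlterTableConstantAction enforces (LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE).
public class TruncateGuardDemo {
    public static void main(String[] args) throws SQLException {
        // "demoDB" is an assumed database name for this sketch.
        try (Connection conn =
                 DriverManager.getConnection("jdbc:derby:demoDB;create=true");
             Statement st = conn.createStatement()) {
            st.executeUpdate("CREATE TABLE parent(id INT PRIMARY KEY)");
            st.executeUpdate("CREATE TABLE child(pid INT REFERENCES parent(id))");
            try {
                // Expected to be rejected: parent is referenced by an
                // enabled, non-self-referencing foreign key.
                st.executeUpdate("TRUNCATE TABLE parent");
            } catch (SQLException e) {
                System.out.println("truncate refused: " + e.getMessage());
            }
        }
    }
}

A self-referencing foreign key (a column in parent referencing parent itself) would not trip the guard, since hasNonSelfReferencingFK is what gets checked.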
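updateIndex rebuilds each index as a BTREE whose row is the key columns plus a trailing row location, which is where the nUniqueColumns / rowLocationColumn / nKeyFields arithmetic comes from. A standalone sketch of just that arithmetic, using plain java.util.Properties and no Derby classes; btreeProps is a made-up name, not a Derby API:

import java.util.Properties;

// Hypothetical illustration of the BTREE property arithmetic in updateIndex:
// an index row holds the key columns plus a trailing row location (RID).
public class IndexPropsSketch {
    static Properties btreeProps(int indexRowLength, boolean unique,
                                 long baseConglomerateId) {
        Properties p = new Properties();
        p.put("baseConglomerateId", Long.toString(baseConglomerateId));
        // Unique indexes deduplicate on the key columns only; non-unique
        // ones include the RID so that every row is distinct.
        p.put("nUniqueColumns",
              Integer.toString(unique ? indexRowLength - 1 : indexRowLength));
        // The RID is always the last column of the index row.
        p.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
        p.put("nKeyFields", Integer.toString(indexRowLength));
        return p;
    }

    public static void main(String[] args) {
        // A unique two-column index: row length 3 = 2 key columns + RID.
        System.out.println(btreeProps(3, true, 42L));
    }
}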
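In the drop-column branch of getAffectedIndexes, each surviving index's base column positions are rewritten: the dropped position is removed and every higher position slides down by one. A hypothetical standalone re-implementation of that rewrite (adjustForDroppedColumn is not a Derby method), with a worked example:

import java.util.Arrays;

// Hypothetical sketch of the position-rewrite step in getAffectedIndexes:
// drop one column and renumber the remaining index key columns.
public class DropColumnPositions {

    // Returns the index's base column positions after dropping
    // columnPosition; positions greater than it slide down by one.
    static int[] adjustForDroppedColumn(int[] baseColumnPositions,
                                        int columnPosition) {
        return Arrays.stream(baseColumnPositions)
                     .filter(p -> p != columnPosition)          // remove the dropped column
                     .map(p -> p > columnPosition ? p - 1 : p)  // renumber the rest
                     .toArray();
    }

    public static void main(String[] args) {
        // An index on columns 2, 4, 5; drop column 4 -> index on 2, 4
        // (the old column 5 becomes the new column 4).
        System.out.println(Arrays.toString(
            adjustForDroppedColumn(new int[] {2, 4, 5}, 4)));   // prints [2, 4]
    }
}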
