📄 DataDictionaryImpl.java
        coreInfo[SYSTABLES_CORE_NUM].setIndexConglomerate(
            SYSTABLESRowFactory.SYSTABLES_INDEX2_ID,
            getBootParameter(startParams, CFG_SYSTABLES_INDEX2_ID, true));

        // SYSCOLUMNS
        coreInfo[SYSCOLUMNS_CORE_NUM].setHeapConglomerate(
            getBootParameter(startParams, CFG_SYSCOLUMNS_ID, true));

        coreInfo[SYSCOLUMNS_CORE_NUM].setIndexConglomerate(
            SYSCOLUMNSRowFactory.SYSCOLUMNS_INDEX1_ID,
            getBootParameter(startParams, CFG_SYSCOLUMNS_INDEX1_ID, true));

        // 2nd syscolumns index added in Xena, hence may not be there
        coreInfo[SYSCOLUMNS_CORE_NUM].setIndexConglomerate(
            SYSCOLUMNSRowFactory.SYSCOLUMNS_INDEX2_ID,
            getBootParameter(startParams, CFG_SYSCOLUMNS_INDEX2_ID, false));

        // SYSCONGLOMERATES
        coreInfo[SYSCONGLOMERATES_CORE_NUM].setHeapConglomerate(
            getBootParameter(startParams, CFG_SYSCONGLOMERATES_ID, true));

        coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(
            SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX1_ID,
            getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX1_ID, true));

        coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(
            SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX2_ID,
            getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX2_ID, true));

        coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(
            SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX3_ID,
            getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX3_ID, true));

        // SYSSCHEMAS
        coreInfo[SYSSCHEMAS_CORE_NUM].setHeapConglomerate(
            getBootParameter(startParams, CFG_SYSSCHEMAS_ID, true));

        coreInfo[SYSSCHEMAS_CORE_NUM].setIndexConglomerate(
            SYSSCHEMASRowFactory.SYSSCHEMAS_INDEX1_ID,
            getBootParameter(startParams, CFG_SYSSCHEMAS_INDEX1_ID, true));

        coreInfo[SYSSCHEMAS_CORE_NUM].setIndexConglomerate(
            SYSSCHEMASRowFactory.SYSSCHEMAS_INDEX2_ID,
            getBootParameter(startParams, CFG_SYSSCHEMAS_INDEX2_ID, true));
    }

    String value = startParams.getProperty(Property.LANG_TD_CACHE_SIZE);
    tdCacheSize = PropertyUtil.intPropertyValue(Property.LANG_TD_CACHE_SIZE,
        value, 0, Integer.MAX_VALUE, Property.LANG_TD_CACHE_SIZE_DEFAULT);

    value = startParams.getProperty(Property.LANG_SPS_CACHE_SIZE);
    stmtCacheSize = PropertyUtil.intPropertyValue(Property.LANG_SPS_CACHE_SIZE,
        value, 0, Integer.MAX_VALUE, Property.LANG_SPS_CACHE_SIZE_DEFAULT);

    /*
     * data dictionary contexts are only associated with connections.
     * we have to look for the basic data dictionary, as there is
     * no connection, and thus no context stack yet.
     */

    /*
     * Get the table descriptor cache.
     */
    CacheFactory cf = (CacheFactory) Monitor.startSystemModule(
        org.apache.derby.iapi.reference.Module.CacheFactory);

    OIDTdCache = cf.newCacheManager(this,
        "TableDescriptorOIDCache", tdCacheSize, tdCacheSize);

    nameTdCache = cf.newCacheManager(this,
        "TableDescriptorNameCache", tdCacheSize, tdCacheSize);

    if (stmtCacheSize > 0)
    {
        spsNameCache = cf.newCacheManager(this,
            "SPSNameDescriptorCache", stmtCacheSize, stmtCacheSize);
        spsIdHash = new Hashtable(stmtCacheSize);
        // spsTextHash = new Hashtable(stmtCacheSize);
    }

    /* Get the object to coordinate cache transitions */
    cacheCoordinator = new ShExLockable();

    /* Get the AccessFactory in order to get at the transaction stuff */
    af = (AccessFactory) Monitor.findServiceModule(this, AccessFactory.MODULE);

    /* Get the lock factory */
    lockFactory = af.getLockFactory();

    /*
     * Now we need to set up a context stack for the database creation work.
     * We assume the System boot process has created a context
     * manager already, but not that the contexts we need are there.
     */
    ContextService csf = ContextService.getFactory();

    ContextManager cm = csf.getCurrentContextManager();

    if (SanityManager.DEBUG)
        SanityManager.ASSERT((cm != null),
            "Failed to get current ContextManager");

    /* push a datadictionary context onto this stack */
    pushDataDictionaryContext(cm, false);

    // RESOLVE other non-StandardException errors.
    bootingTC = null;
    try
    {
        // Get a transaction controller. This has the side effect of
        // creating a transaction context if there isn't one already.
        bootingTC = af.getTransaction(cm);

        /* We need an execution context so that we can generate rows.
         * REMIND: maybe only for the create case?
         */
        exFactory.newExecutionContext(cm);

        DataDescriptorGenerator ddg = getDataDescriptorGenerator();

        if (create)
        {
            // create any required tables.
            createDictionaryTables(startParams, bootingTC, ddg);
            // create procedures for network server metadata
            create_SYSIBM_procedures(bootingTC);
            // create metadata sps statements required for the network server
            createSystemSps(bootingTC);
            // create the SYSCS_UTIL system procedures
            create_SYSCS_procedures(bootingTC);

            // log the current dictionary version
            dictionaryVersion = softwareVersion;

            /* Set properties for current and create time
             * DataDictionary versions.
             */
            bootingTC.setProperty(
                DataDictionary.CORE_DATA_DICTIONARY_VERSION,
                dictionaryVersion, true);

            bootingTC.setProperty(
                DataDictionary.CREATE_DATA_DICTIONARY_VERSION,
                dictionaryVersion, true);
        }
        else
        {
            // Get the ids for non-core tables
            loadDictionaryTables(bootingTC, ddg, startParams);
        }

        /* Commit & destroy the create-database transaction */
        bootingTC.commit();
        cm.getContext(ExecutionContext.CONTEXT_ID).popMe(); // done with ctx
    }
    finally
    {
        if (bootingTC != null)
        {
            bootingTC.destroy();  // gets rid of the transaction context
            bootingTC = null;
        }
        cm.popContext(); // the data dictionary context; check that it is?
    }

    setDependencyManager();
    booting = false;
}

/**
 * Sets the dependency manager associated with this dd. Subclasses can
 * override this to install their own funky dependency manager.
 */
protected void setDependencyManager()
{
    dmgr = new BasicDependencyManager();
}

/**
 * Returns the dependency manager associated with this data dictionary.
 *
 * @see DataDictionary#getDependencyManager
 */
public DependencyManager getDependencyManager()
{
    return dmgr;
}

/**
 * Stop this module. In this case, nothing needs to be done.
 */
public void stop()
{
}

/*
** CacheableFactory interface
*/
public Cacheable newCacheable(CacheManager cm)
{
    if (cm == OIDTdCache)
        return new OIDTDCacheable(this);
    else if (cm == nameTdCache)
        return new NameTDCacheable(this);
    else
    {
        return new SPSNameCacheable(this);
    }
}

/*
** Methods related to ModuleControl
*/

/**
 * @see org.apache.derby.iapi.sql.dictionary.DataDictionary#startReading
 *
 * @exception StandardException Thrown on error
 */
public int startReading(LanguageConnectionContext lcc)
    throws StandardException
{
    int bindCount = lcc.incrementBindCount();
    int localCacheMode;

    boolean needRetry = false;

    do
    {
        if (needRetry)
        {
            // Could not get the lock while holding the synchronized(this)
            // monitor, so now wait until we can get the lock. Once we get
            // the lock it is automatically released; hopefully when we
            // go back into the synchronized(this) block we will be able
            // to get the lock while holding the synchronized(this)
            // monitor.
            try
            {
                lockFactory.zeroDurationlockObject(
                    lcc.getTransactionExecute().getLockObject(),
                    cacheCoordinator,
                    ShExQual.SH,
                    C_LockFactory.WAIT_FOREVER);
            }
            catch (StandardException e)
            {
                // DEADLOCK; a timeout will not happen with WAIT_FOREVER
                lcc.decrementBindCount();
                throw e;
            }

            needRetry = false;
        }

        // "this" is used to synchronize between startReading, doneReading,
        // and startWriting.
        synchronized(this)
        {
            localCacheMode = getCacheMode();

            /*
            ** Keep track of how deeply nested this bind() operation is.
            ** It's possible for nested binding to happen if the user
            ** prepares SQL statements from within a static initializer
            ** of a class, and calls a method on that class (or uses a
            ** field in the class).
            **
            ** If nested binding is happening, we only want to lock the
            ** DataDictionary on the outermost nesting level.
            */
            if (bindCount == 1)
            {
                if (localCacheMode == DataDictionary.COMPILE_ONLY_MODE)
                {
                    if (SanityManager.DEBUG)
                    {
                        SanityManager.ASSERT(ddlUsers == 0,
                            "Cache mode is COMPILE_ONLY and there are DDL users.");
                    }

                    /*
                    ** If we deadlock while waiting for a lock,
                    ** then be sure to restore things as they
                    ** were.
                    */
                    boolean lockGranted = false;

                    try
                    {
                        // When C_LockFactory.NO_WAIT is used this
                        // routine will not throw timeout or deadlock
                        // exceptions. The boolean returned indicates
                        // whether the lock was granted or not. If it would
                        // have had to wait, it just returns immediately
                        // and returns false.
                        //
                        // See if we can get this lock granted without
                        // waiting (while holding the dataDictionary
                        // synchronization).
                        lockGranted = lockFactory.lockObject(
                            lcc.getTransactionExecute().getLockObject(),
                            lcc.getTransactionExecute().getLockObject(),
                            cacheCoordinator,
                            ShExQual.SH,
                            C_LockFactory.NO_WAIT);
                    }
                    catch (StandardException e)
                    {
                        // Neither TIMEOUT nor DEADLOCK can happen with the
                        // NO_WAIT flag. This must be some other exception.
                        lcc.decrementBindCount();
                        throw e;
                    }

                    if (!lockGranted)
                        needRetry = true;
                }
                else
                {
                    readersInDDLMode++;
                }
            }
        } // end of sync block

    } while (needRetry);

    return localCacheMode;
}

/* @see org.apache.derby.iapi.sql.dictionary.DataDictionary#doneReading */
public void doneReading(int mode, LanguageConnectionContext lcc)
    throws StandardException
{
    int bindCount = lcc.decrementBindCount();

    /* This is an arbitrary choice of object to synchronize these methods */
    synchronized(this)
    {
        /*
        ** Keep track of how deeply nested this bind() operation is.
        ** It's possible for nested binding to happen if the user
        ** prepares SQL statements from within a static initializer
        ** of a class, and calls a method on that class (or uses a
        ** field in the class).
        **
        ** If nested binding is happening, we only want to unlock the
        ** DataDictionary on the outermost nesting level.
        */
        if (bindCount == 0)
        {
            if (mode == DataDictionary.COMPILE_ONLY_MODE)
            {
                /*
                ** Release the share lock that was acquired by the reader when
                ** it called startReading().
                ** Beetle 4418: during bind we may even execute something (e.g.,
                ** in a vti constructor), and if a severe error occurred, the
                ** transaction has been rolled back and the lock released already,
                ** so don't try to unlock if the statement context is cleared.
                */
                if ((lcc.getStatementContext() != null) &&
                    lcc.getStatementContext().inUse())
                {
                    int unlockCount = lockFactory.unlock(
                        lcc.getTransactionExecute().getLockObject(),
                        lcc.getTransactionExecute().getLockObject(),
                        cacheCoordinator,
                        ShExQual.SH);

                    if (SanityManager.DEBUG)
                    {
                        if (unlockCount != 1)
                        {
                            SanityManager.THROWASSERT("unlockCount not " +
                                "1 as expected, it is " + unlockCount);
                        }
                    }
                }
            }
            else
            {
                readersInDDLMode--;

                /*
                ** We can only switch back to cached (COMPILE_ONLY)
                ** mode if there aren't any readers that started in
                ** DDL_MODE. Otherwise we could get a reader
                ** in DDL_MODE that reads a cached object that
                ** was brought in by a reader in COMPILE_ONLY_MODE.
                ** If the 2nd reader finishes and releases its lock
                ** on the cache, there is nothing to prevent another
                ** writer from coming along and deleting the cached
                ** object.
                */
                if (ddlUsers == 0 && readersInDDLMode == 0)
                {
                    clearCaches();
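
The startReading()/doneReading() pair above (the listing breaks off inside doneReading()) coordinates dictionary readers with DDL writers: in COMPILE_ONLY_MODE each outermost bind takes a shared lock on cacheCoordinator so cached descriptors cannot be invalidated underneath it, in DDL_MODE readers skip the lock and are merely counted, and a per-connection bind count keeps nested binds from locking more than once. The sketch below restates that protocol with java.util.concurrent primitives. It is a simplified, hypothetical model, not Derby code: the class name, the ThreadLocal bind counter, and the startWriting() body are invented for illustration, and Derby's LockFactory, sanity checks, and cache clearing are omitted or reduced to comments.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// A hypothetical, much-simplified model of the reader/writer coordination
// behind startReading()/doneReading() above, rebuilt on java.util.concurrent
// instead of Derby's LockFactory/ShExLockable. All names are invented.
public class DictionaryCoordinatorSketch {

    public static final int COMPILE_ONLY_MODE = 0;
    public static final int DDL_MODE = 1;

    private final ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock();
    private int cacheMode = COMPILE_ONLY_MODE;
    private int readersInDDLMode = 0;
    private int ddlUsers = 0;

    // Depth of nested bind operations per thread (cf. bindCount above).
    private final ThreadLocal<Integer> bindDepth = ThreadLocal.withInitial(() -> 0);

    // Reader entry point: returns the cache mode this read runs under.
    public int startReading() {
        int depth = bindDepth.get() + 1;
        bindDepth.set(depth);
        while (true) {
            synchronized (this) {
                int mode = cacheMode;
                if (depth > 1) {
                    return mode;            // nested bind: only the outermost level locks
                }
                if (mode == DDL_MODE) {
                    readersInDDLMode++;     // no shared lock in DDL mode, just count the reader
                    return mode;
                }
                // COMPILE_ONLY_MODE: try the shared lock without blocking
                // while holding the monitor (cf. C_LockFactory.NO_WAIT above).
                if (cacheLock.readLock().tryLock()) {
                    return mode;
                }
            }
            // Could not get the lock inside the monitor: wait for it outside,
            // give it straight back, and retry (cf. zeroDurationlockObject).
            cacheLock.readLock().lock();
            cacheLock.readLock().unlock();
        }
    }

    // Reader exit point: pass in the mode returned by startReading().
    public void doneReading(int mode) {
        int depth = bindDepth.get() - 1;
        bindDepth.set(depth);
        if (depth > 0) {
            return;                         // still nested: only the outermost level releases
        }
        synchronized (this) {
            if (mode == COMPILE_ONLY_MODE) {
                cacheLock.readLock().unlock();  // release the share lock from startReading()
            } else {
                readersInDDLMode--;
                // Derby clears its descriptor caches here once the last
                // DDL user and the last DDL-mode reader are gone.
            }
        }
    }

    // DDL entry point: wait out COMPILE_ONLY readers, then switch modes.
    public void startWriting() {
        cacheLock.writeLock().lock();
        try {
            synchronized (this) {
                cacheMode = DDL_MODE;
                ddlUsers++;
            }
        } finally {
            cacheLock.writeLock().unlock();
        }
    }

    public static void main(String[] args) {
        DictionaryCoordinatorSketch dd = new DictionaryCoordinatorSketch();
        int mode = dd.startReading();
        try {
            System.out.println("reading in mode " + mode);
        } finally {
            dd.doneReading(mode);
        }
    }
}

In the real implementation the shared lock lives in the transaction's lock space (lcc.getTransactionExecute().getLockObject()), the blocking wait is a zero-duration lock taken with WAIT_FOREVER so the retry re-enters the synchronized block before locking, and leaving DDL mode also triggers clearCaches() once ddlUsers and readersInDDLMode both reach zero.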