⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 StatementImpl.java

📁 mysql5.0 JDBC 驱动 放在glassfish或者tomcat的lib文件夹下就可以了
💻 JAVA
📖 第 1 页 / 共 5 页
字号:
						CancelTask timeoutTask = null;						try {				resetCancelledState();				this.retrieveGeneratedKeys = true; // The JDBC spec doesn't forbid this, but doesn't provide for it either...we do..				int[] updateCounts = null;								if (this.batchedArgs != null) {					int nbrCommands = this.batchedArgs.size();					this.batchedGeneratedKeys = new ArrayList(this.batchedArgs.size());					boolean multiQueriesEnabled = locallyScopedConn.getAllowMultiQueries();					if (locallyScopedConn.versionMeetsMinimum(4, 1, 1) &&							(multiQueriesEnabled ||							(locallyScopedConn.getRewriteBatchedStatements() &&									nbrCommands > 4))) {						return executeBatchUsingMultiQueries(multiQueriesEnabled, nbrCommands, individualStatementTimeout);					}					if (locallyScopedConn.getEnableQueryTimeouts() &&							individualStatementTimeout != 0							&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {						timeoutTask = new CancelTask(this);						ConnectionImpl.getCancelTimer().schedule(timeoutTask,								individualStatementTimeout);					}										updateCounts = new int[nbrCommands];					for (int i = 0; i < nbrCommands; i++) {						updateCounts[i] = -3;					}					SQLException sqlEx = null;					int commandIndex = 0;					for (commandIndex = 0; commandIndex < nbrCommands; commandIndex++) {						try {							updateCounts[commandIndex] = executeUpdate((String) this.batchedArgs									.get(commandIndex), true, true);							getBatchedGeneratedKeys();						} catch (SQLException ex) {							updateCounts[commandIndex] = EXECUTE_FAILED;							if (this.continueBatchOnError && 									!(ex instanceof MySQLTimeoutException) && 									!(ex instanceof MySQLStatementCancelledException) &&                                    !hasDeadlockOrTimeoutRolledBackTx(ex)) {								sqlEx = ex;							} else {								int[] newUpdateCounts = new int[commandIndex];																if (hasDeadlockOrTimeoutRolledBackTx(ex)) {									for (int i = 0; i < newUpdateCounts.length; i++) {										newUpdateCounts[i] = 
Statement.EXECUTE_FAILED;									}								} else {									System.arraycopy(updateCounts, 0,										newUpdateCounts, 0, commandIndex);								}								throw new java.sql.BatchUpdateException(ex										.getMessage(), ex.getSQLState(), ex										.getErrorCode(), newUpdateCounts);							}						}					}					if (sqlEx != null) {						throw new java.sql.BatchUpdateException(sqlEx								.getMessage(), sqlEx.getSQLState(), sqlEx								.getErrorCode(), updateCounts);					}				}				if (timeoutTask != null) {					if (timeoutTask.caughtWhileCancelling != null) {						throw timeoutTask.caughtWhileCancelling;					}					timeoutTask.cancel();					timeoutTask = null;				}								return (updateCounts != null) ? updateCounts : new int[0];			} finally {								if (timeoutTask != null) {					timeoutTask.cancel();				}								resetCancelledState();								this.timeoutInMillis = individualStatementTimeout;				clearBatch();			}		}	}	protected final boolean hasDeadlockOrTimeoutRolledBackTx(SQLException ex) {		int vendorCode = ex.getErrorCode();				switch (vendorCode) {		case MysqlErrorNumbers.ER_LOCK_DEADLOCK:		case MysqlErrorNumbers.ER_LOCK_TABLE_FULL:			return true;		case MysqlErrorNumbers.ER_LOCK_WAIT_TIMEOUT:			try {				return !this.connection.versionMeetsMinimum(5, 0, 13);			} catch (SQLException sqlEx) {				// won't actually be thrown in this case				return false;			}		default:			return false;		}	}	/**	 * Rewrites batch into a single query to send to the server. This method	 * will constrain each batch to be shorter than max_allowed_packet on the	 * server.	 
*	 * @return update counts in the same manner as executeBatch()	 * @throws SQLException	 */	private int[] executeBatchUsingMultiQueries(boolean multiQueriesEnabled,			int nbrCommands, int individualStatementTimeout) throws SQLException {		ConnectionImpl locallyScopedConn = this.connection;		if (!multiQueriesEnabled) {			locallyScopedConn.getIO().enableMultiQueries();		}		java.sql.Statement batchStmt = null;		CancelTask timeoutTask = null;				try {			int[] updateCounts = new int[nbrCommands];			for (int i = 0; i < nbrCommands; i++) {				updateCounts[i] = -3;			}			int commandIndex = 0;			StringBuffer queryBuf = new StringBuffer();			batchStmt = locallyScopedConn.createStatement();			if (locallyScopedConn.getEnableQueryTimeouts() &&					individualStatementTimeout != 0					&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {				timeoutTask = new CancelTask((StatementImpl)batchStmt);				ConnectionImpl.getCancelTimer().schedule(timeoutTask,						individualStatementTimeout);			}						int counter = 0;			int numberOfBytesPerChar = 1;			String connectionEncoding = locallyScopedConn.getEncoding();			if (StringUtils.startsWithIgnoreCase(connectionEncoding, "utf")) {				numberOfBytesPerChar = 3;			} else if (CharsetMapping.isMultibyteCharset(connectionEncoding)) {				numberOfBytesPerChar = 2;			}			int escapeAdjust = 1;			if (this.doEscapeProcessing) {				escapeAdjust = 2; /* We assume packet _could_ grow by this amount, as we're not				                     sure how big statement will end up after				                     escape processing */			}			SQLException sqlEx = null;						int argumentSetsInBatchSoFar = 0;						for (commandIndex = 0; commandIndex < nbrCommands; commandIndex++) {				String nextQuery = (String) this.batchedArgs.get(commandIndex);				if (((((queryBuf.length() + nextQuery.length())						* numberOfBytesPerChar) + 1 /* for semicolon */						+ MysqlIO.HEADER_LENGTH) * escapeAdjust)  + 32 > this.connection						.getMaxAllowedPacket()) {					try {						
batchStmt.execute(queryBuf.toString(), Statement.RETURN_GENERATED_KEYS);					} catch (SQLException ex) {						sqlEx = handleExceptionForBatch(commandIndex,								argumentSetsInBatchSoFar, updateCounts, ex);					}					counter = processMultiCountsAndKeys((StatementImpl)batchStmt, counter, 							updateCounts);					queryBuf = new StringBuffer();					argumentSetsInBatchSoFar = 0;				}				queryBuf.append(nextQuery);				queryBuf.append(";");				argumentSetsInBatchSoFar++;			}			if (queryBuf.length() > 0) {				try {					batchStmt.execute(queryBuf.toString(), Statement.RETURN_GENERATED_KEYS);				} catch (SQLException ex) {					sqlEx = handleExceptionForBatch(commandIndex - 1,							argumentSetsInBatchSoFar, updateCounts, ex);				}				counter = processMultiCountsAndKeys((StatementImpl)batchStmt, counter, 						updateCounts);			}			if (timeoutTask != null) {				if (timeoutTask.caughtWhileCancelling != null) {					throw timeoutTask.caughtWhileCancelling;				}				timeoutTask.cancel();				timeoutTask = null;			}						if (sqlEx != null) {				throw new java.sql.BatchUpdateException(sqlEx						.getMessage(), sqlEx.getSQLState(), sqlEx						.getErrorCode(), updateCounts);			}						return (updateCounts != null) ? 
updateCounts : new int[0];		} finally {			if (timeoutTask != null) {				timeoutTask.cancel();			}						resetCancelledState();						try {				if (batchStmt != null) {					batchStmt.close();				}			} finally {				if (!multiQueriesEnabled) {					locallyScopedConn.getIO().disableMultiQueries();				}			}		}	}		protected int processMultiCountsAndKeys(			StatementImpl batchedStatement,			int updateCountCounter, int[] updateCounts) throws SQLException {		updateCounts[updateCountCounter++] = batchedStatement.getUpdateCount();				boolean doGenKeys = this.batchedGeneratedKeys != null;		byte[][] row = null;				if (doGenKeys) {			long generatedKey = batchedStatement.getLastInsertID();					row = new byte[1][];			row[0] = Long.toString(generatedKey).getBytes();			this.batchedGeneratedKeys.add(new ByteArrayRow(row));		}		while (batchedStatement.getMoreResults()				|| batchedStatement.getUpdateCount() != -1) {			updateCounts[updateCountCounter++] = batchedStatement.getUpdateCount();						if (doGenKeys) {				long generatedKey = batchedStatement.getLastInsertID();								row = new byte[1][];				row[0] = Long.toString(generatedKey).getBytes();				this.batchedGeneratedKeys.add(new ByteArrayRow(row));			}		}				return updateCountCounter;	}		protected SQLException handleExceptionForBatch(int endOfBatchIndex,			int numValuesPerBatch, int[] updateCounts, SQLException ex)			throws BatchUpdateException {		SQLException sqlEx;			for (int j = endOfBatchIndex; j > endOfBatchIndex - numValuesPerBatch; j--) {			updateCounts[j] = EXECUTE_FAILED;		}		if (this.continueBatchOnError && 				!(ex instanceof MySQLTimeoutException) && 				!(ex instanceof MySQLStatementCancelledException) &&				!hasDeadlockOrTimeoutRolledBackTx(ex)) {			sqlEx = ex;		} else {			int[] newUpdateCounts = new int[endOfBatchIndex];			System.arraycopy(updateCounts, 0,					newUpdateCounts, 0, endOfBatchIndex);			throw new java.sql.BatchUpdateException(ex					.getMessage(), ex.getSQLState(), ex					.getErrorCode(), newUpdateCounts);		
}

		return sqlEx;
	}

	/**
	 * Execute a SQL statement that returns a single ResultSet.
	 *
	 * @param sql
	 *            typically a static SQL SELECT statement
	 *
	 * @return a ResultSet that contains the data produced by the query
	 *
	 * @exception SQLException
	 *                if a database access error occurs
	 */
	public java.sql.ResultSet executeQuery(String sql)
			throws SQLException {
		checkClosed();

		ConnectionImpl locallyScopedConn = this.connection;

		synchronized (locallyScopedConn.getMutex()) {
			this.retrieveGeneratedKeys = false;

			resetCancelledState();

			checkNullOrEmptyQuery(sql);

			boolean doStreaming = createStreamingResultSet();

			// Adjust net_write_timeout to a higher value if we're
			// streaming result sets. More often than not, someone runs into
			// an issue where they blow net_write_timeout when using this
			// feature, and if they're willing to hold a result set open
			// for 30 seconds or more, one more round-trip isn't going to hurt
			//
			// This is reset by RowDataDynamic.close().
if (doStreaming					&& this.connection.getNetTimeoutForStreamingResults() > 0) {				executeSimpleNonQuery(locallyScopedConn, "SET net_write_timeout="						+ this.connection.getNetTimeoutForStreamingResults());			}			if (this.doEscapeProcessing) {				Object escapedSqlResult = EscapeProcessor.escapeSQL(sql,						locallyScopedConn.serverSupportsConvertFn(), this.connection);				if (escapedSqlResult instanceof String) {					sql = (String) escapedSqlResult;				} else {					sql = ((EscapeProcessorResult) escapedSqlResult).escapedSql;				}			}			char firstStatementChar = StringUtils.firstNonWsCharUc(sql,					findStartOfStatement(sql));			if (sql.charAt(0) == '/') {				if (sql.startsWith(PING_MARKER)) {					doPingInstead();									return this.results;				}			}						checkForDml(sql, firstStatementChar);			if (this.results != null) {				if (!locallyScopedConn.getHoldResultsOpenOverStatementClose()) {					this.results.realClose(false);				}			}			CachedResultSetMetaData cachedMetaData = null;			// If there isn't a limit clause in the SQL			// then limit the number of rows to return in			// an efficient manner. Only do this if			// setMaxRows() hasn't been used on any Statements			// generated from the current Connection (saves			// a query, and network traffic).			if (useServerFetch()) {				this.results = createResultSetUsingServerFetch(sql);				return this.results;			}			CancelTask timeoutTask = null;			String oldCatalog = null;			try {				if (locallyScopedConn.getEnableQueryTimeouts() &&						this.timeoutInMillis != 0						&& locallyScopedConn.versionMeetsMinimum(5, 0, 0)) {					timeoutTask = new CancelTask(this);					ConnectionImpl.getCancelTimer().schedule(timeoutTask,							this.timeoutInMillis);				}				if (!locallyScopedConn.getCatalog().equals(this.currentCatalog)) {					oldCatalog = locallyScopedConn.getCatalog();					locallyScopedConn.setCatalog(this.currentCatalog);				}				//				// Check if we have cached metadata for this query...				
//				Field[] cachedFields = null;				if (locallyScopedConn.getCacheResultSetMetadata()) {					cachedMetaData = locallyScopedConn.getCachedMetaData(sql);					if (cachedMetaData != null) {						cachedFields = cachedMetaData.fields;					}				}				if (locallyScopedConn.useMaxRows()) {					// We need to execute this all together					// So synchronize on the Connection's mutex (because					// even queries going through there synchronize					// on the connection					if (StringUtils.indexOfIgnoreCase(sql, "LIMIT") != -1) { //$NON-NLS-1$						this.results = locallyScopedConn.execSQL(this, sql,								this.maxRows, null, this.resultSetType,								this.resultSetConcurrency,								doStreaming,								this.currentCatalog, cachedFields);					} else {						if (this.maxRows <= 0) {							executeSimpleNonQuery(locallyScopedConn,									"SET OPTION SQL_SELECT_LIMIT=DEFAULT");						} else {							executeSimpleNonQuery(locallyScopedConn,											"SET OPTION SQL_SELECT_LIMIT=" + this.maxRows);						}						this.results = locallyScopedConn.execSQL(this, sql, -1,								null, this.resultSetType,								this.resultSetConcurrency,								doStreaming,								this.currentCatalog, cachedFields);						if (oldCatalog != null) {							locallyScopedConn.setCatalog(oldCatalog);						}					}				} else {					this.results = locallyScopedConn.execSQL(this, sql, -1, null,

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -