
📄 PreparedStatement.java

📁 The latest JDBC driver for developing against the MySQL database.
💻 Java
📖 Page 1 of 5
                // or catch the very rare race condition and handle it here.
                checkClosed(); // if we're really closed, this will throw a SQLException
                throw npe; // otherwise someone (me!) goofed error
            } finally {
                clearBatch();
            }
        }
    }

    public synchronized boolean canRewriteAsMultivalueInsertStatement() {
        if (!this.hasCheckedForRewrite) {
            // Needs to be INSERT, can't have INSERT ... SELECT or
            // INSERT ... ON DUPLICATE KEY UPDATE
            //
            // We're not smart enough to re-write to
            //
            //    INSERT INTO table (a,b,c) VALUES (1,2,3),(4,5,6)
            //    ON DUPLICATE KEY UPDATE c=VALUES(a)+VALUES(b);
            //
            // (yet)
            this.canRewrite = StringUtils.startsWithIgnoreCaseAndWs(
                    this.originalSql, "INSERT", this.statementAfterCommentsPos)
                    && StringUtils.indexOfIgnoreCaseRespectMarker(this.statementAfterCommentsPos,
                            this.originalSql, "SELECT", "\"'`", "\"'`", false) == -1
                    && StringUtils.indexOfIgnoreCaseRespectMarker(this.statementAfterCommentsPos,
                            this.originalSql, "UPDATE", "\"'`", "\"'`", false) == -1;

            this.hasCheckedForRewrite = true;
        }

        return this.canRewrite;
    }

    /**
     * Rewrites the already prepared statement into a multi-value insert
     * statement of 'statementsPerBatch' values and executes the entire batch
     * using this new statement.
     *
     * @return update counts in the same fashion as executeBatch()
     *
     * @throws SQLException
     */
    protected int[] executeBatchedInserts() throws SQLException {
        String valuesClause = extractValuesClause();

        Connection locallyScopedConn = this.connection;

        if (valuesClause == null) {
            return executeBatchSerially();
        }

        int numBatchedArgs = this.batchedArgs.size();

        if (this.retrieveGeneratedKeys) {
            this.batchedGeneratedKeys = new ArrayList(numBatchedArgs);
        }

        int numValuesPerBatch = computeBatchSize(numBatchedArgs);

        if (numBatchedArgs < numValuesPerBatch) {
            numValuesPerBatch = numBatchedArgs;
        }

        java.sql.PreparedStatement batchedStatement = null;

        int batchedParamIndex = 1;
        int updateCountRunningTotal = 0;
        int numberToExecuteAsMultiValue = 0;
        int batchCounter = 0;

        try {
            if (this.retrieveGeneratedKeys) {
                batchedStatement = locallyScopedConn.prepareStatement(
                        generateBatchedInsertSQL(valuesClause, numValuesPerBatch),
                        RETURN_GENERATED_KEYS);
            } else {
                batchedStatement = locallyScopedConn
                        .prepareStatement(generateBatchedInsertSQL(valuesClause,
                                numValuesPerBatch));
            }

            if (numBatchedArgs < numValuesPerBatch) {
                numberToExecuteAsMultiValue = numBatchedArgs;
            } else {
                numberToExecuteAsMultiValue = numBatchedArgs / numValuesPerBatch;
            }

            int numberArgsToExecute = numberToExecuteAsMultiValue * numValuesPerBatch;

            for (int i = 0; i < numberArgsToExecute; i++) {
                if (i != 0 && i % numValuesPerBatch == 0) {
                    updateCountRunningTotal += batchedStatement.executeUpdate();

                    getBatchedGeneratedKeys(batchedStatement);
                    batchedStatement.clearParameters();
                    batchedParamIndex = 1;
                }

                batchedParamIndex = setOneBatchedParameterSet(batchedStatement,
                        batchedParamIndex, this.batchedArgs
                                .get(batchCounter++));
            }

            updateCountRunningTotal += batchedStatement.executeUpdate();
            getBatchedGeneratedKeys(batchedStatement);

            numValuesPerBatch = numBatchedArgs - batchCounter;
        } finally {
            if (batchedStatement != null) {
                batchedStatement.close();
            }
        }

        try {
            if (numValuesPerBatch > 0) {
                if (this.retrieveGeneratedKeys) {
                    batchedStatement = locallyScopedConn.prepareStatement(
                            generateBatchedInsertSQL(valuesClause, numValuesPerBatch),
                            RETURN_GENERATED_KEYS);
                } else {
                    batchedStatement = locallyScopedConn.prepareStatement(
                            generateBatchedInsertSQL(valuesClause, numValuesPerBatch));
                }

                batchedParamIndex = 1;

                while (batchCounter < numBatchedArgs) {
                    batchedParamIndex = setOneBatchedParameterSet(batchedStatement,
                            batchedParamIndex, this.batchedArgs
                                    .get(batchCounter++));
                }

                updateCountRunningTotal += batchedStatement.executeUpdate();
                getBatchedGeneratedKeys(batchedStatement);
            }

            int[] updateCounts = new int[this.batchedArgs.size()];

            for (int i = 0; i < this.batchedArgs.size(); i++) {
                updateCounts[i] = 1;
            }

            return updateCounts;
        } finally {
            if (batchedStatement != null) {
                batchedStatement.close();
            }
        }
    }

    /**
     * Computes the optimum number of batched parameter lists to send
     * without overflowing max_allowed_packet.
     *
     * @param numBatchedArgs
     * @return
     */
    protected int computeBatchSize(int numBatchedArgs) {
        long[] combinedValues = computeMaxParameterSetSizeAndBatchSize(numBatchedArgs);

        long maxSizeOfParameterSet = combinedValues[0];
        long sizeOfEntireBatch = combinedValues[1];

        int maxAllowedPacket = this.connection.getMaxAllowedPacket();

        if (sizeOfEntireBatch < maxAllowedPacket - this.originalSql.length()) {
            return numBatchedArgs;
        }

        return (int) Math.max(1, (maxAllowedPacket - this.originalSql.length()) / maxSizeOfParameterSet);
    }

    /**
     * Computes the maximum parameter set size, and entire batch size given
     * the number of arguments in the batch.
     */
    protected long[] computeMaxParameterSetSizeAndBatchSize(int numBatchedArgs) {
        long sizeOfEntireBatch = 0;
        long maxSizeOfParameterSet = 0;

        for (int i = 0; i < numBatchedArgs; i++) {
            BatchParams paramArg = (BatchParams) this.batchedArgs
                    .get(i);

            boolean[] isNullBatch = paramArg.isNull;
            boolean[] isStreamBatch = paramArg.isStream;

            long sizeOfParameterSet = 0;

            for (int j = 0; j < isNullBatch.length; j++) {
                if (!isNullBatch[j]) {
                    if (isStreamBatch[j]) {
                        int streamLength = paramArg.streamLengths[j];

                        if (streamLength != -1) {
                            sizeOfParameterSet += streamLength * 2; // for safety in escaping
                        }
                    } else {
                        sizeOfParameterSet += paramArg.parameterStrings[j].length;
                    }
                } else {
                    sizeOfParameterSet += 4; // for NULL literal in SQL
                }
            }

            //
            // Account for static part of values clause
            // This is a little naive, because the ?s will be replaced
            // but it gives us some padding, and is less housekeeping
            // to ignore them. We're looking for a "fuzzy" value here
            // anyway
            //
            sizeOfParameterSet += this.batchedValuesClause.length() + 1;

            sizeOfEntireBatch += sizeOfParameterSet;

            if (sizeOfParameterSet > maxSizeOfParameterSet) {
                maxSizeOfParameterSet = sizeOfParameterSet;
            }
        }

        return new long[] { maxSizeOfParameterSet, sizeOfEntireBatch };
    }

    /**
     * Executes the current batch of statements by executing them one-by-one.
     *
     * @return a list of update counts
     * @throws SQLException
     *             if an error occurs
     */
    protected int[] executeBatchSerially() throws SQLException {

        Connection locallyScopedConn = this.connection;

        if (locallyScopedConn == null) {
            checkClosed();
        }

        int[] updateCounts = null;

        if (this.batchedArgs != null) {
            int nbrCommands = this.batchedArgs.size();
            updateCounts = new int[nbrCommands];

            for (int i = 0; i < nbrCommands; i++) {
                updateCounts[i] = -3;
            }

            SQLException sqlEx = null;

            int commandIndex = 0;

            if (this.retrieveGeneratedKeys) {
                this.batchedGeneratedKeys = new ArrayList(nbrCommands);
            }

            for (commandIndex = 0; commandIndex < nbrCommands; commandIndex++) {
                Object arg = this.batchedArgs.get(commandIndex);

                if (arg instanceof String) {
                    updateCounts[commandIndex] = executeUpdate((String) arg);
                } else {
                    BatchParams paramArg = (BatchParams) arg;

                    try {
                        updateCounts[commandIndex] = executeUpdate(
                                paramArg.parameterStrings,
                                paramArg.parameterStreams, paramArg.isStream,
                                paramArg.streamLengths, paramArg.isNull, true);

                        if (this.retrieveGeneratedKeys) {
                            java.sql.ResultSet rs = null;

                            try {
                                rs = getGeneratedKeysInternal();

                                while (rs.next()) {
                                    this.batchedGeneratedKeys
                                            .add(new byte[][] { rs.getBytes(1) });
                                }
                            } finally {
                                if (rs != null) {
                                    rs.close();
                                }
                            }
                        }
                    } catch (SQLException ex) {
                        updateCounts[commandIndex] = EXECUTE_FAILED;

                        if (this.continueBatchOnError) {
                            sqlEx = ex;
                        } else {
                            int[] newUpdateCounts = new int[commandIndex];
                            System.arraycopy(updateCounts, 0, newUpdateCounts,
                                    0, commandIndex);

                            throw new java.sql.BatchUpdateException(ex
                                    .getMessage(), ex.getSQLState(), ex
                                    .getErrorCode(), newUpdateCounts);
                        }
                    }
                }
            }

            if (sqlEx != null) {
                throw new java.sql.BatchUpdateException(sqlEx.getMessage(),
                        sqlEx.getSQLState(), sqlEx.getErrorCode(), updateCounts);
            }
        }

        return (updateCounts != null) ? updateCounts : new int[0];
    }

    /**
     * Actually execute the prepared statement. This is here so server-side
     * PreparedStatements can re-use most of the code from this class.
     *
     * @param maxRowsToRetrieve
     *            the max number of rows to return
     * @param sendPacket
     *            the packet to send
     * @param createStreamingResultSet
     *            should a 'streaming' result set be created?
     * @param queryIsSelectOnly
     *            is this query doing a SELECT?
     * @param unpackFields
     *            DOCUMENT ME!
     *
     * @return the results as a ResultSet
     *
     * @throws SQLException
     *             if an error occurs.
     */
    protected ResultSet executeInternal(int maxRowsToRetrieve,
            Buffer sendPacket, boolean createStreamingResultSet,
            boolean queryIsSelectOnly, boolean unpackFields, Field[] cachedFields,
            boolean isBatch)
            throws SQLException {
        try {

            synchronized (this.cancelTimeoutMutex) {
                this.wasCancelled = false;
            }

            Connection locallyScopedConnection = this.connection;

            this.numberOfExecutions++;

            ResultSet rs;

            CancelTask timeoutTask = null;

            try {
                if (locallyScopedConnection.getEnableQueryTimeouts()
                        && this.timeoutInMillis != 0
                        && locallyScopedConnection.versionMeetsMinimum(5, 0, 0)) {
                    timeoutTask = new CancelTask();
                    Connection.getCancelTimer().schedule(timeoutTask,
                            this.timeoutInMillis);
                }

                rs = locallyScopedConnection.execSQL(this, null, maxRowsToRetrieve, sendPacket,
                        this.resultSetType, this.resultSetConcurrency,
                        createStreamingResultSet, this.currentCatalog,
                        unpackFields, isBatch);

                if (timeoutTask != null) {
                    timeoutTask.cancel();

                    if (timeoutTask.caughtWhileCancelling != null) {
                        throw timeoutTask.caughtWhileCancelling;
                    }

                    timeoutTask = null;
                }

                synchronized (this.cancelTimeoutMutex) {
                    if (this.wasCancelled) {
                        this.wasCancelled = false;

                        throw new MySQLTimeoutException();
                    }
                }
            } finally {
                if (timeoutTask != null) {
                    timeoutTask.cancel();
                }
            }

            return rs;
        } catch (NullPointerException npe) {
            checkClosed(); // we can't synchronize ourselves against async connection-close
                           // due to deadlock issues, so this is the next best thing for
                           // this particular corner case.

            throw npe;
        }
    }

    /**
     * A Prepared SQL query is executed and its ResultSet is returned
     *
     * @return a ResultSet that contains the data produced by the query - never
     *         null
     *
     * @exception SQLException
     *                if a database access error occurs
     */
    public java.sql.ResultSet executeQuery() throws SQLException {
        checkClosed();

        Connection locallyScopedConn = this.connection;

        checkForDml(this.originalSql, this.firstCharOfStmt);

        CachedResultSetMetaData cachedMetadata = null;

        // We need to execute this all together
        // So synchronize on the Connection's mutex (because
        // even queries going through there synchronize
        // on the same mutex.
        synchronized (locallyScopedConn.getMutex()) {
            clearWarnings();

            this.batchedGeneratedKeys = null;

            Buffer sendPacket = fillSendPacket();

            if (this.results != null) {
                if (!this.connection.getHoldResultsOpenOverStatementClose()) {
                    if (!this.holdResultsOpenOverClose) {
                        this.results.realClose(false);
                    }
                }
            }

            String oldCatalog = null;

            if (!locallyScopedConn.getCatalog().equals(this.currentCatalog)) {
                oldCatalog = locallyScopedConn.getCatalog();
                locallyScopedConn.setCatalog(this.currentCatalog);
            }

            //
            // Check if we have cached metadata for this query...
            //
            if (locallyScopedConn.getCacheResultSetMetadata()) {
                cachedMetadata = locallyScopedConn.getCachedMetaData(this.originalSql);
            }

            Field[] metadataFromCache = null;

            if (cachedMetadata != null) {
                metadataFromCache = cachedMetadata.fields;
            }

            if (locallyScopedConn.useMaxRows()) {
                // If there isn't a limit clause in the SQL
                // then limit the number of rows to return in
                // an efficient manner. Only do this if
                // setMaxRows() hasn't been used on any Statements
                // generated from the current Connection (saves
                // a query, and network traffic).
                if (this.hasLimitClause) {
                    this.results = executeInternal(this.maxRows, sendPacket,
                            createStreamingResultSet(), true,
                            (cachedMetadata == null), metadataFromCache, false);
                } else {
                    if (this.maxRows <= 0) {
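
The methods in the listing above implement Connector/J's multi-value INSERT rewrite for batches: canRewriteAsMultivalueInsertStatement() decides whether the prepared SQL is eligible, computeBatchSize() sizes each rewritten statement against max_allowed_packet, and executeBatchedInserts() drives the execution. The following is a minimal client-side sketch of how an application would exercise that path through the standard JDBC batch API; the URL, credentials, and table schema are illustrative placeholders, and rewriteBatchedStatements=true is the Connector/J connection property that enables the rewrite (without it, executeBatch() falls back to the one-by-one path of executeBatchSerially()).

// Minimal usage sketch (not part of the driver source). The database URL,
// credentials, and table "t" are assumptions for illustration only.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class BatchInsertExample {
    public static void main(String[] args) throws Exception {
        // Needed for pre-JDBC-4 versions of Connector/J.
        Class.forName("com.mysql.jdbc.Driver");

        // rewriteBatchedStatements=true lets the driver collapse the batch
        // into multi-value INSERTs via executeBatchedInserts().
        String url = "jdbc:mysql://localhost:3306/test?rewriteBatchedStatements=true";

        Connection conn = DriverManager.getConnection(url, "user", "password");
        try {
            PreparedStatement ps = conn.prepareStatement(
                    "INSERT INTO t (a, b, c) VALUES (?, ?, ?)");
            try {
                for (int i = 0; i < 1000; i++) {
                    ps.setInt(1, i);
                    ps.setInt(2, i * 2);
                    ps.setInt(3, i * 3);
                    ps.addBatch(); // queues one parameter set (a BatchParams entry)
                }

                // With rewriting enabled, this funnels into executeBatchedInserts(),
                // which sizes each rewritten INSERT against max_allowed_packet.
                int[] counts = ps.executeBatch();
                System.out.println("executed " + counts.length + " batched inserts");
            } finally {
                ps.close();
            }
        } finally {
            conn.close();
        }
    }
}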
