
📄 preparedstatement.java

📁 MySQL 5.0 JDBC driver: just place it in the lib folder of GlassFish or Tomcat.
💻 JAVA
📖 Page 1 of 5
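The note above says the connector jar only needs to be dropped into GlassFish's or Tomcat's lib directory. Once it is on the classpath, ordinary JDBC code can obtain connections through it. Below is a minimal sketch (not part of the original file), assuming a local MySQL server, a database named test, placeholder credentials, and the Connector/J 5.x driver class com.mysql.jdbc.Driver:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ConnectorSmokeTest {
	public static void main(String[] args) throws Exception {
		// Register the Connector/J 5.x driver class explicitly
		// (newer JDBC 4 drivers are discovered automatically).
		Class.forName("com.mysql.jdbc.Driver");

		// URL, user and password are placeholders; adjust for your environment.
		Connection conn = DriverManager.getConnection(
				"jdbc:mysql://localhost:3306/test", "user", "password");
		try {
			Statement stmt = conn.createStatement();
			ResultSet rs = stmt.executeQuery("SELECT VERSION()");
			if (rs.next()) {
				System.out.println("Connected to MySQL " + rs.getString(1));
			}
			rs.close();
			stmt.close();
		} finally {
			conn.close();
		}
	}
}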
		int[] updateCounts = new int[numBatchedArgs];

		for (int i = 0; i < this.batchedArgs.size(); i++) {
			updateCounts[i] = 1;
		}

		try {
			try {
				if (this.retrieveGeneratedKeys) {
					batchedStatement = locallyScopedConn.prepareStatement(
							generateBatchedInsertSQL(valuesClause,
									numValuesPerBatch), RETURN_GENERATED_KEYS);
				} else {
					batchedStatement = locallyScopedConn
							.prepareStatement(generateBatchedInsertSQL(
									valuesClause, numValuesPerBatch));
				}

				if (this.connection.getEnableQueryTimeouts()
						&& batchTimeout != 0
						&& this.connection.versionMeetsMinimum(5, 0, 0)) {
					timeoutTask = new CancelTask(
							(StatementImpl) batchedStatement);
					ConnectionImpl.getCancelTimer().schedule(timeoutTask,
							batchTimeout);
				}

				if (numBatchedArgs < numValuesPerBatch) {
					numberToExecuteAsMultiValue = numBatchedArgs;
				} else {
					numberToExecuteAsMultiValue = numBatchedArgs
							/ numValuesPerBatch;
				}

				int numberArgsToExecute = numberToExecuteAsMultiValue
						* numValuesPerBatch;

				for (int i = 0; i < numberArgsToExecute; i++) {
					if (i != 0 && i % numValuesPerBatch == 0) {
						try {
							updateCountRunningTotal += batchedStatement
									.executeUpdate();
						} catch (SQLException ex) {
							sqlEx = handleExceptionForBatch(batchCounter - 1,
									numValuesPerBatch, updateCounts, ex);
						}

						getBatchedGeneratedKeys(batchedStatement);
						batchedStatement.clearParameters();
						batchedParamIndex = 1;
					}

					batchedParamIndex = setOneBatchedParameterSet(
							batchedStatement, batchedParamIndex,
							this.batchedArgs.get(batchCounter++));
				}

				try {
					updateCountRunningTotal += batchedStatement.executeUpdate();
				} catch (SQLException ex) {
					sqlEx = handleExceptionForBatch(batchCounter - 1,
							numValuesPerBatch, updateCounts, ex);
				}

				getBatchedGeneratedKeys(batchedStatement);

				numValuesPerBatch = numBatchedArgs - batchCounter;
			} finally {
				if (batchedStatement != null) {
					batchedStatement.close();
				}
			}

			try {
				if (numValuesPerBatch > 0) {
					if (this.retrieveGeneratedKeys) {
						batchedStatement = locallyScopedConn.prepareStatement(
								generateBatchedInsertSQL(valuesClause,
										numValuesPerBatch),
								RETURN_GENERATED_KEYS);
					} else {
						batchedStatement = locallyScopedConn
								.prepareStatement(generateBatchedInsertSQL(
										valuesClause, numValuesPerBatch));
					}

					if (timeoutTask != null) {
						timeoutTask.toCancel = (StatementImpl) batchedStatement;
					}

					batchedParamIndex = 1;

					while (batchCounter < numBatchedArgs) {
						batchedParamIndex = setOneBatchedParameterSet(
								batchedStatement, batchedParamIndex,
								this.batchedArgs.get(batchCounter++));
					}

					try {
						updateCountRunningTotal += batchedStatement.executeUpdate();
					} catch (SQLException ex) {
						sqlEx = handleExceptionForBatch(batchCounter - 1,
								numValuesPerBatch, updateCounts, ex);
					}

					getBatchedGeneratedKeys(batchedStatement);
				}

				if (sqlEx != null) {
					throw new java.sql.BatchUpdateException(sqlEx
							.getMessage(), sqlEx.getSQLState(), sqlEx
							.getErrorCode(), updateCounts);
				}

				return updateCounts;
			} finally {
				if (batchedStatement != null) {
					batchedStatement.close();
				}
			}
		} finally {
			if (timeoutTask != null) {
				timeoutTask.cancel();
			}

			resetCancelledState();
		}
	}
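The method above is the driver-internal path that folds a batch of parameter sets into multi-value INSERT statements. A hedged caller-side sketch (not part of the original file) of how this path is reached through the public JDBC API, assuming a Connection opened with the rewriteBatchedStatements=true connection property and an illustrative t_order table:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class MultiValueBatchExample {
	// conn is assumed to have been opened with a URL such as
	// jdbc:mysql://localhost:3306/test?rewriteBatchedStatements=true
	static int[] insertOrders(Connection conn, int howMany) throws SQLException {
		PreparedStatement ps = conn.prepareStatement(
				"INSERT INTO t_order (customer_id, amount) VALUES (?, ?)");
		try {
			for (int i = 0; i < howMany; i++) {
				ps.setInt(1, i);
				ps.setBigDecimal(2, new java.math.BigDecimal("9.99"));
				ps.addBatch();
			}
			// With rewriteBatchedStatements=true the driver folds these parameter
			// sets into multi-value INSERTs, sized by computeBatchSize() below.
			return ps.executeBatch();
		} finally {
			ps.close();
		}
	}
}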
	/**
	 * Computes the optimum number of batched parameter lists to send
	 * without overflowing max_allowed_packet.
	 * 
	 * @param numBatchedArgs
	 * @return
	 */
	protected int computeBatchSize(int numBatchedArgs) {
		long[] combinedValues = computeMaxParameterSetSizeAndBatchSize(numBatchedArgs);

		long maxSizeOfParameterSet = combinedValues[0];
		long sizeOfEntireBatch = combinedValues[1];

		int maxAllowedPacket = this.connection.getMaxAllowedPacket();

		if (sizeOfEntireBatch < maxAllowedPacket - this.originalSql.length()) {
			return numBatchedArgs;
		}

		return (int) Math.max(1, (maxAllowedPacket - this.originalSql.length())
				/ maxSizeOfParameterSet);
	}

	/**
	 * Computes the maximum parameter set size, and entire batch size, given
	 * the number of arguments in the batch.
	 */
	protected long[] computeMaxParameterSetSizeAndBatchSize(int numBatchedArgs) {
		long sizeOfEntireBatch = 0;
		long maxSizeOfParameterSet = 0;

		for (int i = 0; i < numBatchedArgs; i++) {
			BatchParams paramArg = (BatchParams) this.batchedArgs.get(i);

			boolean[] isNullBatch = paramArg.isNull;
			boolean[] isStreamBatch = paramArg.isStream;

			long sizeOfParameterSet = 0;

			for (int j = 0; j < isNullBatch.length; j++) {
				if (!isNullBatch[j]) {
					if (isStreamBatch[j]) {
						int streamLength = paramArg.streamLengths[j];

						if (streamLength != -1) {
							sizeOfParameterSet += streamLength * 2; // for safety in escaping
						} else {
							int paramLength = paramArg.parameterStrings[j].length;
							sizeOfParameterSet += paramLength;
						}
					} else {
						sizeOfParameterSet += paramArg.parameterStrings[j].length;
					}
				} else {
					sizeOfParameterSet += 4; // for NULL literal in SQL
				}
			}

			//
			// Account for static part of values clause.
			// This is a little naive, because the ?s will be replaced,
			// but it gives us some padding, and is less housekeeping
			// to ignore them. We're looking for a "fuzzy" value here
			// anyway.
			//

			if (this.batchedValuesClause != null) {
				sizeOfParameterSet += this.batchedValuesClause.length() + 1;
			}

			sizeOfEntireBatch += sizeOfParameterSet;

			if (sizeOfParameterSet > maxSizeOfParameterSet) {
				maxSizeOfParameterSet = sizeOfParameterSet;
			}
		}

		return new long[] { maxSizeOfParameterSet, sizeOfEntireBatch };
	}
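The sizing rule above compares the whole batch, and the largest single parameter set, against the server's max_allowed_packet. A standalone back-of-the-envelope sketch of that same arithmetic (all numbers are invented for illustration; this is not part of the original file):

public class BatchSizeSketch {
	public static void main(String[] args) {
		long maxAllowedPacket = 1048576;      // assumed max_allowed_packet (1 MB)
		long originalSqlLength = 80;          // assumed length of the INSERT text
		long sizeOfEntireBatch = 5000000;     // assumed sum of all parameter-set sizes
		long maxSizeOfParameterSet = 2048;    // assumed largest single parameter set
		int numBatchedArgs = 10000;

		int batchSize;
		if (sizeOfEntireBatch < maxAllowedPacket - originalSqlLength) {
			// Everything fits into a single multi-value statement.
			batchSize = numBatchedArgs;
		} else {
			// Otherwise, pack as many worst-case parameter sets as fit
			// into one packet, but always at least one.
			batchSize = (int) Math.max(1,
					(maxAllowedPacket - originalSqlLength) / maxSizeOfParameterSet);
		}

		System.out.println("parameter sets per statement: " + batchSize); // prints 511
	}
}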
	/**
	 * Executes the current batch of statements by executing them one-by-one.
	 * 
	 * @return a list of update counts
	 * @throws SQLException
	 *             if an error occurs
	 */
	protected int[] executeBatchSerially(int batchTimeout) throws SQLException {

		Connection locallyScopedConn = this.connection;

		if (locallyScopedConn == null) {
			checkClosed();
		}

		int[] updateCounts = null;

		if (this.batchedArgs != null) {
			int nbrCommands = this.batchedArgs.size();
			updateCounts = new int[nbrCommands];

			for (int i = 0; i < nbrCommands; i++) {
				updateCounts[i] = -3;
			}

			SQLException sqlEx = null;

			int commandIndex = 0;

			CancelTask timeoutTask = null;

			try {
				if (this.connection.getEnableQueryTimeouts()
						&& batchTimeout != 0
						&& this.connection.versionMeetsMinimum(5, 0, 0)) {
					timeoutTask = new CancelTask(this);
					ConnectionImpl.getCancelTimer().schedule(timeoutTask,
							batchTimeout);
				}

				if (this.retrieveGeneratedKeys) {
					this.batchedGeneratedKeys = new ArrayList(nbrCommands);
				}

				for (commandIndex = 0; commandIndex < nbrCommands; commandIndex++) {
					Object arg = this.batchedArgs.get(commandIndex);

					if (arg instanceof String) {
						updateCounts[commandIndex] = executeUpdate((String) arg);
					} else {
						BatchParams paramArg = (BatchParams) arg;

						try {
							updateCounts[commandIndex] = executeUpdate(
									paramArg.parameterStrings,
									paramArg.parameterStreams, paramArg.isStream,
									paramArg.streamLengths, paramArg.isNull, true);

							if (this.retrieveGeneratedKeys) {
								java.sql.ResultSet rs = null;

								try {
									rs = getGeneratedKeysInternal();

									while (rs.next()) {
										this.batchedGeneratedKeys
												.add(new ByteArrayRow(new byte[][] { rs.getBytes(1) }));
									}
								} finally {
									if (rs != null) {
										rs.close();
									}
								}
							}
						} catch (SQLException ex) {
							updateCounts[commandIndex] = EXECUTE_FAILED;

							if (this.continueBatchOnError
									&& !(ex instanceof MySQLTimeoutException)
									&& !(ex instanceof MySQLStatementCancelledException)
									&& !hasDeadlockOrTimeoutRolledBackTx(ex)) {
								sqlEx = ex;
							} else {
								int[] newUpdateCounts = new int[commandIndex];
								System.arraycopy(updateCounts, 0,
										newUpdateCounts, 0, commandIndex);

								throw new java.sql.BatchUpdateException(ex
										.getMessage(), ex.getSQLState(), ex
										.getErrorCode(), newUpdateCounts);
							}
						}
					}
				}

				if (sqlEx != null) {
					throw new java.sql.BatchUpdateException(sqlEx.getMessage(),
							sqlEx.getSQLState(), sqlEx.getErrorCode(), updateCounts);
				}
			} finally {
				if (timeoutTask != null) {
					timeoutTask.cancel();
				}

				resetCancelledState();
			}
		}

		return (updateCounts != null) ? updateCounts : new int[0];
	}
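When the serial path above aborts (continueBatchOnError is off, the statement was cancelled or timed out, or a deadlock/timeout rolled back the transaction), it throws java.sql.BatchUpdateException carrying the update counts gathered so far; otherwise failed slots are marked EXECUTE_FAILED. A caller-side sketch of consuming that behaviour (not part of the original file; conn and the t_item table are illustrative assumptions):

import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class SerialBatchExample {
	static void decrementStock(Connection conn) throws SQLException {
		Statement stmt = conn.createStatement();
		try {
			stmt.addBatch("UPDATE t_item SET qty = qty - 1 WHERE id = 1");
			stmt.addBatch("UPDATE t_item SET qty = qty - 1 WHERE id = 2");

			int[] counts = stmt.executeBatch();
			// counts[i] is the update count of command i, or
			// Statement.EXECUTE_FAILED if the driver continued past an error.
			System.out.println("commands executed: " + counts.length);
		} catch (BatchUpdateException bue) {
			// Counts gathered before the batch was aborted.
			int[] partial = bue.getUpdateCounts();
			System.err.println("batch stopped after " + partial.length
					+ " commands: " + bue.getMessage());
			throw bue;
		} finally {
			stmt.close();
		}
	}
}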
	/**
	 * Actually execute the prepared statement. This is here so server-side
	 * PreparedStatements can re-use most of the code from this class.
	 * 
	 * @param maxRowsToRetrieve
	 *            the max number of rows to return
	 * @param sendPacket
	 *            the packet to send
	 * @param createStreamingResultSet
	 *            should a 'streaming' result set be created?
	 * @param queryIsSelectOnly
	 *            is this query doing a SELECT?
	 * @param unpackFields
	 *            DOCUMENT ME!
	 * 
	 * @return the results as a ResultSet
	 * 
	 * @throws SQLException
	 *             if an error occurs.
	 */
	protected ResultSetInternalMethods executeInternal(int maxRowsToRetrieve,
			Buffer sendPacket, boolean createStreamingResultSet,
			boolean queryIsSelectOnly, Field[] metadataFromCache,
			boolean isBatch)
			throws SQLException {
		try {
			resetCancelledState();

			ConnectionImpl locallyScopedConnection = this.connection;

			this.numberOfExecutions++;

			if (this.doPingInstead) {
				doPingInstead();

				return this.results;
			}

			ResultSetInternalMethods rs;

			CancelTask timeoutTask = null;

			try {
				if (locallyScopedConnection.getEnableQueryTimeouts()
						&& this.timeoutInMillis != 0
						&& locallyScopedConnection.versionMeetsMinimum(5, 0, 0)) {
					timeoutTask = new CancelTask(this);
					ConnectionImpl.getCancelTimer().schedule(timeoutTask,
							this.timeoutInMillis);
				}

				rs = locallyScopedConnection.execSQL(this, null, maxRowsToRetrieve, sendPacket,
						this.resultSetType, this.resultSetConcurrency,
						createStreamingResultSet, this.currentCatalog,
						metadataFromCache, isBatch);

				if (timeoutTask != null) {
					timeoutTask.cancel();

					if (timeoutTask.caughtWhileCancelling != null) {
						throw timeoutTask.caughtWhileCancelling;
					}

					timeoutTask = null;
				}

				synchronized (this.cancelTimeoutMutex) {
					if (this.wasCancelled) {
						SQLException cause = null;

						if (this.wasCancelledByTimeout) {
							cause = new MySQLTimeoutException();
						} else {
							cause = new MySQLStatementCancelledException();
						}

						resetCancelledState();

						throw cause;
					}
				}
			} finally {
				if (timeoutTask != null) {
					timeoutTask.cancel();
				}
			}

			return rs;
		} catch (NullPointerException npe) {
			checkClosed(); // we can't synchronize ourselves against async connection-close
			               // due to deadlock issues, so this is the next best thing for
			               // this particular corner case.

			throw npe;
		}
	}

	/**
	 * A Prepared SQL query is executed and its ResultSet is returned
	 * 
	 * @return a ResultSet that contains the data produced by the query - never
	 *         null
	 * 
	 * @exception SQLException
	 *                if a database access error occurs
	 */
	public java.sql.ResultSet executeQuery() throws SQLException {
		checkClosed();

		ConnectionImpl locallyScopedConn = this.connection;

		checkForDml(this.originalSql, this.firstCharOfStmt);

		CachedResultSetMetaData cachedMetadata = null;

		// We need to execute this all together,
		// so synchronize on the Connection's mutex (because
		// even queries going through there synchronize
		// on the same mutex).
		synchronized (locallyScopedConn.getMutex()) {
			clearWarnings();

			boolean doStreaming = createStreamingResultSet();

			this.batchedGeneratedKeys = null;

			// Adjust net_write_timeout to a higher value if we're
			// streaming result sets. More often than not, someone runs into
			// an issue where they blow net_write_timeout when using this
			// feature, and if they're willing to hold a result set open
			// for 30 seconds or more, one more round-trip isn't going to hurt.
			//
			// This is reset by RowDataDynamic.close().

			if (doStreaming
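(The listing is cut off here at the page boundary; pages 2 through 5 continue executeQuery().) The two client-visible knobs wired up above are the query timeout (executeInternal() schedules a CancelTask) and streaming result sets (executeQuery() raises net_write_timeout while streaming). A usage sketch, not part of the original file, with conn and the t_big table as illustrative assumptions:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class StreamingQueryExample {
	static void scan(Connection conn) throws SQLException {
		PreparedStatement ps = conn.prepareStatement("SELECT id, payload FROM t_big");
		try {
			// Query timeout in seconds; enforced via the CancelTask seen above.
			ps.setQueryTimeout(30);

			// Connector/J convention: a fetch size of Integer.MIN_VALUE on a
			// forward-only, read-only statement asks the driver to stream rows
			// instead of buffering the whole result set in memory.
			ps.setFetchSize(Integer.MIN_VALUE);

			ResultSet rs = ps.executeQuery();
			try {
				while (rs.next()) {
					long id = rs.getLong(1);
					byte[] payload = rs.getBytes(2);
					// process id/payload here
				}
			} finally {
				rs.close();
			}
		} finally {
			ps.close();
		}
	}
}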
