
PreparedStatement.java

MySQL 5.1 JDBC driver: Connector/J 5.1 supports MySQL 4.1, MySQL 5.0, MySQL 5.1, and the MySQL 6.0 alpha.
Language: Java
Page 1 of 5
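For orientation before the listing: this file implements the client-side java.sql.PreparedStatement for Connector/J. The sketch below shows the ordinary way application code reaches it, assuming a local MySQL server at localhost:3306 and a hypothetical test database, user/password credentials, and employees table; only the driver class name and the standard JDBC calls come from the driver itself.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class ConnectorJQueryExample {
    public static void main(String[] args) throws Exception {
        // Register the Connector/J 5.1 driver (automatic under JDBC 4.0,
        // shown explicitly here for older JVMs).
        Class.forName("com.mysql.jdbc.Driver");

        // Hypothetical host, database, and credentials.
        Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test", "user", "password");
        try {
            // Parameter values are bound client-side by PreparedStatement.java
            // before the statement text is sent to the server.
            PreparedStatement ps = conn.prepareStatement(
                    "SELECT id, name FROM employees WHERE dept = ?");
            ps.setString(1, "engineering");

            ResultSet rs = ps.executeQuery();
            while (rs.next()) {
                System.out.println(rs.getInt(1) + " " + rs.getString(2));
            }

            rs.close();
            ps.close();
        } finally {
            conn.close();
        }
    }
}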
                            numValuesPerBatch, updateCounts, ex);
                }

                getBatchedGeneratedKeys(batchedStatement);
                batchedStatement.clearParameters();
                batchedParamIndex = 1;
            }

            batchedParamIndex = setOneBatchedParameterSet(
                    batchedStatement, batchedParamIndex,
                    this.batchedArgs.get(batchCounter++));
        }

        try {
            updateCountRunningTotal += batchedStatement.executeUpdate();
        } catch (SQLException ex) {
            sqlEx = handleExceptionForBatch(batchCounter - 1,
                    numValuesPerBatch, updateCounts, ex);
        }

        getBatchedGeneratedKeys(batchedStatement);

        numValuesPerBatch = numBatchedArgs - batchCounter;
    } finally {
        if (batchedStatement != null) {
            batchedStatement.close();
        }
    }

    try {
        if (numValuesPerBatch > 0) {
            if (this.retrieveGeneratedKeys) {
                batchedStatement = locallyScopedConn.prepareStatement(
                        generateBatchedInsertSQL(valuesClause,
                                numValuesPerBatch),
                        RETURN_GENERATED_KEYS);
            } else {
                batchedStatement = locallyScopedConn
                        .prepareStatement(generateBatchedInsertSQL(
                                valuesClause, numValuesPerBatch));
            }

            if (timeoutTask != null) {
                timeoutTask.toCancel = (StatementImpl) batchedStatement;
            }

            batchedParamIndex = 1;

            while (batchCounter < numBatchedArgs) {
                batchedParamIndex = setOneBatchedParameterSet(
                        batchedStatement, batchedParamIndex,
                        this.batchedArgs.get(batchCounter++));
            }

            try {
                updateCountRunningTotal += batchedStatement.executeUpdate();
            } catch (SQLException ex) {
                sqlEx = handleExceptionForBatch(batchCounter - 1,
                        numValuesPerBatch, updateCounts, ex);
            }

            getBatchedGeneratedKeys(batchedStatement);
        }

        if (sqlEx != null) {
            throw new java.sql.BatchUpdateException(sqlEx.getMessage(),
                    sqlEx.getSQLState(), sqlEx.getErrorCode(), updateCounts);
        }

        return updateCounts;
    } finally {
        if (batchedStatement != null) {
            batchedStatement.close();
        }
    }
} finally {
    if (timeoutTask != null) {
        timeoutTask.cancel();
    }

    resetCancelledState();
}
}

/**
 * Computes the optimum number of batched parameter lists to send
 * without overflowing max_allowed_packet.
 *
 * @param numBatchedArgs
 * @return
 */
protected int computeBatchSize(int numBatchedArgs) {
    long[] combinedValues = computeMaxParameterSetSizeAndBatchSize(numBatchedArgs);

    long maxSizeOfParameterSet = combinedValues[0];
    long sizeOfEntireBatch = combinedValues[1];

    int maxAllowedPacket = this.connection.getMaxAllowedPacket();

    if (sizeOfEntireBatch < maxAllowedPacket - this.originalSql.length()) {
        return numBatchedArgs;
    }

    return (int) Math.max(1, (maxAllowedPacket - this.originalSql.length())
            / maxSizeOfParameterSet);
}

/**
 * Computes the maximum parameter set size, and entire batch size given
 * the number of arguments in the batch.
 */
protected long[] computeMaxParameterSetSizeAndBatchSize(int numBatchedArgs) {
    long sizeOfEntireBatch = 0;
    long maxSizeOfParameterSet = 0;

    for (int i = 0; i < numBatchedArgs; i++) {
        BatchParams paramArg = (BatchParams) this.batchedArgs.get(i);

        boolean[] isNullBatch = paramArg.isNull;
        boolean[] isStreamBatch = paramArg.isStream;

        long sizeOfParameterSet = 0;

        for (int j = 0; j < isNullBatch.length; j++) {
            if (!isNullBatch[j]) {
                if (isStreamBatch[j]) {
                    int streamLength = paramArg.streamLengths[j];

                    if (streamLength != -1) {
                        sizeOfParameterSet += streamLength * 2; // for safety in escaping
                    } else {
                        int paramLength = paramArg.parameterStrings[j].length;
                        sizeOfParameterSet += paramLength;
                    }
                } else {
                    sizeOfParameterSet += paramArg.parameterStrings[j].length;
                }
            } else {
                sizeOfParameterSet += 4; // for NULL literal in SQL
            }
        }

        //
        // Account for static part of values clause
        // This is a little naiive, because the ?s will be replaced
        // but it gives us some padding, and is less housekeeping
        // to ignore them. We're looking for a "fuzzy" value here
        // anyway
        //

        if (this.batchedValuesClause != null) {
            sizeOfParameterSet += this.batchedValuesClause.length() + 1;
        }

        sizeOfEntireBatch += sizeOfParameterSet;

        if (sizeOfParameterSet > maxSizeOfParameterSet) {
            maxSizeOfParameterSet = sizeOfParameterSet;
        }
    }

    return new long[] { maxSizeOfParameterSet, sizeOfEntireBatch };
}

/**
 * Executes the current batch of statements by executing them one-by-one.
 *
 * @return a list of update counts
 * @throws SQLException
 *             if an error occurs
 */
protected int[] executeBatchSerially(int batchTimeout) throws SQLException {

    Connection locallyScopedConn = this.connection;

    if (locallyScopedConn == null) {
        checkClosed();
    }

    int[] updateCounts = null;

    if (this.batchedArgs != null) {
        int nbrCommands = this.batchedArgs.size();
        updateCounts = new int[nbrCommands];

        for (int i = 0; i < nbrCommands; i++) {
            updateCounts[i] = -3;
        }

        SQLException sqlEx = null;

        int commandIndex = 0;

        CancelTask timeoutTask = null;

        try {
            if (this.connection.getEnableQueryTimeouts() &&
                    batchTimeout != 0
                    && this.connection.versionMeetsMinimum(5, 0, 0)) {
                timeoutTask = new CancelTask(this);
                ConnectionImpl.getCancelTimer().schedule(timeoutTask,
                        batchTimeout);
            }

            if (this.retrieveGeneratedKeys) {
                this.batchedGeneratedKeys = new ArrayList(nbrCommands);
            }

            for (commandIndex = 0; commandIndex < nbrCommands; commandIndex++) {
                Object arg = this.batchedArgs.get(commandIndex);

                if (arg instanceof String) {
                    updateCounts[commandIndex] = executeUpdate((String) arg);
                } else {
                    BatchParams paramArg = (BatchParams) arg;

                    try {
                        updateCounts[commandIndex] = executeUpdate(
                                paramArg.parameterStrings,
                                paramArg.parameterStreams, paramArg.isStream,
                                paramArg.streamLengths, paramArg.isNull, true);

                        if (this.retrieveGeneratedKeys) {
                            java.sql.ResultSet rs = null;

                            try {
                                rs = getGeneratedKeysInternal();

                                while (rs.next()) {
                                    this.batchedGeneratedKeys
                                            .add(new ByteArrayRow(new byte[][] { rs.getBytes(1) }));
                                }
                            } finally {
                                if (rs != null) {
                                    rs.close();
                                }
                            }
                        }
                    } catch (SQLException ex) {
                        updateCounts[commandIndex] = EXECUTE_FAILED;

                        if (this.continueBatchOnError &&
                                !(ex instanceof MySQLTimeoutException) &&
                                !(ex instanceof MySQLStatementCancelledException)) {
                            sqlEx = ex;
                        } else {
                            int[] newUpdateCounts = new int[commandIndex];
                            System.arraycopy(updateCounts, 0,
                                    newUpdateCounts, 0, commandIndex);

                            throw new java.sql.BatchUpdateException(ex.getMessage(),
                                    ex.getSQLState(), ex.getErrorCode(), newUpdateCounts);
                        }
                    }
                }
            }

            if (sqlEx != null) {
                throw new java.sql.BatchUpdateException(sqlEx.getMessage(),
                        sqlEx.getSQLState(), sqlEx.getErrorCode(), updateCounts);
            }
        } finally {
            if (timeoutTask != null) {
                timeoutTask.cancel();
            }

            resetCancelledState();
        }
    }

    return (updateCounts != null) ? updateCounts : new int[0];
}

/**
 * Actually execute the prepared statement. This is here so server-side
 * PreparedStatements can re-use most of the code from this class.
 *
 * @param maxRowsToRetrieve
 *            the max number of rows to return
 * @param sendPacket
 *            the packet to send
 * @param createStreamingResultSet
 *            should a 'streaming' result set be created?
 * @param queryIsSelectOnly
 *            is this query doing a SELECT?
 * @param unpackFields
 *            DOCUMENT ME!
 *
 * @return the results as a ResultSet
 *
 * @throws SQLException
 *             if an error occurs.
 */
protected ResultSetInternalMethods executeInternal(int maxRowsToRetrieve,
        Buffer sendPacket, boolean createStreamingResultSet,
        boolean queryIsSelectOnly, Field[] metadataFromCache,
        boolean isBatch)
        throws SQLException {
    try {
        resetCancelledState();

        ConnectionImpl locallyScopedConnection = this.connection;

        this.numberOfExecutions++;

        if (this.doPingInstead) {
            doPingInstead();

            return this.results;
        }

        ResultSetInternalMethods rs;

        CancelTask timeoutTask = null;

        try {
            if (locallyScopedConnection.getEnableQueryTimeouts() &&
                    this.timeoutInMillis != 0
                    && locallyScopedConnection.versionMeetsMinimum(5, 0, 0)) {
                timeoutTask = new CancelTask(this);
                ConnectionImpl.getCancelTimer().schedule(timeoutTask,
                        this.timeoutInMillis);
            }

            rs = locallyScopedConnection.execSQL(this, null, maxRowsToRetrieve, sendPacket,
                    this.resultSetType, this.resultSetConcurrency,
                    createStreamingResultSet, this.currentCatalog,
                    metadataFromCache, isBatch);

            if (timeoutTask != null) {
                timeoutTask.cancel();

                if (timeoutTask.caughtWhileCancelling != null) {
                    throw timeoutTask.caughtWhileCancelling;
                }

                timeoutTask = null;
            }

            synchronized (this.cancelTimeoutMutex) {
                if (this.wasCancelled) {
                    SQLException cause = null;

                    if (this.wasCancelledByTimeout) {
                        cause = new MySQLTimeoutException();
                    } else {
                        cause = new MySQLStatementCancelledException();
                    }

                    resetCancelledState();

                    throw cause;
                }
            }
        } finally {
            if (timeoutTask != null) {
                timeoutTask.cancel();
            }
        }

        return rs;
    } catch (NullPointerException npe) {
        checkClosed(); // we can't synchronize ourselves against async connection-close
                       // due to deadlock issues, so this is the next best thing for
                       // this particular corner case.

        throw npe;
    }
}

/**
 * A Prepared SQL query is executed and its ResultSet is returned
 *
 * @return a ResultSet that contains the data produced by the query - never
 *         null
 *
 * @exception SQLException
 *                if a database access error occurs
 */
public java.sql.ResultSet executeQuery() throws SQLException {
    checkClosed();

    ConnectionImpl locallyScopedConn = this.connection;

    checkForDml(this.originalSql, this.firstCharOfStmt);

    CachedResultSetMetaData cachedMetadata = null;

    // We need to execute this all together
    // So synchronize on the Connection's mutex (because
    // even queries going through there synchronize
    // on the same mutex.
    synchronized (locallyScopedConn.getMutex()) {
        clearWarnings();

        boolean doStreaming = createStreamingResultSet();

        this.batchedGeneratedKeys = null;

        // Adjust net_write_timeout to a higher value if we're
        // streaming result sets. More often than not, someone runs into
        // an issue where they blow net_write_timeout when using this
        // feature, and if they're willing to hold a result set open
        // for 30 seconds or more, one more round-trip isn't going to hurt
        //
        // This is reset by RowDataDynamic.close().

        if (doStreaming
                && this.connection.getNetTimeoutForStreamingResults() > 0) {
            locallyScopedConn.execSQL(this, "SET net_write_timeout="
                    + this.connection.getNetTimeoutForStreamingResults(),
                    -1, null, ResultSet.TYPE_FORWARD_ONLY,
                    ResultSet.CONCUR_READ_ONLY, false, this.currentCatalog,
                    null, false);
        }

        Buffer sendPacket = fillSendPacket();

        if (this.results != null) {
            if (!this.connection.getHoldResultsOpenOverStatementClose()) {
                if (!this.holdResultsOpenOverClose) {
                    this.results.realClose(false);
                }
            }
        }

        String oldCatalog = null;

        if (!locallyScopedConn.getCatalog().equals(this.currentCatalog)) {
            oldCatalog = locallyScopedConn.getCatalog();
            locallyScopedConn.setCatalog(this.currentCatalog);
        }

        //
        // Check if we have cached metadata for this query...
        //
        if (locallyScopedConn.getCacheResultSetMetadata()) {
            cachedMetadata = locallyScopedConn.getCachedMetaData(this.originalSql);
        }

        Field[] metadataFromCache = null;

        if (cachedMetadata != null) {
            metadataFromCache = cachedMetadata.fields;
        }

        if (locallyScopedConn.useMaxRows()) {
            // If there isn't a limit clause in the SQL
            // then limit the number of rows to return in
            // an efficient manner. Only do this if
            // setMaxRows() hasn't been used on any Statements
            // generated from the current Connection (saves
            // a query, and network traffic).
            if (this.hasLimitClause) {
                this.results = executeInternal(this.maxRows, sendPacket,
                        createStreamingResultSet(), true,
                        metadataFromCache, false);
            } else {
                if (this.maxRows <= 0) {
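The batch-handling methods in the listing above (executeBatchedInserts sized by computeBatchSize, with executeBatchSerially as the one-by-one fallback) are reached from application code through the standard JDBC batch API. Below is a minimal sketch assuming the same hypothetical connection settings and employees table as the earlier example; rewriteBatchedStatements=true is the Connector/J connection property that enables the multi-value INSERT path, whose per-statement size is bounded by max_allowed_packet.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Statement;

public class BatchedInsertExample {
    public static void main(String[] args) throws Exception {
        // rewriteBatchedStatements=true lets the driver rewrite the batch into
        // multi-row INSERTs; computeBatchSize() decides how many parameter sets
        // fit under max_allowed_packet per rewritten statement.
        Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test?rewriteBatchedStatements=true",
                "user", "password");
        try {
            PreparedStatement ps = conn.prepareStatement(
                    "INSERT INTO employees (name, dept) VALUES (?, ?)",
                    Statement.RETURN_GENERATED_KEYS);

            // Queue a large number of parameter sets client-side.
            for (int i = 0; i < 10000; i++) {
                ps.setString(1, "employee-" + i);
                ps.setString(2, "engineering");
                ps.addBatch();
            }

            // The driver splits the queued parameter sets into as few multi-row
            // INSERT statements as the packet limit allows, then reports one
            // update count per queued parameter set.
            int[] updateCounts = ps.executeBatch();
            System.out.println("batched rows: " + updateCounts.length);

            ps.close();
        } finally {
            conn.close();
        }
    }
}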
