Refine search
resEx = chainException(resEx, new SQLException(e.getMessage(), sqlState, code, e)); resEx = chainException(resEx, new SQLException(e.getMessage(), SqlStateCode.INTERNAL_ERROR, IgniteQueryErrorCode.UNKNOWN, e)); SQLException e = new SQLException("Failed to INSERT some keys because they are already in cache [keys=" + snd.failedKeys() + ']', SqlStateCode.CONSTRAINT_VIOLATION, DUPLICATE_KEY); BatchUpdateException e = new BatchUpdateException(resEx.getMessage(), resEx.getSQLState(), resEx.getErrorCode(), snd.perRowCounterAsArray(), resEx);
int paramCnt = 1; pstmt.setString(paramCnt++, "p" + i); pstmt.setInt(paramCnt++, i); pstmt.setString(paramCnt++, "Name" + i); pstmt.setString(paramCnt++, "Lastname" + i); pstmt.setInt(paramCnt++, 20 + i); int[] updCnts = e.getUpdateCounts(); assertEquals("Invalid update count", i != FAILED_IDX ? 1 : Statement.EXECUTE_FAILED, updCnts[i]); if (!e.getMessage().contains("Value conversion failed")) { log.error("Invalid exception: ", e); assertEquals("Invalid SQL state.", SqlStateCode.CONVERSION_FAILED, e.getSQLState()); assertEquals("Invalid error code.", IgniteQueryErrorCode.CONVERSION_FAILED, e.getErrorCode());
/**
 * Inserts the test data via a direct route (JDBC).
 *
 * @throws RuntimeException wrapping the underlying failure; for batch
 *     failures the driver-chained next exception is preferred when present,
 *     since it carries the actual cause of the batch abort.
 */
public void prepareTest() {
    try {
        dataSourceUtils.insertTestData( testData );
    }
    catch (BatchUpdateException e) {
        // getNextException() may be null for some drivers; fall back to the
        // BatchUpdateException itself so the cause is never lost.
        SQLException cause = e.getNextException() != null ? e.getNextException() : e;
        throw new RuntimeException( cause );
    }
    catch (SQLException e) {
        throw new RuntimeException( e );
    }
}
/**
 * Records the first error encountered while executing a batch.
 *
 * On the first failure, builds a {@link BatchUpdateException} carrying the
 * update counts of the statements that completed successfully and the text
 * of the query that aborted. Every error (including the first) is chained
 * onto that exception so callers can walk the full list via
 * {@code getNextException()}.
 *
 * @param newError the error reported by the server for the current batch entry
 */
public void handleError(SQLException newError) {
    if (batchException == null) {
        // Preserve the update counts for the entries that completed before
        // the failure; if the failure happened past the recorded counts,
        // keep the whole array.
        int[] successCounts;
        if (resultIndex >= updateCounts.length)
            successCounts = updateCounts;
        else {
            successCounts = new int[resultIndex];
            System.arraycopy(updateCounts, 0, successCounts, 0, resultIndex);
        }

        // Identify the query that aborted, when we still have it.
        String queryString = "<unknown>";
        if (resultIndex < queries.length)
            queryString = queries[resultIndex].toString(parameterLists[resultIndex]);

        // Integer.valueOf instead of the deprecated new Integer(...) ctor.
        batchException = new BatchUpdateException(
            GT.tr("Batch entry {0} {1} was aborted. Call getNextException to see the cause.",
                new Object[]{Integer.valueOf(resultIndex), queryString}),
            newError.getSQLState(),
            successCounts);
    }

    batchException.setNextException(newError);
}
public void handleCompletion() throws SQLException { updateGeneratedKeys(); SQLException batchException = getException(); if (batchException != null) { if (isAutoCommit()) { // Re-create batch exception since rows after exception might indeed succeed. BatchUpdateException newException = new BatchUpdateException( batchException.getMessage(), batchException.getSQLState(), uncompressUpdateCount() ); newException.initCause(batchException.getCause()); SQLException next = batchException.getNextException(); if (next != null) { newException.setNextException(next); } batchException = newException; } throw batchException; } }
/**
 * Extract batching error from general exception.
 *
 * @param e Exception.
 * @param rowsAffected List containing the number of affected rows for every query in batch.
 * @param err Error tuple containing error code and error message.
 */
private static void extractBatchError(Exception e, List<Long> rowsAffected, IgniteBiTuple<Integer, String> err) {
    // Anything that is not an IgniteSQLException is reported as unknown.
    if (!(e instanceof IgniteSQLException)) {
        err.set(IgniteQueryErrorCode.UNKNOWN, e.getMessage());

        return;
    }

    BatchUpdateException batchCause = X.cause(e, BatchUpdateException.class);

    // No batch-specific cause: surface the Ignite status code and H2 message.
    if (batchCause == null) {
        err.set(((IgniteSQLException)e).statusCode(), OdbcUtils.tryRetrieveH2ErrorMessage(e));

        return;
    }

    // Copy the per-statement counts recorded before the batch aborted.
    if (rowsAffected != null) {
        for (long cnt : batchCause.getLargeUpdateCounts())
            rowsAffected.add(cnt);
    }

    err.set(batchCause.getErrorCode(), batchCause.getMessage());
}
String mapUuid = mapTypeAndUuid[1]; pStmt.setInt(1, mapTypeId); pStmt.setString(2, mapType); pStmt.setBoolean(3, false); pStmt.setBoolean(4, false); pStmt.setDate(5, new Date(Calendar.getInstance().getTimeInMillis())); int[] updateCounts = be.getUpdateCounts();
private void addLabelGroup(String key, ApsProperties labels, Connection conn) throws ApsSystemException { PreparedStatement stat = null; try { stat = conn.prepareStatement(ADD_LABEL); Iterator<Object> labelKeysIter = labels.keySet().iterator(); while (labelKeysIter.hasNext()) { String labelLangCode = (String) labelKeysIter.next(); String label = labels.getProperty(labelLangCode); stat.setString(1, key); stat.setString(2, labelLangCode); stat.setString(3, label); stat.addBatch(); stat.clearParameters(); } stat.executeBatch(); } catch (BatchUpdateException e) { _logger.error("Error adding a new label record", e.getNextException()); throw new RuntimeException("Error adding a new label record", e.getNextException()); //processDaoException(e.getNextException(), "Error adding a new label record", "addLabel"); } catch (Throwable t) { _logger.error("Error while adding a new label", t); throw new RuntimeException("Error while adding a new label", t); //processDaoException(t, "Error while adding a new label", "addLabel"); } finally { closeDaoResources(null, stat); } }
@Override protected boolean insertBatch(PreparedStatement pstmt) throws SQLException { for (JdbcEntryData pendingEntry : TeradataBufferedInserter.this.pendingInserts) { int i = 1; for (JdbcEntryDatum datum : pendingEntry) { Object value = datum.getVal(); if (value != null) { pstmt.setObject(i, value); } else { // Column type is needed for null value insertion pstmt.setNull(i, columnPosSqlTypes.get(i)); } i++; } pstmt.addBatch(); pstmt.clearParameters(); } if (LOG.isDebugEnabled()) { LOG.debug("Executing SQL " + pstmt); } int[] execStatus = pstmt.executeBatch(); // Check status explicitly if driver continues batch insertion upon failure for (int status : execStatus) { if (status == Statement.EXECUTE_FAILED) { throw new BatchUpdateException("Batch insert failed.", execStatus); } } return true; }
for (int i = 0; i < docId.getId().length; i++) { String pkElement = docId.getId()[i]; ps.setString(i + 1, pkElement); ps.addBatch(); ps.executeBatch(); } catch (BatchUpdateException e) { if (e.getMessage().contains("deadlock detected")) { log.debug("Database transaction deadlock detected while trying to set the last component. Trying again."); tryagain = true; e.printStackTrace(); SQLException nextException = e.getNextException(); if (null == nextException) throw new XmiDataInsertionException(e); else nextException.printStackTrace(); throw new XmiDataInsertionException(nextException); } finally {
/**
 * Demonstrates how a primary-key constraint violation inside a JDBC batch
 * surfaces as a BatchUpdateException: ids repeat via {@code i % 2}, so the
 * third insert collides and the batch aborts.
 */
@Test
public void testInsertConstraintViolation() {
    LOGGER.info("testInsertPosts");
    doInJPA(entityManager -> {
        Session session = entityManager.unwrap(Session.class);
        session.doWork(connection -> {
            String insertSql = "INSERT INTO post (id, title) " + "VALUES (?, ?)";
            try (PreparedStatement st = connection.prepareStatement(insertSql)) {
                for (long index = 0; index < 5; index++) {
                    // Only ids 0 and 1 are generated, forcing duplicates.
                    st.setLong(1, index % 2);
                    st.setString(2, String.format("High-Performance Java Persistence, Part %d", index));
                    st.addBatch();
                }
                st.executeBatch();
            } catch (BatchUpdateException e) {
                LOGGER.info("Batch has managed to process {} entries", e.getUpdateCounts().length);
            }
        });
    });
}
parameterBinder.bindParameters(statement, element, null); if (batchInStatement) { statement.addBatch(); } else { listener.beforeExecuteBatchUpdate(statement, sql); result[i] = statement.executeUpdate(); listener.afterExecuteBatchUpdate(statement, result); readGeneratedKeys(i, statement); result = statement.executeBatch(); listener.afterExecuteBatchUpdate(statement, result); readGeneratedKeys(0, statement); result = e.getUpdateCounts(); if (result == null) { throw new PersistenceException(e);
/**
 * INTERNAL
 *
 * Prints this exception's stack trace and, when a chained exception is
 * attached, that exception's trace as well.
 */
@Override
public void printStackTrace(PrintStream s) {
    if (s == null) {
        return;
    }
    super.printStackTrace(s);
    SQLException chained = getNextException();
    if (chained != null) {
        chained.printStackTrace(s);
    }
}
new BatchUpdateException(sqlEx.getMessage(), sqlEx.getSQLState(), sqlEx.getErrorCode(), updateCounts); batchEx.setNextException(sqlEx.getNextException()); throw batchEx; throw new BatchUpdateException(ex.getMessage(), ex.getSQLState(), ex.getErrorCode(), new int[0]); } finally {
Tr.entry(tc, "isSQLErrorTransient ", new Object[] { sqlex, this }); boolean retryBatch = false; int sqlErrorCode = sqlex.getErrorCode(); Tr.event(tc, " Message: " + sqlex.getMessage()); Tr.event(tc, " SQLSTATE: " + sqlex.getSQLState()); Tr.event(tc, " Error code: " + sqlErrorCode); BatchUpdateException buex = (BatchUpdateException) sqlex; Tr.event(tc, "BatchUpdateException: Update Counts - "); int[] updateCounts = buex.getUpdateCounts(); for (int i = 0; i < updateCounts.length; i++) { Tr.event(tc, " Statement " + i + ":" + updateCounts[i]); SQLException nextex = buex.getNextException(); while (nextex != null) { sqlErrorCode = nextex.getErrorCode();
try (PreparedStatement ps = conn.prepareStatement(sql)) { for (String dt : datesToApply) { ps.setString(1, dt); ps.addBatch(); } catch (BatchUpdateException bue) { System.out.println("executeBatch threw BatchUpdateException: " + bue.getMessage()); updateCounts = bue.getUpdateCounts(); } catch (SQLException se) { System.out.println("executeBatch threw SQLException: " + se.getMessage());
/**
 * Records a batch failure: on the FIRST error only, wraps it in a
 * BatchUpdateException that carries the per-entry update counts and the
 * aborted query's text, then forwards both the wrapper and the raw error to
 * the superclass so the whole error chain stays walkable.
 *
 * @param newError the error reported for the batch entry at {@code resultIndex}
 */
@Override
public void handleError(SQLException newError) {
    if (getException() == null) {
        // First failure: mark every entry from the last commit point onward
        // as failed — in autocommit-off mode none of them will take effect.
        Arrays.fill(updateCounts, committedRows, updateCounts.length, Statement.EXECUTE_FAILED);
        if (allGeneratedRows != null) {
            // Generated keys collected so far are invalid once the batch aborts.
            allGeneratedRows.clear();
        }

        // Identify the aborted query when we still have it in view.
        String queryString = "<unknown>";
        if (resultIndex < queries.length) {
            queryString = queries[resultIndex].toString(parameterLists[resultIndex]);
        }

        BatchUpdateException batchException = new BatchUpdateException(
            GT.tr("Batch entry {0} {1} was aborted: {2}  Call getNextException to see other errors in the batch.",
                resultIndex, queryString, newError.getMessage()),
            newError.getSQLState(), uncompressUpdateCount());
        batchException.initCause(newError);
        // Record the wrapper as THE batch exception (super stores the first one).
        super.handleError(batchException);
    }
    resultIndex++;

    // Always chain the raw error too, so callers can walk every failure.
    super.handleError(newError);
}
/**
 * Batch-inserts the (ontology, pattern, occurrence-count) triples for one
 * ontology. Per-pattern bind failures are logged and skipped; the batch is
 * then executed once for all successfully bound rows.
 *
 * @param physicalURI physical location of the ontology
 * @param ontology the ontology the patterns were mined from
 * @param patterns axiom patterns with their occurrence counts
 */
private synchronized void addOntologyPatterns(URI physicalURI, OWLOntology ontology, Multiset<OWLAxiom> patterns) {
    int ontologyId = getOntologyID(physicalURI, ontology);
    for (OWLAxiom pattern : patterns.elementSet()) {
        try {
            int patternId = addPattern(pattern);
            int occurrences = patterns.count(pattern);
            insertOntologyPatternPs.setInt(1, ontologyId);
            insertOntologyPatternPs.setInt(2, patternId);
            insertOntologyPatternPs.setInt(3, occurrences);
            insertOntologyPatternPs.addBatch();
        } catch (SQLException e) {
            // Fixed: original message ended with a stray unbalanced \" quote.
            LOGGER.error("Failed to insert pattern\n" + pattern, e);
        }
    }
    try {
        insertOntologyPatternPs.executeBatch();
    } catch (BatchUpdateException e) {
        // Log the exception itself (preferring the driver-chained cause when
        // present) instead of only the message, so the stack trace survives.
        SQLException cause = e.getNextException() != null ? e.getNextException() : e;
        LOGGER.error("Failed to insert some pattern. Reason: {}", e.getMessage(), cause);
    } catch (SQLException e) {
        LOGGER.error("Failed to insert patterns.", e);
    }
}
ps.addBatch(); batch++; if (batch % UPDATE_BATCH_SIZE == 0 || !rowIt.hasNext()) { ps.executeBatch(); countExecute(); ps.execute(); countExecute(); if (e instanceof BatchUpdateException) { BatchUpdateException bue = (BatchUpdateException) e; if (e.getCause() == null && bue.getNextException() != null) { e.initCause(bue.getNextException());
/**
 * Verifies that a BatchUpdateException is translated according to the
 * SQLException chained onto it, and that SQL and root exception are
 * preserved on the translated result.
 */
@Test
public void batchExceptionTranslation() {
    SQLExceptionTranslator translator = new SQLErrorCodeSQLExceptionTranslator(ERROR_CODES);

    SQLException badSqlEx = new SQLException("", "", 1);
    BatchUpdateException batchUpdateEx = new BatchUpdateException();
    batchUpdateEx.setNextException(badSqlEx);

    BadSqlGrammarException bsgex =
        (BadSqlGrammarException) translator.translate("task", "SQL", batchUpdateEx);

    assertEquals("SQL", bsgex.getSql());
    assertEquals(badSqlEx, bsgex.getSQLException());
}