/**
 * Builds an OUT_OF_MEMORY {@link UserException} builder wrapping the given cause.
 *
 * NOTE(review): the {@code msg} parameter is currently ignored — callers passing a
 * message will not see it on the resulting exception. Confirm whether
 * {@code .message(msg)} was intended here before relying on this overload.
 *
 * @param ex underlying cause to wrap in the OUT_OF_MEMORY error
 * @param msg unused (see note above)
 * @return builder for an OUT_OF_MEMORY user exception
 */
private UserException.Builder getExceptionBuilder(final Exception ex, final String msg) {
  // The intermediate local in the original added nothing; return the builder directly.
  return UserException.memoryError(ex);
}
/**
 * Creates an OUT_OF_MEMORY error builder with a prebuilt message and no
 * underlying cause. Equivalent to calling {@code memoryError(null)}.
 *
 * @return user exception builder for an OUT_OF_MEMORY error
 */
public static Builder memoryError() {
  return memoryError(null);
}
/**
 * Maps a hash-agg error type to the matching {@link UserException} builder,
 * attaching the cause when one is supplied.
 *
 * @param ex optional underlying cause; may be null
 * @param errorType which failure category occurred
 * @return the builder for the appropriate error category, or null for an
 *         unrecognized type (never expected — VectorizedHashAggOperator
 *         controls the operator type)
 */
private UserException.Builder getExceptionBuilder(final Exception ex, final HashAggErrorType errorType) {
  final boolean hasCause = (ex != null);
  switch (errorType) {
    case SPILL_READ:
      if (hasCause) {
        return UserException.dataReadError(ex);
      }
      return UserException.dataReadError();
    case SPILL_WRITE:
      if (hasCause) {
        return UserException.dataWriteError(ex);
      }
      return UserException.dataWriteError();
    case OOM:
      if (hasCause) {
        return UserException.memoryError(ex);
      }
      return UserException.memoryError();
    default:
      // unreachable in practice: VectorizedHashAggOperator controls the operator type
      return null;
  }
}
/**
 * Reports an out-of-memory condition back to the remote sender as an RPC
 * failure response, closing the channel if the report itself cannot be sent.
 *
 * @param e the out-of-memory failure encountered while receiving data
 * @param ctx channel on which the failing message arrived
 * @param coordinationId id correlating this failure with the sender's request
 */
private void sendOutOfMemory(OutOfMemoryException e, final ChannelHandlerContext ctx, int coordinationId){
  final UserException uex = UserException.memoryError(e)
      .message("Out of memory while receiving data.")
      .build(logger);

  // Wrap the failure as a RESPONSE_FAILURE protobuf so the peer's RPC layer can surface it.
  final OutboundRpcMessage outMessage = new OutboundRpcMessage(
      RpcMode.RESPONSE_FAILURE, 0, coordinationId,
      uex.getOrCreatePBError(false)
      );

  if (RpcConstants.EXTRA_DEBUGGING) {
    logger.debug("Adding message to outbound buffer. {}", outMessage);
  }

  ChannelFuture future = ctx.writeAndFlush(outMessage);
  // if we were unable to report back the failure make sure we close the channel otherwise we may cause the sender
  // to block indefinitely waiting for an ACK on this message
  future.addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
}
final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true))
/**
 * Allocates a buffer of the requested size, converting allocation failure
 * into a {@link UserException} enriched with root-allocator usage context.
 *
 * @param alloc allocator to request the buffer from
 * @param requestSize number of bytes to allocate
 * @return the newly allocated buffer
 * @throws Exception OUT_OF_MEMORY user exception when allocation fails
 */
private ArrowBuf allocateHelper(BufferAllocator alloc, final int requestSize) throws Exception {
  try {
    return alloc.buffer(requestSize);
  } catch (OutOfMemoryException oom) {
    final UserException.Builder builder = UserException.memoryError(oom);
    // attach current allocator usage so the failure is diagnosable from the error alone
    rootAllocator.addUsageToExceptionContext(builder);
    throw builder.build(logger);
  }
}
copyWatch.stop(); if (copied != recordCount) { // copier may return earlier if it runs out of memory throw UserException.memoryError().message("Ran out of memory while trying to copy the records.").build(logger);
@Override public int outputData() throws Exception { state.is(State.CAN_PRODUCE); if(batchesOutput > 0){ // only increment sv4 after first return final boolean hasMore = finalOrder.next(); if(!hasMore){ state = State.DONE; return 0; } } final int targetCount = finalOrder.getCount(); final int copied = copier.copyRecords(0, targetCount); if(copied != targetCount){ throw UserException.memoryError().message("Ran out of memory while trying to output records.").build(logger); } batchesOutput++; return outgoing.setAllCount(copied); }
/**
 * Verifies that the external attempt handler does not treat an
 * out-of-memory failure on a CTAS attempt as recoverable.
 */
@Test
public void testCTAS() {
  final OptionManager options = Mockito.mock(OptionManager.class);
  final ReAttemptHandler attemptHandler = new ExternalAttemptHandler(options);
  final AttemptId attemptId = new AttemptId();

  final UserException oomFailure = UserException.memoryError(null).build(NoOutputLogger.INSTANCE);
  final ReAttemptContext context = new ReAttemptContext(attemptId, oomFailure, false, true);

  assertEquals(AttemptReason.NONE, attemptHandler.isRecoverable(context));
}
} catch (final OutOfMemoryError e) { if (e instanceof OutOfDirectMemoryError || "Direct buffer memory".equals(e.getMessage())) { moveToState(QueryState.FAILED, UserException.memoryError(e).build(logger)); } else {
/**
 * Handles an arriving data batch: rejects it with a deferred OUT_OF_MEMORY
 * error when it does not fit in the allocator's headroom, otherwise routes
 * it to the collector for its sending major fragment.
 *
 * @param incomingBatch the batch that just arrived over the wire
 * @throws FragmentSetupException if collector lookup fails — TODO confirm, thrown by callees
 * @throws IOException on failure materializing the raw batch — TODO confirm, thrown by callees
 */
public void batchArrived(final IncomingDataBatch incomingBatch) throws FragmentSetupException, IOException {
  // Reject (but do not fail immediately) when accepting this batch would exceed memory limits;
  // the error is deferred so the fragment can surface it later.
  if(!incomingBatch.checkAcceptance(allocator.getHeadroom())){
    deferredException.addException(UserException.memoryError()
        .message("Out of memory while receiving incoming message. Message size: %d, Current thread allocation: %d, thread limit: %d.",
            incomingBatch.size(), allocator.getAllocatedMemory(), allocator.getLimit())
        .build(logger));
    return;
  }

  // we want to make sure that we only generate local record batch reference in the case that we're not closed.
  // Otherwise we would leak memory.
  try (AutoCloseableLock lock = sharedIncomingBatchLock.open()) {
    if (closed) {
      return;
    }
    final DataCollector collector = collector(incomingBatch.getHeader().getSendingMajorFragmentId());
    // serialize delivery per collector; the raw batch is closed by try-with-resources
    // after the collector has taken whatever reference it needs
    synchronized (collector) {
      try(final RawFragmentBatch newRawFragmentBatch = incomingBatch.newRawFragmentBatch(allocator)){
        collector.batchArrived(incomingBatch.getHeader().getSendingMinorFragmentId(), newRawFragmentBatch);
      }
    }
  }
}
final int copiedRecords = copier.copyRecords(0, count); if (copiedRecords != count) { throw UserException.memoryError().message("Ran out of memory while trying to sort records.").build(logger);
services.protect(allocator); } catch (final OutOfMemoryException e) { throw UserException.memoryError(e) .addContext("Fragment", handle.getMajorFragmentId() + ":" + handle.getMinorFragmentId()) .build(logger);