/**
 * This method gets the pipeline for the current WAL.
 */
@Override
DatanodeInfo[] getPipeline() {
  // No open output stream, or the wrapped stream is not an HDFS stream:
  // there is no pipeline to report.
  if (this.hdfs_out == null
      || !(this.hdfs_out.getWrappedStream() instanceof DFSOutputStream)) {
    return new DatanodeInfo[0];
  }
  return ((DFSOutputStream) this.hdfs_out.getWrappedStream()).getPipeline();
}
}
/**
 * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate the
 * default behavior (such as setting the maxRecoveryErrorCount value for example (see
 * {@see org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay#testReplayEditsWrittenIntoWAL()}). This is
 * done using reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is
 * removed.
 * @return null if underlying stream is not ready.
 */
@VisibleForTesting
OutputStream getOutputStream() {
  // Snapshot the field once; it may be nulled out concurrently.
  FSDataOutputStream fsdos = this.hdfs_out;
  if (fsdos == null) {
    return null;
  }
  return fsdos.getWrappedStream();
}
/**
 * This method gets the datanode replication count for the current open file.
 *
 * If the pipeline isn't started yet or is empty, you will get the default
 * replication factor.
 *
 * <p/>If this function returns -1, it means you
 * are not properly running with the HDFS-826 patch.
 * @throws InvocationTargetException
 * @throws IllegalAccessException
 * @throws IllegalArgumentException
 */
public int getNumCurrentReplicas() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
  // Without the reflected method (HDFS-826) or an open stream we cannot ask.
  if (refGetNumCurrentReplicas == null || outputStream == null) {
    return -1;
  }
  OutputStream wrapped = outputStream.getWrappedStream();
  if (wrapped == null) {
    return -1;
  }
  Object replicas = refGetNumCurrentReplicas.invoke(wrapped, NO_ARGS);
  if (replicas instanceof Integer) {
    return ((Integer) replicas).intValue();
  }
  return -1;
}
/** * Find the 'getNumCurrentReplicas' on the passed <code>os</code> stream. * @return Method or null. */ private Method reflectGetNumCurrentReplicas(FSDataOutputStream os) { Method m = null; if (os != null) { Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream() .getClass(); try { m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas", new Class<?>[] {}); m.setAccessible(true); } catch (NoSuchMethodException e) { logger.info("FileSystem's output stream doesn't support" + " getNumCurrentReplicas; --HDFS-826 not available; fsOut=" + wrappedStreamClass.getName() + "; err=" + e); } catch (SecurityException e) { logger.info("Doesn't have access to getNumCurrentReplicas on " + "FileSystems's output stream --HDFS-826 not available; fsOut=" + wrappedStreamClass.getName(), e); m = null; // could happen on setAccessible() } } if (m != null) { logger.debug("Using getNumCurrentReplicas--HDFS-826"); } return m; }
@Override
public void sync(FSDataOutputStream outputStream) throws IOException {
  // Flush whatever the FSDataOutputStream wraps.
  // BUG FIX: the original body also called getWrappedStream() a second time
  // and discarded the result — a dead expression statement with no effect,
  // now removed.
  // NOTE(review): if a durable sync (hflush/hsync on the wrapped stream) was
  // intended by that second call, it is still missing — confirm against the
  // Syncable contract expected by callers.
  outputStream.getWrappedStream().flush();
} })
/** * This method gets the pipeline for the current walog. * * @return non-null array of DatanodeInfo */ DatanodeInfo[] getPipeLine() { if (logFile != null) { OutputStream os = logFile.getWrappedStream(); if (os instanceof DFSOutputStream) { return ((DFSOutputStream) os).getPipeline(); } } // Don't have a pipeline or can't figure it out. return EMPTY_PIPELINE; }
// NOTE(review): this is a fragment of a larger method — the braces are
// unbalanced here and the inner `catch (IOException e)` would shadow the
// outer `e` if truly nested as written; it looks like two separate snippets
// spliced together. Confirm against the full source before relying on it.
// Start from the expected replication level, then try to read the live value
// from the HDFS output stream.
int current = expectedReplication; try {
  current = ((DFSOutputStream) logFile.getWrappedStream()).getCurrentBlockReplication();
} catch (IOException e) {
  // Reading the live replication failed; record the failure.
  fail(work, e, "getting replication level");
  // If no expected level was captured yet, attempt to capture one now —
  // but only when the wrapped stream really is a DFSOutputStream.
  if (expectedReplication == 0 && logFile.getWrappedStream() instanceof DFSOutputStream) {
    try {
      expectedReplication = ((DFSOutputStream) logFile.getWrappedStream())
          .getCurrentBlockReplication();
    } catch (IOException e) {
/** * Method used internal to this class and for tests only. * @return The wrapped stream our writer is using; its not the * writer's 'out' FSDatoOutputStream but the stream that this 'out' wraps * (In hdfs its an instance of DFSDataOutputStream). */ // usage: see TestLogRolling.java OutputStream getOutputStream() { return this.hdfs_out.getWrappedStream(); }
@Override
public OutputStream getWrappedStream() {
  // Delegate straight through to the stream we wrap.
  final OutputStream delegate = underlyingOS.getWrappedStream();
  return delegate;
}
/** Unwraps the Swift-native stream from an FSDataOutputStream wrapper. */
private static SwiftNativeOutputStream getSwiftNativeOutputStream(
    FSDataOutputStream outputStream) {
  return (SwiftNativeOutputStream) outputStream.getWrappedStream();
}
/** Narrows the wrapped stream of the given FSDataOutputStream to Swift's native stream. */
private static SwiftNativeOutputStream getSwiftNativeOutputStream(
    FSDataOutputStream outputStream) {
  SwiftNativeOutputStream swiftStream =
      (SwiftNativeOutputStream) outputStream.getWrappedStream();
  return swiftStream;
}
/** Returns the block token of the DFS stream wrapped by {@code out}. */
public static Token<BlockTokenIdentifier> getBlockToken(
    FSDataOutputStream out) {
  DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
  return dfsOut.getBlockToken();
}
/**
 * This method gets the pipeline for the current WAL.
 */
@VisibleForTesting
DatanodeInfo[] getPipeLine() {
  // No stream, or not an HDFS stream: nothing to report.
  if (this.hdfs_out == null
      || !(this.hdfs_out.getWrappedStream() instanceof DFSOutputStream)) {
    return new DatanodeInfo[0];
  }
  return ((DFSOutputStream) this.hdfs_out.getWrappedStream()).getPipeline();
}
}
/**
 * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate
 * the default behavior (such as setting the maxRecoveryErrorCount value for example (see
 * {@link TestWALReplay#testReplayEditsWrittenIntoWAL()}). This is done using reflection on the
 * underlying HDFS OutputStream.
 * NOTE: This could be removed once Hadoop1 support is removed.
 * @return null if underlying stream is not ready.
 */
@VisibleForTesting
OutputStream getOutputStream() {
  // Snapshot the field once; it may not be initialized yet.
  FSDataOutputStream fsdos = this.hdfs_out;
  return fsdos == null ? null : fsdos.getWrappedStream();
}
/** * This method gets the pipeline for the current walog. * * @return non-null array of DatanodeInfo */ DatanodeInfo[] getPipeLine() { if (null != logFile) { OutputStream os = logFile.getWrappedStream(); if (os instanceof DFSOutputStream) { return ((DFSOutputStream) os).getPipeline(); } } // Don't have a pipeline or can't figure it out. return EMPTY_PIPELINE; }
/**
 * Unwraps the given append stream down to the underlying BlockBlobAppendStream.
 * Handles both wrapping shapes seen in practice: a NativeAzureFsOutputStream
 * around a SyncableDataOutputStream, or a bare SyncableDataOutputStream.
 * Fails the test if neither shape is recognized.
 */
private BlockBlobAppendStream getBlockBlobAppendStream(FSDataOutputStream appendStream) {
  // Unwrap once; the wrapped stream is inspected several times below.
  Object wrapped = appendStream.getWrappedStream();
  SyncableDataOutputStream dataOutputStream = null;
  if (wrapped instanceof NativeAzureFileSystem.NativeAzureFsOutputStream) {
    NativeAzureFileSystem.NativeAzureFsOutputStream fsOutputStream =
        (NativeAzureFileSystem.NativeAzureFsOutputStream) wrapped;
    dataOutputStream = (SyncableDataOutputStream) fsOutputStream.getOutStream();
  }
  // Deliberately not an else-if: a stream matching both shapes takes the
  // direct SyncableDataOutputStream path, as in the original code.
  if (wrapped instanceof SyncableDataOutputStream) {
    dataOutputStream = (SyncableDataOutputStream) wrapped;
  }
  // BUG FIX: the failure message previously concatenated dataOutputStream,
  // which is always null on the failing path; report the unrecognized
  // wrapped stream instead so the assertion message is actually useful.
  Assert.assertNotNull("Did not recognize " + wrapped, dataOutputStream);
  return (BlockBlobAppendStream) dataOutputStream.getOutStream();
}
/** Extracts the HDFS block token from the stream wrapped by {@code out}. */
public static Token<BlockTokenIdentifier> getBlockToken(
    FSDataOutputStream out) {
  final DFSOutputStream wrapped = (DFSOutputStream) out.getWrappedStream();
  return wrapped.getBlockToken();
}
/** True when the stream ultimately wraps a PageBlobOutputStream. */
private boolean isPageBlobStreamWrapper(FSDataOutputStream stream) {
  SyncableDataOutputStream syncable =
      (SyncableDataOutputStream) stream.getWrappedStream();
  return syncable.getOutStream() instanceof PageBlobOutputStream;
}
/** True when the stream's innermost wrapped stream is a BlockBlobAppendStream. */
private boolean isBlockBlobAppendStreamWrapper(FSDataOutputStream stream) {
  // Peel the layers one at a time instead of a single nested-cast expression.
  NativeAzureFileSystem.NativeAzureFsOutputStream fsOut =
      (NativeAzureFileSystem.NativeAzureFsOutputStream) stream.getWrappedStream();
  SyncableDataOutputStream syncable =
      (SyncableDataOutputStream) fsOut.getOutStream();
  return syncable.getOutStream() instanceof BlockBlobAppendStream;
}
@Override void invoke() throws Exception { DatanodeInfo[] newNodes = new DatanodeInfo[2]; newNodes[0] = nodes[0]; newNodes[1] = nodes[1]; String[] storageIDs = {"s0", "s1"}; client.getNamenode().updatePipeline(client.getClientName(), oldBlock, newBlock, newNodes, storageIDs); // close can fail if the out.close() commit the block after block received // notifications from Datanode. // Since datanodes and output stream have still old genstamps, these // blocks will be marked as corrupt after HDFS-5723 if RECEIVED // notifications reaches namenode first and close() will fail. DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream()); }