// NOTE(review): this line is a mangled concatenation of several unrelated fragments —
// FSUtils/DFSClient setup, a conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, ...) call
// truncated mid-argument and spliced into a `try`, and an addBlock(...) pipeline-setup
// sequence. It is not syntactically valid Java as it stands; recover the original
// statements from version control before editing.
FSUtils fsUtils = FSUtils.getInstance(dfs, conf); DFSClient client = dfs.getClient(); String clientName = client.getClientName(); ClientProtocol namenode = client.getNamenode(); int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, try { DataChecksum summer = createChecksum(client); locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, excludesNodes, stat.getFileId(), null); List<Channel> datanodeList = new ArrayList<>();
// NOTE(review): three duplicated fragments of an anonymous Comparator<DFSClient> that
// orders clients lexicographically by client name. The `} });` tails close the anonymous
// class and an enclosing call (presumably Collections.sort — TODO confirm against the
// full file); the opening of that call is not visible here, so these lines are
// incomplete on their own.
@Override public int compare(final DFSClient left, final DFSClient right) { return left.getClientName().compareTo(right.getClientName()); } });
@Override public int compare(final DFSClient left, final DFSClient right) { return left.getClientName().compareTo(right.getClientName()); } });
@Override public int compare(final DFSClient left, final DFSClient right) { return left.getClientName().compareTo(right.getClientName()); } });
/**
 * Builds a bracketed, comma-separated listing of the names of all registered
 * clients, e.g. {@code [client-1, client-2]}, or {@code []} when there are none.
 *
 * @return the client names rendered as a single string
 */
private synchronized String clientsString() {
  if (dfsclients.isEmpty()) {
    return "[]";
  }
  final StringBuilder names = new StringBuilder("[");
  boolean first = true;
  for (final DFSClient client : dfsclients) {
    if (!first) {
      names.append(", ");
    }
    names.append(client.getClientName());
    first = false;
  }
  return names.append("]").toString();
}
// NOTE(review): two duplicated copies of clientsString() — renders all registered client
// names as "[name0, name1, ...]" (or "[]" when empty). Each copy carries one extra
// trailing '}' that closes the enclosing class, so the spans below include the
// end-of-class brace and cannot be rewritten in isolation.
/** Get the names of all clients */ private synchronized String clientsString() { if (dfsclients.isEmpty()) { return "[]"; } else { final StringBuilder b = new StringBuilder("[").append( dfsclients.get(0).getClientName()); for(int i = 1; i < dfsclients.size(); i++) { b.append(", ").append(dfsclients.get(i).getClientName()); } return b.append("]").toString(); } } }
/** Get the names of all clients */ private synchronized String clientsString() { if (dfsclients.isEmpty()) { return "[]"; } else { final StringBuilder b = new StringBuilder("[").append( dfsclients.get(0).getClientName()); for(int i = 1; i < dfsclients.size(); i++) { b.append(", ").append(dfsclients.get(i).getClientName()); } return b.append("]").toString(); } } }
private void renew() throws IOException { final List<DFSClient> copies; synchronized(this) { copies = new ArrayList<>(dfsclients); } //sort the client names for finding out repeated names. Collections.sort(copies, new Comparator<DFSClient>() { @Override public int compare(final DFSClient left, final DFSClient right) { return left.getClientName().compareTo(right.getClientName()); } }); String previousName = ""; for (final DFSClient c : copies) { //skip if current client name is the same as the previous name. if (!c.getClientName().equals(previousName)) { if (!c.renewLease()) { LOG.debug("Did not renew lease for client {}", c); continue; } previousName = c.getClientName(); LOG.debug("Lease renewed for client {}", previousName); } } }
// NOTE(review): corrupted excerpts of the renew() loop body — the braces are unbalanced,
// and the assignment to previousName has been spliced inside the !renewLease() failure
// branch with a dangling `if (LOG.isDebugEnabled())`. Not valid Java; restore from the
// intact renew() implementation before use.
final DFSClient c = copies.get(i); if (!c.getClientName().equals(previousName)) { if (!c.renewLease()) { if (LOG.isDebugEnabled()) { previousName = c.getClientName(); if (LOG.isDebugEnabled()) { LOG.debug("Lease renewed for client " + previousName);
final DFSClient c = copies.get(i); if (!c.getClientName().equals(previousName)) { if (!c.renewLease()) { if (LOG.isDebugEnabled()) { previousName = c.getClientName(); if (LOG.isDebugEnabled()) { LOG.debug("Lease renewed for client " + previousName);
/**
 * Creates a Mockito-backed {@link DFSClient} that reports itself as running,
 * uses {@code FAST_GRACE_PERIOD} as its HDFS timeout, and answers
 * {@code "myclient"} as its client name.
 *
 * @return the stubbed client
 */
private DFSClient createMockClient() {
  final DFSClient client = Mockito.mock(DFSClient.class);
  Mockito.doReturn("myclient").when(client).getClientName();
  Mockito.doReturn((int) FAST_GRACE_PERIOD).when(client).getHdfsTimeout();
  Mockito.doReturn(true).when(client).isClientRunning();
  return client;
}
/**
 * Creates a Mockito-backed {@link DFSClient} whose {@link DfsClientConf}
 * reports {@code FAST_GRACE_PERIOD} as the HDFS timeout; the client reports
 * itself as running and answers {@code "myclient"} as its client name.
 *
 * @return the stubbed client
 */
private DFSClient createMockClient() {
  final DfsClientConf conf = Mockito.mock(DfsClientConf.class);
  Mockito.doReturn((int) FAST_GRACE_PERIOD).when(conf).getHdfsTimeout();
  final DFSClient client = Mockito.mock(DFSClient.class);
  Mockito.doReturn(conf).when(client).getConf();
  Mockito.doReturn("myclient").when(client).getClientName();
  Mockito.doReturn(true).when(client).isClientRunning();
  return client;
}
/** Issues the append RPC directly against the NameNode and records the returned block. */
@Override
void invoke() throws Exception {
  final EnumSetWritable<CreateFlag> flags =
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
  lbk = client.getNamenode().append(fileName, client.getClientName(), flags);
}
/** Issues the create RPC directly against the NameNode and records the returned status. */
@Override
void invoke() throws Exception {
  final CryptoProtocolVersion[] supportedVersions =
      new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES};
  // createParent=false; DataNodes is the replication factor for this test.
  this.status = client.getNamenode().create(fileName,
      FsPermission.getFileDefault(), client.getClientName(),
      new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)), false,
      DataNodes, BlockSize, supportedVersions);
}
@Override void invoke() throws Exception { DatanodeInfo[] newNodes = new DatanodeInfo[2]; newNodes[0] = nodes[0]; newNodes[1] = nodes[1]; String[] storageIDs = {"s0", "s1"}; client.getNamenode().updatePipeline(client.getClientName(), oldBlock, newBlock, newNodes, storageIDs); // close can fail if the out.close() commit the block after block received // notifications from Datanode. // Since datanodes and output stream have still old genstamps, these // blocks will be marked as corrupt after HDFS-5723 if RECEIVED // notifications reaches namenode first and close() will fail. DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream()); }
// NOTE(review): verifies that files left open/aborted (via doWriteAndAbort) survive a
// snapshot, a rename + recursive delete of their parent directory, a saveNamespace,
// and a NameNode restart. A zero-length block is added through the NameNode RPC so the
// saved image also covers an under-construction block with no data. The stream returned
// by fs.create(fileWithEmptyBlock) is deliberately left open so the file stays under
// construction — do not "fix" it with try-with-resources. The final extra '}' closes
// the enclosing test class.
@Test public void testOpenFilesWithRename() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); // check for zero sized blocks Path fileWithEmptyBlock = new Path("/test/test/test4"); fs.create(fileWithEmptyBlock); NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc(); String clientName = fs.getClient().getClientName(); // create one empty block nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null); fs.createSnapshot(path, "s2"); fs.rename(new Path("/test/test"), new Path("/test/test-renamed")); fs.delete(new Path("/test/test-renamed"), true); NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); cluster.restartNameNode(true); } }
@Override void prepare() throws Exception { final Path filePath = new Path(file); DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0); // append to the file and leave the last block under construction out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND), null, null); byte[] appendContent = new byte[100]; new Random().nextBytes(appendContent); out.write(appendContent); ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); LocatedBlocks blks = dfs.getClient() .getLocatedBlocks(file, BlockSize + 1); assertEquals(1, blks.getLocatedBlocks().size()); nodes = blks.get(0).getLocations(); oldBlock = blks.get(0).getBlock(); LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline( oldBlock, client.getClientName()); newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(), oldBlock.getNumBytes(), newLbk.getBlock().getGenerationStamp()); }
// NOTE(review): three duplicated copies of the same snapshot-test sequence — wrap the
// file's first block as an ExtendedBlock and allocate the next block for /bar through
// the NameNode RPC, passing the previous block so it can be committed. These are
// statement fragments, not a self-contained definition; `fsn`, `blks`, `cluster`,
// `hdfs`, `bar`, and `barNode` are presumably defined by the surrounding test — verify
// against the full file.
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]); cluster.getNameNodeRpc() .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, null, barNode.getId(), null);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]); cluster.getNameNodeRpc() .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, null, barNode.getId(), null);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]); cluster.getNameNodeRpc() .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, null, barNode.getId(), null);