/** Sets the call-queue-length gauge to {@code len}. */
public void setCallQueueLen(int len) {
  callQueueLen.set(len);
}
/**
 * Reports length of the call queue to HBaseRpcMetrics.
 *
 * @param queue Which queue to report
 */
protected void updateCallQueueLenMetrics(BlockingQueue<Call> queue) {
  // Identity comparison on purpose: we want to know which of the three
  // server-owned queues this reference is, not structural equality.
  if (queue == callQueue) {
    rpcMetrics.callQueueLen.set(callQueue.size());
    return;
  }
  if (queue == priorityCallQueue) {
    rpcMetrics.priorityCallQueueLen.set(priorityCallQueue.size());
    return;
  }
  if (queue == replicationQueue) {
    rpcMetrics.replicationCallQueueLen.set(replicationQueue.size());
    return;
  }
  LOG.warn("Unknown call queue");
}
/**
 * Remove Block from CorruptBlocksMap and refresh the corrupt-block gauge.
 *
 * @param blk Block to be removed
 */
void removeFromCorruptReplicasMap(Block blk) {
  if (corruptReplicasMap == null) {
    return;
  }
  corruptReplicasMap.remove(blk);
  // Metrics may not be initialized (e.g. in tests); guard before publishing.
  if (NameNode.getNameNodeMetrics() != null) {
    NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
        corruptReplicasMap.size());
  }
}
/**
 * Closes a client connection and releases everything associated with it:
 * removes it from the tracked connection list (decrementing the open-count
 * only if it was actually present), closes the socket, discards any
 * queued-but-unsent responses, and credits their byte size back to the
 * response-queue throttler before refreshing the metric.
 */
protected void closeConnection(Connection connection) {
  synchronized (connectionList) {
    // Only decrement if the connection was still tracked; this may race
    // with other code paths that remove the connection first.
    if (connectionList.remove(connection)) {
      numConnections--;
    }
  }
  connection.close();
  // Tally the bytes held by responses that will never be sent so the
  // global throttler can be credited back in one call.
  long bytes = 0;
  synchronized (connection.responseQueue) {
    for (Call c : connection.responseQueue) {
      bytes += c.response.limit();
    }
    connection.responseQueue.clear();
  }
  responseQueuesSizeThrottler.decrease(bytes);
  // NOTE(review): numConnections is read outside the connectionList lock
  // here; presumably a slightly stale gauge value is acceptable — confirm.
  rpcMetrics.numOpenConnections.set(numConnections);
}
/**
 * Appends a log path to the replication queue and refreshes the
 * queue-size gauge.
 */
@Override
public void enqueueLog(Path log) {
  queue.put(log);
  metrics.sizeOfLogQueue.set(queue.size());
}
/**
 * Poll for the next path, blocking up to {@code sleepForRetries} ms if the
 * queue is currently empty.
 *
 * @return true if a path was obtained, false if not
 */
protected boolean getNextPath() {
  try {
    if (this.currentPath == null) {
      this.currentPath = queue.poll(this.sleepForRetries, TimeUnit.MILLISECONDS);
      this.metrics.sizeOfLogQueue.set(queue.size());
    }
  } catch (InterruptedException e) {
    LOG.warn("Interrupted while reading edits", e);
    // Fix: restore the interrupt status instead of swallowing it, so the
    // calling thread can observe the interruption and shut down cleanly.
    Thread.currentThread().interrupt();
  }
  return this.currentPath != null;
}
/**
 * Drains queued chunks into {@code events}, blocking until at least one
 * chunk is available.
 *
 * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#collect(java.util.List,
 *      int)
 */
public void collect(List<Chunk> events, int maxSize) throws InterruptedException {
  synchronized (this) {
    // we can't just say queue.take() here, since we're holding a lock.
    while (queue.isEmpty()) {
      this.wait();
    }
    // Drain chunks until the queue empties or the cumulative payload size
    // reaches maxSize. Note the last chunk may push `size` past maxSize.
    int size = 0;
    while (!queue.isEmpty() && (size < maxSize)) {
      Chunk e = this.queue.remove();
      metrics.removedChunk.inc();
      int chunkSize = e.getData().length;
      size += chunkSize;
      dataSize -= chunkSize; // dataSize tracks total buffered payload bytes
      metrics.dataSize.set(dataSize);
      events.add(e);
    }
    metrics.queueSize.set(queue.size());
    // Wake any producers blocked in add() waiting for capacity.
    this.notifyAll();
  }
  // NOTE(review): queue.size() is read outside the monitor here, so the
  // debug count may be slightly stale relative to the drain above.
  if (log.isDebugEnabled()) {
    log.debug("WaitingQueue.inQueueCount:" + queue.size()
        + "\tWaitingQueue.collectCount:" + events.size());
  }
}
/**
 * Drains queued chunks into {@code events}, blocking until at least one
 * chunk is available.
 *
 * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#collect(java.util.List,
 *      int)
 */
public void collect(List<Chunk> events, int maxSize) throws InterruptedException {
  synchronized (this) {
    // we can't just say queue.take() here, since we're holding a lock.
    while (queue.isEmpty()) {
      this.wait();
    }
    // Drain chunks until the queue empties or the cumulative payload size
    // reaches maxSize. Note the last chunk may push `size` past maxSize.
    int size = 0;
    while (!queue.isEmpty() && (size < maxSize)) {
      Chunk e = this.queue.remove();
      metrics.removedChunk.inc();
      int chunkSize = e.getData().length;
      size += chunkSize;
      dataSize -= chunkSize; // dataSize tracks total buffered payload bytes
      metrics.dataSize.set(dataSize);
      events.add(e);
    }
    metrics.queueSize.set(queue.size());
    // Wake any producers blocked in add() waiting for capacity.
    this.notifyAll();
  }
  // NOTE(review): queue.size() is read outside the monitor here, so the
  // debug count may be slightly stale relative to the drain above.
  if (log.isDebugEnabled()) {
    log.debug("WaitingQueue.inQueueCount:" + queue.size()
        + "\tWaitingQueue.collectCount:" + events.size());
  }
}
/** * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#add(org.apache.hadoop.chukwa.Chunk) */ public void add(Chunk chunk) throws InterruptedException { assert chunk != null : "can't enqueue null chunks"; synchronized (this) { while (chunk.getData().length + dataSize > MAX_MEM_USAGE) { try { if(dataSize == 0) { //queue is empty, but data is still too big log.error("JUMBO CHUNK SPOTTED: type= " + chunk.getDataType() + " and source =" +chunk.getStreamName()); return; //return without sending; otherwise we'd deadlock. //this error should probably be fatal; there's no way to recover. } metrics.fullQueue.set(1); this.wait(); log.info("MemLimitQueue is full [" + dataSize + "]"); } catch (InterruptedException e) { } } metrics.fullQueue.set(0); dataSize += chunk.getData().length; queue.add(chunk); metrics.addedChunk.inc(); metrics.queueSize.set(queue.size()); metrics.dataSize.set(dataSize); this.notifyAll(); } }
/** * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#add(org.apache.hadoop.chukwa.Chunk) */ public void add(Chunk chunk) throws InterruptedException { assert chunk != null : "can't enqueue null chunks"; int chunkSize = chunk.getData().length; synchronized (this) { if (chunkSize + dataSize > MAX_MEM_USAGE) { if (dataSize == 0) { // queue is empty, but data is still too big log.error("JUMBO CHUNK SPOTTED: type= " + chunk.getDataType() + " and source =" + chunk.getStreamName()); return; // return without sending; otherwise we'd deadlock. // this error should probably be fatal; there's no way to // recover. } else { metrics.fullQueue.set(1); log.warn("Discarding chunk due to NonBlockingMemLimitQueue full [" + dataSize + "]"); return; } } metrics.fullQueue.set(0); dataSize += chunk.getData().length; queue.add(chunk); metrics.addedChunk.inc(); metrics.queueSize.set(queue.size()); metrics.dataSize.set(dataSize); this.notifyAll(); } }
/**
 * Accepts all pending connections on the listening channel, registering
 * each new socket with a Reader for non-blocking reads and tracking it in
 * the connection list.
 *
 * @param key selection key whose channel is the listening ServerSocketChannel
 * @throws IOException on socket/selector failures
 * @throws OutOfMemoryError declared so callers can catch it and shed load
 */
void doAccept(SelectionKey key) throws IOException, OutOfMemoryError {
  Connection c;
  ServerSocketChannel server = (ServerSocketChannel) key.channel();
  SocketChannel channel;
  // accept() returns null once no more connections are pending.
  while ((channel = server.accept()) != null) {
    channel.configureBlocking(false);
    channel.socket().setTcpNoDelay(tcpNoDelay);
    channel.socket().setKeepAlive(tcpKeepAlive);
    Reader reader = getReader();
    try {
      // startAdd()/finishAdd() bracket the registration so the reader's
      // selector is not modified while it is concurrently selecting.
      reader.startAdd();
      SelectionKey readKey = reader.registerChannel(channel);
      c = getConnection(channel, System.currentTimeMillis());
      // The attached Connection is retrieved when read events fire.
      readKey.attach(c);
      synchronized (connectionList) {
        connectionList.add(numConnections, c);
        numConnections++;
      }
      if (LOG.isDebugEnabled())
        LOG.debug("Server connection from " + c.toString()
            + "; # active connections: " + numConnections
            + "; # queued calls: " + callQueue.size());
    } finally {
      reader.finishAdd();
    }
  }
  // Publish the updated open-connection count once per accept batch.
  rpcMetrics.numOpenConnections.set(numConnections);
}
/** * Push the metrics to the monitoring subsystem on doUpdate() call. */ public void doUpdates(MetricsContext context) { synchronized (this) { // ToFix - fix server to use the following two metrics directly so // the metrics do not have be copied here. numOpenConnections.set(myServer.getNumOpenConnections()); callQueueLen.set(myServer.getCallQueueLen()); for (MetricsBase m : registry.getMetricsList()) { m.pushMetric(metricsRecord); } } metricsRecord.update(); }
// Drop the adaptor's stats tracker, then refresh the agent-level metrics:
// adaptorCount reflects the post-removal registry size, and removedAdaptor
// counts lifetime removals.
adaptorStatsManager.remove(toStop);
ChukwaAgent.agentMetrics.adaptorCount.set(adaptorsByName.size());
ChukwaAgent.agentMetrics.removedAdaptor.inc();
// Refresh all FSNamesystem-derived gauges in one pass. Counts are coerced
// to int (assumes they fit in 32 bits — TODO confirm for very large
// namespaces) and byte totals are rounded to whole gigabytes.
filesTotal.set((int) fsNameSystem.getFilesAndDirectoriesTotal());
blocksTotal.set((int)fsNameSystem.getBlocksTotal());
diskSpaceTotalGB.set(roundBytesToGBytes(fsNameSystem.getDiskSpaceTotal()));
capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
    getCapacityRemaining()));
totalLoad.set(fsNameSystem.getTotalLoad());
corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
excessBlocks.set((int)fsNameSystem.getExcessBlocks());
pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
pendingReplicationBlocks.set((int)fsNameSystem.
    getPendingReplicationBlocks());
underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
scheduledReplicationBlocks.set((int)fsNameSystem.
    getScheduledReplicationBlocks());
missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
blockCapacity.set(fsNameSystem.getBlockCapacity());
// Lease gauges come from the lease manager rather than FSNamesystem itself.
numLeases.set(fsNameSystem.leaseManager.countLease());
numUnderConstructionFiles.set(fsNameSystem.leaseManager.countPath());
upgradeTime.set(fsNameSystem.getUpgradeTime());
/**
 * Mark the block belonging to datanode as corrupt.
 *
 * @param blk Block to be added to CorruptReplicasMap
 * @param dn DatanodeDescriptor which holds the corrupt replica
 */
public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn) {
  Collection<DatanodeDescriptor> nodes = getNodes(blk);
  if (nodes == null) {
    // First corrupt replica reported for this block.
    nodes = new TreeSet<DatanodeDescriptor>();
    corruptReplicasMap.put(blk, nodes);
  }
  if (nodes.contains(dn)) {
    NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
        "duplicate requested for " + blk.getBlockName() + " to add as corrupt " +
        "on " + dn.getName() + " by " + Server.getRemoteIp());
  } else {
    nodes.add(dn);
    NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
        blk.getBlockName() + " added as corrupt on " + dn.getName() +
        " by " + Server.getRemoteIp());
  }
  // Metrics may not be initialized (e.g. in tests); guard before publishing.
  if (NameNode.getNameNodeMetrics() != null) {
    NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
        corruptReplicasMap.size());
  }
}
synchronized (this) { FSNamesystem fsNameSystem = FSNamesystem.getFSNamesystem(); filesTotal.set((int)fsNameSystem.getFilesTotal()); filesTotal.pushMetric(metricsRecord); blocksTotal.set((int)fsNameSystem.getBlocksTotal()); blocksTotal.pushMetric(metricsRecord); capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal())); capacityTotalGB.pushMetric(metricsRecord); capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed())); capacityUsedGB.pushMetric(metricsRecord); capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem. getCapacityRemaining())); capacityRemainingGB.pushMetric(metricsRecord); totalLoad.set(fsNameSystem.getTotalLoad()); totalLoad.pushMetric(metricsRecord); pendingReplicationBlocks.set((int)fsNameSystem. getPendingReplicationBlocks()); pendingReplicationBlocks.pushMetric(metricsRecord); underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks()); underReplicatedBlocks.pushMetric(metricsRecord); scheduledReplicationBlocks.set((int)fsNameSystem.
/**
 * Completes the transition out of safe mode: initializes the replication
 * queues if not already populating, records how long safe mode lasted,
 * re-opens the RPC server for client requests, and logs cluster topology
 * and under-replication state.
 */
protected void startPostSafeModeProcessing() {
  // if not done yet, initialize replication queues
  if (!namesystem.isPopulatingReplQueues()) {
    initializeReplQueues();
  }
  long timeInSafemode = FSNamesystem.now() - namesystem.systemStart;
  NameNode.stateChangeLog.info("STATE* Leaving safe mode after "
      + timeInSafemode / 1000 + " secs.");
  // Gauge is int-valued; assumes the safe-mode duration in ms fits in an
  // int — TODO confirm for very long safe-mode periods.
  NameNode.getNameNodeMetrics().safeModeTime.set((int) timeInSafemode);
  // reached >= 0 indicates safe mode had been entered; log the exit and
  // reset the marker to -1 (fully off).
  if (reached >= 0) {
    NameNode.stateChangeLog.info("STATE* Safe mode is OFF.");
  }
  reached = -1;
  try {
    nameNode.startServerForClientRequests();
  } catch (IOException ex) {
    // Cannot serve client requests; shut the NameNode down rather than
    // continue in a half-started state.
    nameNode.stop();
  }
  NameNode.stateChangeLog.info("STATE* Network topology has "
      + namesystem.clusterMap.getNumOfRacks() + " racks and "
      + namesystem.clusterMap.getNumOfLeaves() + " datanodes");
  NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
      + namesystem.getUnderReplicatedBlocks() + " blocks");
}
// Record how long safe mode lasted: the log line reports whole seconds,
// while the metric stores the raw milliseconds truncated to int.
NameNode.stateChangeLog.info("STATE* Leaving safe mode after "
    + timeInSafemode/1000 + " secs.");
NameNode.getNameNodeMetrics().safeModeTime.set((int) timeInSafemode);
if (metrics != null) { // Metrics is non-null only when used inside name node
  // Count this batch of transactions and publish the sync backlog
  // (transactions written so far minus transactions already synced).
  metrics.transactions.inc((end-start));
  metrics.numBufferedTransactions.set((int)(txid-synctxid));
// Measure and publish FSImage load time (gauge is int-valued milliseconds),
// then construct the safe-mode handler now that the image is loaded.
long timeTakenToLoadFSImage = now() - systemStart;
LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
NameNode.getNameNodeMetrics().fsImageLoadTime.set(
    (int) timeTakenToLoadFSImage);
this.safeMode = SafeModeUtil.getInstance(conf, this);