/**
 * Initialize the DistributedWorkQueue using the proper ZK location.
 *
 * <p>The queue is rooted at this instance's replication work-queue node
 * ({@code <instance root>/<ZOO_WORK_QUEUE>}).
 *
 * @param conf configuration handed to the work queue
 */
protected void initializeWorkQueue(AccumuloConfiguration conf) {
  // Build the ZooKeeper path for this instance's replication work queue.
  String queuePath =
      ZooUtil.getRoot(client.getInstanceID()) + ReplicationConstants.ZOO_WORK_QUEUE;
  workQueue = new DistributedWorkQueue(queuePath, conf);
}
/**
 * Queues a write-ahead-log sort by adding a {@code source|destination} work entry to the
 * recovery work queue in ZooKeeper, then records the sort id locally in {@code sortsQueued}.
 *
 * @param sortId identifier for the sort work item (becomes the znode name)
 * @param source location of the log to sort
 * @param destination where the sorted output should be written
 * @throws KeeperException on ZooKeeper failure
 * @throws InterruptedException if interrupted while talking to ZooKeeper
 */
private void initiateSort(String sortId, String source, final String destination)
    throws KeeperException, InterruptedException {
  final String recoveryRoot = master.getZooKeeperRoot() + Constants.ZRECOVERY;
  // Work payload encodes both endpoints, separated by '|'.
  final String payload = source + "|" + destination;
  DistributedWorkQueue recoveryQueue =
      new DistributedWorkQueue(recoveryRoot, master.getConfiguration());
  recoveryQueue.addWork(sortId, payload.getBytes(UTF_8));
  // Track the queued sort under the object lock so concurrent readers see it.
  synchronized (this) {
    sortsQueued.add(sortId);
  }
  final String path = recoveryRoot + "/" + sortId;
  log.info("Created zookeeper entry {} with data {}", path, payload);
}
/**
 * Begins consuming WAL-recovery work items from the ZooKeeper recovery queue, dispatching
 * each item to a {@code LogProcessor} on the supplied thread pool.
 *
 * @param distWorkQThreadPool pool used to run recovery log processing
 * @throws KeeperException on ZooKeeper failure
 * @throws InterruptedException if interrupted while registering with ZooKeeper
 */
public void startWatchingForRecoveryLogs(ThreadPoolExecutor distWorkQThreadPool)
    throws KeeperException, InterruptedException {
  this.threadPool = distWorkQThreadPool;
  String recoveryPath = context.getZooKeeperRoot() + Constants.ZRECOVERY;
  new DistributedWorkQueue(recoveryPath, conf)
      .startProcessing(new LogProcessor(), this.threadPool);
}
/**
 * Creates a RecoveryManager, seeding {@code sortsQueued} with any sort work already present
 * in the ZooKeeper recovery queue so restarts do not double-queue sorts.
 *
 * @param master the master this manager belongs to
 */
public RecoveryManager(Master master) {
  this.master = master;
  executor =
      Executors.newScheduledThreadPool(4, new NamingThreadFactory("Walog sort starter "));
  zooCache = new ZooCache(master.getContext().getZooReaderWriter(), null);
  try {
    DistributedWorkQueue recoveryQueue = new DistributedWorkQueue(
        master.getZooKeeperRoot() + Constants.ZRECOVERY, master.getConfiguration());
    sortsQueued.addAll(recoveryQueue.getWorkQueued());
  } catch (Exception e) {
    // Best-effort: failing to read queued work should not prevent startup.
    log.warn("{}", e.getMessage(), e);
  }
}
// Work queue for retrying bulk-import file copies that failed, rooted at this instance's
// ZBULK_FAILED_COPYQ node. NOTE(review): fragment of a larger method; bifCopyQueue is
// presumably used by code outside this view.
DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue( Constants.ZROOT + "/" + master.getInstanceID() + Constants.ZBULK_FAILED_COPYQ, master.getConfiguration());
@Override
public void run() {
  // Compare configured replication-processor timing against the defaults so the
  // custom-timing DistributedWorkQueue constructor is only used when overridden.
  DefaultConfiguration defaultConf = DefaultConfiguration.getInstance();
  long defaultDelay = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
  long defaultPeriod = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
  long delay = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
  long period = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
  try {
    DistributedWorkQueue workQueue;
    // NOTE(review): '&&' requires BOTH delay and period to differ from the defaults
    // before the custom values take effect; overriding only one is silently ignored.
    // Confirm whether '||' was intended.
    if (defaultDelay != delay && defaultPeriod != period) {
      // Fixed log message typo: "Configuration" -> "Configuring" (matches else-branch).
      log.debug("Configuring DistributedWorkQueue with delay and period of {} and {}", delay,
          period);
      workQueue = new DistributedWorkQueue(
          context.getZooKeeperRoot() + ReplicationConstants.ZOO_WORK_QUEUE, conf, delay, period);
    } else {
      log.debug("Configuring DistributedWorkQueue with default delay and period");
      workQueue = new DistributedWorkQueue(
          context.getZooKeeperRoot() + ReplicationConstants.ZOO_WORK_QUEUE, conf);
    }
    workQueue.startProcessing(new ReplicationProcessor(context, conf, fs), executor);
  } catch (KeeperException | InterruptedException e) {
    throw new RuntimeException(e);
  }
}
}
// NOTE(review): fragment cut mid-method — the leading tokens are the tail of a thread-pool
// construction sized by TSERV_WORKQ_THREADS, and the trailing 'try {' is unterminated here.
// Creates the bulk-import failed-copy work queue under this context's ZooKeeper root.
getConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue"); bulkFailedCopyQ = new DistributedWorkQueue( context.getZooKeeperRoot() + Constants.ZBULK_FAILED_COPYQ, getConfiguration()); try {
/**
 * Initialize the DistributedWorkQueue using the proper ZK location.
 *
 * <p>Rooted at {@code <instance root>/<ZOO_WORK_QUEUE>} for the connector's instance.
 *
 * @param conf configuration handed to the work queue
 */
protected void initializeWorkQueue(AccumuloConfiguration conf) {
  String queuePath =
      ZooUtil.getRoot(conn.getInstance()) + ReplicationConstants.ZOO_WORK_QUEUE;
  workQueue = new DistributedWorkQueue(queuePath, conf);
}
/**
 * Queues a write-ahead-log sort by adding a {@code source|destination} entry to the ZooKeeper
 * recovery queue, then records the sort id locally in {@code sortsQueued}.
 *
 * @param sortId identifier for the sort work item (becomes the znode name)
 * @param source location of the log to sort
 * @param destination where the sorted output should be written
 * @param aconf configuration handed to the work queue
 */
private void initiateSort(String sortId, String source, final String destination,
    AccumuloConfiguration aconf) throws KeeperException, InterruptedException, IOException {
  // Payload encodes both endpoints, separated by '|'.
  final String payload = source + "|" + destination;
  new DistributedWorkQueue(ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY, aconf)
      .addWork(sortId, payload.getBytes(UTF_8));
  // Track the queued sort under the object lock so concurrent readers see it.
  synchronized (this) {
    sortsQueued.add(sortId);
  }
  final String entryPath =
      ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY + "/" + sortId;
  log.info("Created zookeeper entry " + entryPath + " with data " + payload);
}
/**
 * Creates a RecoveryManager, seeding {@code sortsQueued} with any sort work already present
 * in the ZooKeeper recovery queue so restarts do not double-queue sorts.
 *
 * @param master the master this manager belongs to
 */
public RecoveryManager(Master master) {
  this.master = master;
  executor =
      Executors.newScheduledThreadPool(4, new NamingThreadFactory("Walog sort starter "));
  zooCache = new ZooCache();
  try {
    DistributedWorkQueue recoveryQueue =
        new DistributedWorkQueue(ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY);
    sortsQueued.addAll(recoveryQueue.getWorkQueued());
  } catch (Exception e) {
    // Best-effort: failing to read queued work should not prevent startup.
    log.warn(e, e);
  }
}
/**
 * Queues a write-ahead-log sort for the given file by adding its resolved source path to the
 * ZooKeeper recovery queue, then records the file locally in {@code sortsQueued}.
 *
 * @param host tablet server hosting the log
 * @param file the log file to sort (becomes the znode name)
 */
private void initiateSort(String host, final String file)
    throws KeeperException, InterruptedException {
  final String sortSource = getSource(host, file).toString();
  new DistributedWorkQueue(ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY)
      .addWork(file, sortSource.getBytes(UTF_8));
  // Track the queued sort under the object lock so concurrent readers see it.
  synchronized (this) {
    sortsQueued.add(file);
  }
  final String entryPath =
      ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY + "/" + file;
  log.info("Created zookeeper entry " + entryPath + " with data " + sortSource);
}
/**
 * Begins consuming WAL-recovery work items from the ZooKeeper recovery queue, dispatching
 * each item to a {@code LogProcessor} on the supplied thread pool.
 *
 * @param distWorkQThreadPool pool used to run recovery log processing
 */
public void startWatchingForRecoveryLogs(ThreadPoolExecutor distWorkQThreadPool)
    throws KeeperException, InterruptedException {
  this.threadPool = distWorkQThreadPool;
  String recoveryPath = ZooUtil.getRoot(instance) + Constants.ZRECOVERY;
  new DistributedWorkQueue(recoveryPath, conf)
      .startProcessing(new LogProcessor(), this.threadPool);
}
/**
 * Creates a RecoveryManager, seeding {@code sortsQueued} with any sort work already present
 * in the ZooKeeper recovery queue so restarts do not double-queue sorts.
 *
 * @param master the master this manager belongs to
 */
public RecoveryManager(Master master) {
  this.master = master;
  executor =
      Executors.newScheduledThreadPool(4, new NamingThreadFactory("Walog sort starter "));
  zooCache = new ZooCache();
  try {
    AccumuloConfiguration aconf = master.getConfiguration();
    DistributedWorkQueue recoveryQueue = new DistributedWorkQueue(
        ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY, aconf);
    sortsQueued.addAll(recoveryQueue.getWorkQueued());
  } catch (Exception e) {
    // Best-effort: failing to read queued work should not prevent startup.
    log.warn("{}", e.getMessage(), e);
  }
}
/**
 * Begins consuming WAL-recovery work items from the ZooKeeper recovery queue, dispatching
 * each item to a {@code LogProcessor} on the supplied thread pool.
 *
 * @param distWorkQThreadPool pool used to run recovery log processing
 */
public void startWatchingForRecoveryLogs(ThreadPoolExecutor distWorkQThreadPool)
    throws KeeperException, InterruptedException {
  this.threadPool = distWorkQThreadPool;
  String recoveryPath = ZooUtil.getRoot(instance) + Constants.ZRECOVERY;
  new DistributedWorkQueue(recoveryPath).startProcessing(new LogProcessor(), this.threadPool);
}
// Work queue for retrying bulk-import file copies that failed, rooted at this instance's
// ZBULK_FAILED_COPYQ node. NOTE(review): fragment of a larger method; bifCopyQueue is
// presumably used by code outside this view.
DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ);
@Override
public void run() {
  // Compare configured replication-processor timing against the defaults so the
  // custom-timing DistributedWorkQueue constructor is only used when overridden.
  DefaultConfiguration defaultConf = DefaultConfiguration.getDefaultConfiguration();
  long defaultDelay = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
  long defaultPeriod = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
  long delay = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
  long period = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
  try {
    DistributedWorkQueue workQueue;
    // NOTE(review): '&&' requires BOTH delay and period to differ from the defaults
    // before the custom values take effect; overriding only one is silently ignored.
    // Confirm whether '||' was intended.
    if (defaultDelay != delay && defaultPeriod != period) {
      // Fixed log message typo: "Configuration" -> "Configuring" (matches else-branch).
      log.debug("Configuring DistributedWorkQueue with delay and period of {} and {}", delay,
          period);
      workQueue = new DistributedWorkQueue(
          ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_WORK_QUEUE, conf, delay, period);
    } else {
      log.debug("Configuring DistributedWorkQueue with default delay and period");
      workQueue = new DistributedWorkQueue(
          ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_WORK_QUEUE, conf);
    }
    workQueue.startProcessing(new ReplicationProcessor(context, conf, fs), executor);
  } catch (KeeperException | InterruptedException e) {
    throw new RuntimeException(e);
  }
}
}
// Work queue for retrying bulk-import file copies that failed, rooted at this instance's
// ZBULK_FAILED_COPYQ node. NOTE(review): fragment of a larger method; bifCopyQueue is
// presumably used by code outside this view.
DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ, master.getConfiguration());
// NOTE(review): fragment cut mid-method — the trailing 'try {' is unterminated here.
// Creates the bulk-import failed-copy work queue under this instance's ZooKeeper root and
// starts dispatching its items to BulkFailedCopyProcessor on the shared thread pool.
bulkFailedCopyQ = new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZBULK_FAILED_COPYQ); try { bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
// NOTE(review): fragment cut mid-method — the leading tokens are the tail of a thread-pool
// construction sized by TSERV_WORKQ_THREADS, and the trailing 'try {' is unterminated here.
// Creates the bulk-import failed-copy work queue under this instance's ZooKeeper root.
getConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue"); bulkFailedCopyQ = new DistributedWorkQueue( ZooUtil.getRoot(getInstance()) + Constants.ZBULK_FAILED_COPYQ, getConfiguration()); try {
// Replication work queue rooted at <zkRoot>/<ZOO_WORK_QUEUE>, using the monitor context's
// configuration. NOTE(review): fragment of a larger method; zkRoot is defined, and
// workQueue is presumably consumed, by code outside this view.
final String workQueuePath = zkRoot + ReplicationConstants.ZOO_WORK_QUEUE; DistributedWorkQueue workQueue = new DistributedWorkQueue(workQueuePath, Monitor.getContext().getConfiguration());