/**
 * Utility program that changes the goal state for the master from the command line.
 *
 * <p>Prints a usage message and exits with a non-zero status when the single required
 * argument is missing or is not a valid {@code MasterGoalState} name; otherwise logs in,
 * waits for ZooKeeper/HDFS, and overwrites the goal-state node in ZooKeeper.
 */
public static void main(String[] args) throws Exception {
  if (args.length != 1 || !isValidGoalState(args[0])) {
    System.err.println(
        "Usage: accumulo " + SetGoalState.class.getName() + " [NORMAL|SAFE_MODE|CLEAN_STOP]");
    System.exit(-1);
  }
  ServerContext context = new ServerContext(new SiteConfiguration());
  SecurityUtil.serverLogin(context.getConfiguration());
  ServerUtil.waitForZookeeperAndHdfs(context);
  // OVERWRITE: replace any previously stored goal state.
  context.getZooReaderWriter().putPersistentData(
      context.getZooKeeperRoot() + Constants.ZMASTER_GOAL_STATE, args[0].getBytes(UTF_8),
      NodeExistsPolicy.OVERWRITE);
}

/**
 * Returns true if {@code name} is a valid {@code MasterGoalState} name.
 *
 * <p>{@code Enum.valueOf} never returns null — it throws {@code IllegalArgumentException}
 * for an unknown name — so the original {@code valueOf(args[0]) == null} check could never
 * trigger the usage message; a bad argument crashed with an uncaught exception instead.
 */
private static boolean isValidGoalState(String name) {
  try {
    MasterGoalState.valueOf(name);
    return true;
  } catch (IllegalArgumentException e) {
    return false;
  }
}
/**
 * Prepares this component for use: caches the server context, records the ZooKeeper
 * path under which user entries are stored, and opens a watcher-free ZooCache over
 * this instance's ZooKeeper.
 *
 * @param context server context supplying the instance id and ZooKeeper access
 * @param initialize unused by this implementation (part of the interface contract)
 */
@Override
public void initialize(ServerContext context, boolean initialize) {
  this.context = context;
  ZKUserPath = Constants.ZROOT + "/" + context.getInstanceID() + "/users";
  // null watcher: callers only read through the cache, no change notifications needed
  zooCache = new ZooCache(context.getZooReaderWriter(), null);
}
/**
 * Returns the actual NamespaceConfiguration that corresponds to the current parent
 * namespace.
 *
 * @return the NamespaceConfiguration for {@code parent.namespaceId}, looked up through the
 *         server configuration factory on each call (no caching at this level)
 */
public NamespaceConfiguration getNamespaceConfiguration() {
  return context.getServerConfFactory().getNamespaceConfiguration(parent.namespaceId);
}
/**
 * Creates a watcher over the per-table configuration subtree of ZooKeeper.
 *
 * @param context server context used to resolve the ZooKeeper root and the
 *        server configuration factory
 */
TableConfWatcher(ServerContext context) {
  this.context = context;
  this.scf = context.getServerConfFactory();
  // Prefix under which each table's config nodes live, e.g. <zkRoot><ZTABLES>/<tableId>/...
  this.tablesPrefix = context.getZooKeeperRoot() + Constants.ZTABLES + "/";
}
/**
 * Deletes the ZooKeeper subtree that tracks the given transaction.
 *
 * @param context server context providing the ZooKeeper writer and root path
 * @param type node type under the instance root that groups these transactions
 * @param tid transaction id whose state should be removed
 * @throws KeeperException on ZooKeeper errors other than a missing node
 * @throws InterruptedException if the ZooKeeper operation is interrupted
 */
public static void stop(ServerContext context, String type, long tid)
    throws KeeperException, InterruptedException {
  String txPath = context.getZooKeeperRoot() + "/" + type + "/" + tid;
  IZooReaderWriter zoo = context.getZooReaderWriter();
  // SKIP: a node that is already gone is not an error — stop is idempotent
  zoo.recursiveDelete(txPath, NodeMissingPolicy.SKIP);
}
// NOTE(review): this constructor appears truncated in this view — the trailing else branch
// is never closed and the rest of the body is missing, and the minBlockSize condition
// guarding setSecretManager looks garbled (it compares against walogMaxSize). Code is left
// byte-identical; confirm against the full file before editing.
public TabletServer(ServerContext context) {
  this.context = context;
  // null watcher: cache of the master lock node, read-only
  this.masterLockCache = new ZooCache(context.getZooReaderWriter(), null);
  this.watcher = new TransactionWatcher(context);
  this.confFactory = context.getServerConfFactory();
  this.fs = context.getVolumeManager();
  final AccumuloConfiguration aconf = getConfiguration();
  log.info("Version " + Constants.VERSION);
  // HDFS minimum block size, 0 when the limit is not configured
  final long minBlockSize = context.getHadoopConf()
      .getLong("dfs.namenode.fs-limits.min-block-size", 0);
  if (minBlockSize != 0 && minBlockSize > walogMaxSize)
    context.setSecretManager(new AuthenticationTokenSecretManager(context.getInstanceID(),
        aconf.getTimeInMillis(Property.GENERAL_DELEGATION_TOKEN_LIFETIME)));
  if (aconf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
    log.info("SASL is enabled, creating ZooKeeper watcher for AuthenticationKeys");
    // Watch the delegation-token key node so new keys are picked up as the master rolls them
    authKeyWatcher = new ZooAuthenticationKeyWatcher(context.getSecretManager(),
        context.getZooReaderWriter(),
        context.getZooKeeperRoot() + Constants.ZDELEGATION_TOKEN_KEYS);
  } else {
    authKeyWatcher = null;
public static void main(String[] args) { final String app = "monitor"; ServerOpts opts = new ServerOpts(); opts.parseArgs(app, args); Monitor.context = new ServerContext(opts.getSiteConfiguration()); context.setupServer(app, Monitor.class.getName(), opts.getAddress()); try { config = context.getServerConfFactory(); Monitor monitor = new Monitor(); // Servlets need access to limit requests when the monitor is not active, but Servlets are // instantiated via reflection. Expose the service this way instead. Monitor.HA_SERVICE_INSTANCE = monitor; monitor.run(); } finally { context.teardownServer(); } }
/**
 * Prepares this component for use: caches the context, builds the user-impersonation
 * config, delegates to the ZooKeeper authenticator, records the users path, and opens
 * a watcher-free ZooCache.
 *
 * @param context server context supplying configuration, instance id, and ZooKeeper access
 * @param initialize forwarded to the wrapped ZooKeeper authenticator
 */
@Override
public void initialize(ServerContext context, boolean initialize) {
  this.context = context;
  zkUserPath = Constants.ZROOT + "/" + context.getInstanceID() + "/users";
  impersonation = new UserImpersonation(context.getConfiguration());
  // null watcher: reads only, no change notifications needed
  zooCache = new ZooCache(context.getZooReaderWriter(), null);
  zkAuthenticator.initialize(context, initialize);
}
/**
 * Entry point for the trace server: parses options, logs the tracer in, configures
 * metrics, initializes the server, then runs the trace server until it stops, closing
 * the ZooKeeper connection on the way out.
 *
 * @param args command-line arguments forwarded to {@code ServerOpts}
 */
public static void main(String[] args) throws Exception {
  final String app = "tracer";
  ServerOpts opts = new ServerOpts();
  opts.parseArgs(app, args);
  ServerContext context = new ServerContext(opts.getSiteConfiguration());
  // Login must happen before any HDFS/ZooKeeper work in ServerUtil.init
  loginTracer(context.getConfiguration());
  MetricsSystemHelper.configure(TraceServer.class.getSimpleName());
  ServerUtil.init(context, app);
  try (TraceServer traceServer = new TraceServer(context, opts.getAddress())) {
    traceServer.run();
  } finally {
    log.info("tracer stopping");
    context.getZooReaderWriter().getZooKeeper().close();
  }
}
/**
 * Returns the base URIs configured for this instance.
 *
 * @param context server context supplying the Accumulo and Hadoop configurations
 * @return the base URIs, as resolved by the two-argument overload
 */
public static String[] getBaseUris(ServerContext context) {
  return getBaseUris(context.getConfiguration(), context.getHadoopConf());
}
/**
 * Entry point for the master server: parses options, creates and registers the
 * server context, runs the master until it exits, and tears the context down.
 *
 * @param args command-line arguments forwarded to {@code ServerOpts}
 */
public static void main(String[] args) throws Exception {
  final String app = "master";
  ServerOpts opts = new ServerOpts();
  opts.parseArgs(app, args);
  ServerContext context = new ServerContext(opts.getSiteConfiguration());
  context.setupServer(app, Master.class.getName(), opts.getAddress());
  try {
    new Master(context).run();
  } finally {
    // Always release the server registration, even when run() throws.
    context.teardownServer();
  }
}
/**
 * Launches the interactive ZooKeeper shell against this instance's quorum.
 *
 * <p>Resolves the ZooKeeper servers from the options or, when absent, from the server
 * context; appends the per-instance chroot when the supplied server string carries no
 * path; then hands control to {@code org.apache.zookeeper.ZooKeeperMain}.
 *
 * @param args command-line arguments parsed by {@code Opts}
 */
@Override
public void execute(final String[] args) throws Exception {
  Opts opts = new Opts();
  opts.parseArgs(ZooKeeperMain.class.getName(), args);
  try (ServerContext context = new ServerContext(new SiteConfiguration())) {
    FileSystem defaultFs = context.getVolumeManager().getDefaultVolume().getFileSystem();
    String baseUri = ServerConstants.getBaseUris(context)[0];
    System.out.println("Using " + defaultFs.makeQualified(new Path(baseUri + "/instance_id"))
        + " to lookup accumulo instance");
    if (opts.servers == null) {
      opts.servers = context.getZooKeepers();
    }
    System.out.println("The accumulo instance id is " + context.getInstanceID());
    // No path component means no chroot yet — scope the shell to this instance's subtree.
    if (!opts.servers.contains("/")) {
      opts.servers += "/accumulo/" + context.getInstanceID();
    }
    org.apache.zookeeper.ZooKeeperMain
        .main(new String[] {"-server", opts.servers, "-timeout", "" + (opts.timeout * 1000)});
  }
}
}
/**
 * Creates a table manager backed by table-state nodes in ZooKeeper and primes the
 * in-memory state cache.
 *
 * @param context server context supplying the instance id, ZooKeeper root, and
 *        ZooKeeper reader/writer
 */
public TableManager(ServerContext context) {
  this.context = context;
  instanceID = context.getInstanceID();
  zkRoot = context.getZooKeeperRoot();
  zoo = context.getZooReaderWriter();
  // Watch for table-state changes so the cache stays current
  zooStateCache = new ZooCache(zoo, new TableStateWatcher());
  // Prime the cache so state lookups are valid immediately after construction
  updateTableStateCache();
}
/**
 * Creates a replication worker bound to the given context and volume manager.
 *
 * @param context server context whose configuration this worker reads
 * @param fs volume manager used for file access
 */
public ReplicationWorker(ServerContext context, VolumeManager fs) {
  this.context = context;
  this.conf = context.getConfiguration();
  this.fs = fs;
}
/**
 * Estimates, per extent, how much of the given map file's size falls in each extent.
 *
 * @param context server context supplying configuration and the crypto service
 * @param mapFile file whose size distribution is being estimated
 * @param fileSize total size of {@code mapFile} in bytes
 * @param extents extents to apportion the file size across
 * @return map from extent to its estimated share of the file size
 * @throws IOException if the file cannot be read
 */
public static Map<KeyExtent,Long> estimateSizes(ServerContext context, Path mapFile,
    long fileSize, List<KeyExtent> extents) throws IOException {
  // Resolve the filesystem of the volume that actually holds this file
  FileSystem fs = context.getVolumeManager().getVolumeByPath(mapFile).getFileSystem();
  return BulkImport.estimateSizes(context.getConfiguration(), mapFile, fileSize, extents, fs,
      null, context.getCryptoService());
}
/**
 * Lazily opens (and caches) a reader over the memory-dump file on the local
 * filesystem, applying the interrupt flag and sampler configuration when present.
 * Synchronized so concurrent callers share a single reader.
 *
 * @return the cached or newly constructed reader
 * @throws IOException if the dump file cannot be opened
 */
private synchronized FileSKVIterator getReader() throws IOException {
  // Fast path: reader already constructed by an earlier call.
  if (reader != null) {
    return reader;
  }
  Configuration hadoopConf = context.getHadoopConf();
  FileSystem localFs = FileSystem.getLocal(hadoopConf);
  reader = new RFileOperations().newReaderBuilder()
      .forFile(memDumpFile, localFs, hadoopConf, context.getCryptoService())
      .withTableConfiguration(context.getConfiguration()).seekToBeginning().build();
  if (iflag != null) {
    reader.setInterruptFlag(iflag);
  }
  if (getSamplerConfig() != null) {
    // Replace the full reader with its sampled view when sampling is configured.
    reader = reader.getSample(getSamplerConfig());
  }
  return reader;
}
// NOTE(review): this is a fragment — the method signature begins before this view and the
// body below is cut off mid-catch. The second startTServer call and second catch clause
// appear unreachable as shown (a return precedes them); confirm against the full file.
    Property timeBetweenThreadChecksProperty, Property maxMessageSizeProperty)
    throws UnknownHostException {
  final AccumuloConfiguration config = service.getConfiguration();
  final ThriftServerType serverType = service.getThriftServerType();
  return TServerUtils.startTServer(serverType, timedProcessor, serverName, threadName,
      minThreads, simpleTimerThreadpoolSize, timeBetweenThreadChecks, maxMessageSize,
      service.getServerSslParams(), service.getSaslParams(), service.getClientTimeoutInMillis(),
      addresses);
} catch (TTransportException e) {
  return TServerUtils.startTServer(serverType, timedProcessor, serverName, threadName,
      minThreads, simpleTimerThreadpoolSize, timeBetweenThreadChecks, maxMessageSize,
      service.getServerSslParams(), service.getSaslParams(), service.getClientTimeoutInMillis(),
      addr);
} catch (TTransportException tte) {
  log.info("Unable to use port {}, retrying. (Thread Name = {})", port, threadName);
// NOTE(review): body fragment with no visible signature — the enclosing method begins and
// ends outside this view. Code left byte-identical.
int maxToOpen = context.getConfiguration()
    .getCount(Property.TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN);
ArrayList<FileSKVIterator> readers = new ArrayList<>(mapFiles.size());
// Reduce the candidate file set down to at most maxToOpen before opening readers
mapFiles = reduceFiles(context, context.getHadoopConf(), prevEndRow, endRow, mapFiles,
    maxToOpen, tmpDir, 0);
long t2 = System.currentTimeMillis();
cleanupIndexOp(tmpDir, context.getVolumeManager(), readers);
// NOTE(review): this method is truncated/garbled in this view — several if blocks are never
// closed, the loop body is cut off, and tableId is used without a visible declaration.
// Code left byte-identical; restore from the full file before editing.
public static int randomize(ServerContext context, String tableName)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  final VolumeManager vm = context.getVolumeManager();
  // Randomizing directories only makes sense with multiple volumes
  if (vm.getVolumes().size() < 2) {
    log.error("There are not enough volumes configured");
    return 1;
  String tblStr = context.tableOperations().tableIdMap().get(tableName);
  if (tblStr == null) {
    log.error("Could not determine the table ID for table {}", tableName);
  TableState tableState = context.getTableManager().getTableState(tableId);
  // Table must be offline before its metadata directory entries are rewritten
  if (tableState != TableState.OFFLINE) {
    log.info("Taking {} offline", tableName);
    context.tableOperations().offline(tableName, true);
    log.info("{} offline", tableName);
  Scanner scanner = context.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  DIRECTORY_COLUMN.fetch(scanner);
  scanner.setRange(TabletsSection.getRange(tableId));
  BatchWriter writer = context.createBatchWriter(MetadataTable.NAME, null);
  int count = 0;
  for (Entry<Key,Value> entry : scanner) {
    context.tableOperations().online(tableName, true);
    log.info("table {} back online", tableName);
@Override public MonitorLocation get() { // lazily set up path and zooCache (see comment in constructor) if (this.context == null) { this.context = new ServerContext(new SiteConfiguration()); this.path = context.getZooKeeperRoot() + Constants.ZMONITOR_LOG4J_ADDR; this.zooCache = context.getZooCache(); } // get the current location from the cache and update if necessary ZcStat stat = new ZcStat(); byte[] loc = zooCache.get(path, stat); // mzxid is 0 if location does not exist and the non-zero transaction id of the last // modification otherwise return new MonitorLocation(stat.getMzxid(), loc); } }