Refine search
/**
 * Returns this node's token as a string; in client mode there is no ring
 * membership, so a placeholder token is returned instead.
 *
 * @return the local node's token string, or {@code CassandraUtils.fakeToken} in client mode
 */
public String getToken()
{
    if (StorageService.instance.isClientMode())
    {
        return CassandraUtils.fakeToken;
    }
    return StorageService.instance
                         .getTokenMetadata()
                         .getToken(FBUtilities.getLocalAddress())
                         .toString();
}
if(! Schema.instance.getNonSystemTables().contains(CassandraUtils.keySpace) ) throw new IOException("Solandra keyspace is missing, please import then retry"); else List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(CassandraUtils.keySpace, subIndex); DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getLocalAddress(), endpoints); String shard = addr.getHostAddress() + ":" + CassandraUtils.port + "/solandra/" + indexName + "~" + i;
/**
 * Computes the key ranges for which this node is a primary replica.
 * Ensures the backing keyspace exists first, then converts each primary
 * token range into a {@code KeyRange}.
 *
 * @return one {@code KeyRange} per primary token range owned by this node
 * @throws BackendException if the keyspace cannot be verified or created
 */
public List<KeyRange> getLocalKeyPartition() throws BackendException
{
    ensureKeyspaceExists(keySpaceName);

    @SuppressWarnings("rawtypes")
    Collection<Range<Token>> primaryRanges = StorageService.instance.getPrimaryRanges(keySpaceName);

    List<KeyRange> result = new ArrayList<KeyRange>(primaryRanges.size());
    for (@SuppressWarnings("rawtypes") Range<Token> tokenRange : primaryRanges)
    {
        result.add(CassandraHelper.transformRange(tokenRange));
    }
    return result;
}
/**
 * Builds a snapshot of the cluster's endpoint-to-host-id mapping in string form.
 *
 * @return map from endpoint host address (e.g. "10.0.0.1") to host id UUID string
 */
public Map<String, String> getEndpointToHostId()
{
    Map<InetAddress, UUID> byEndpoint = getTokenMetadata().getEndpointToHostIdMapForReading();
    Map<String, String> result = new HashMap<>();
    for (Map.Entry<InetAddress, UUID> e : byEndpoint.entrySet())
    {
        result.put(e.getKey().getHostAddress(), e.getValue().toString());
    }
    return result;
}
logger.info("Hostname: {}", InetAddress.getLocalHost().getHostName()); if (!DatabaseDescriptor.hasLargeAddressSpace()) logger.info("32bit JVM detected. It is recommended to run Cassandra on a 64bit JVM for better performance."); String javaVersion = System.getProperty("java.version"); Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()), Arrays.asList(DatabaseDescriptor.getCommitLogLocation(), DatabaseDescriptor.getSavedCachesLocation())); for (String dataDir : dirs) for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(Keyspace.SYSTEM_KS).values()) ColumnFamilyStore.scrubDataDirectories(cfm); try for (Pair<String, String> kscf : unfinishedCompactions.keySet()) CFMetaData cfm = Schema.instance.getCFMetaData(kscf.left, kscf.right); for (String keyspaceName : Schema.instance.getKeyspaces()) StorageService.instance.registerDaemon(this); try StorageService.instance.initServer(); if (!FBUtilities.getBroadcastAddress().equals(InetAddress.getLoopbackAddress())) waitForGossipToSettle();
StorageService.instance.populateTokenMetadata(); Schema.instance.loadFromDisk(); for (String keyspaceName : Schema.instance.getKeyspaces()) continue; for (CFMetaData cfm : Schema.instance.getTablesAndViews(keyspaceName)) StorageService.instance.populateTokenMetadata(); new LegacyHintsMigrator(DatabaseDescriptor.getHintsDirectory(), DatabaseDescriptor.getMaxHintsFileSize()).migrate(); StorageService.instance.registerDaemon(this); try StorageService.instance.initServer(); if (!FBUtilities.getBroadcastAddress().equals(InetAddress.getLoopbackAddress())) Gossiper.waitToSettle(); InetAddress rpcAddr = DatabaseDescriptor.getRpcAddress(); int rpcPort = DatabaseDescriptor.getRpcPort(); int listenBacklog = DatabaseDescriptor.getRpcListenBacklog();
/**
 * We try to deliver the mutations to the replicas ourselves if they are alive and only resort to writing hints
 * when a replica is down or a write request times out.
 *
 * @param mutation  the replayed mutation to deliver
 * @param writtenAt original write timestamp, used when falling back to a hint
 * @param ttl       time-to-live applied to any hint written for a dead replica
 * @return direct delivery handler to wait on or null, if no live nodes found
 */
private ReplayWriteResponseHandler sendSingleReplayMutation(final Mutation mutation, long writtenAt, int ttl)
{
    Set<InetAddress> liveEndpoints = new HashSet<>();
    String ks = mutation.getKeyspaceName();
    Token tk = StorageService.getPartitioner().getToken(mutation.key());

    // Consider both the natural replicas for this token and any pending endpoints
    // (e.g. bootstrapping nodes) so no replica misses the replayed write.
    for (InetAddress endpoint : Iterables.concat(StorageService.instance.getNaturalEndpoints(ks, tk),
                                                 StorageService.instance.getTokenMetadata().pendingEndpointsFor(tk, ks)))
    {
        if (endpoint.equals(FBUtilities.getBroadcastAddress()))
            mutation.apply(); // we are a replica ourselves: apply locally, no network round-trip
        else if (FailureDetector.instance.isAlive(endpoint))
            liveEndpoints.add(endpoint); // will try delivering directly instead of writing a hint.
        else
            StorageProxy.writeHintForMutation(mutation, writtenAt, ttl, endpoint); // dead replica: leave a hint
    }

    // Local apply and/or hints covered every replica — nothing to wait on.
    if (liveEndpoints.isEmpty())
        return null;

    ReplayWriteResponseHandler handler = new ReplayWriteResponseHandler(liveEndpoints);
    MessageOut<Mutation> message = mutation.createMessage();
    for (InetAddress endpoint : liveEndpoints)
        MessagingService.instance().sendRR(message, endpoint, handler, false);
    return handler;
}
if (endpoint.equals(FBUtilities.getBroadcastAddress())) drain(); excise(removeTokens, endpoint, extractExpireTime(pieces)); String[] coordinator = splitValue(Gossiper.instance.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.REMOVAL_COORDINATOR)); UUID hostId = UUID.fromString(coordinator[1]); restoreReplicaCount(endpoint, tokenMetadata.getEndpointForHostId(hostId)); addExpireTimeIfFound(endpoint, extractExpireTime(pieces)); removeEndpoint(endpoint);
/**
 * Kicks off an asynchronous repair over the ranges this node is responsible for.
 *
 * @param keyspace       keyspace to repair
 * @param isSequential   whether validation compaction should run sequentially
 * @param isLocal        restrict the repair to the local datacenter
 * @param primaryRange   repair only primary ranges rather than all local ranges
 * @param fullRepair     run a full rather than incremental repair
 * @param columnFamilies optional column families to restrict the repair to
 * @return the repair command number returned by the underlying overload
 */
public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies)
{
    final Collection<Range<Token>> ranges;
    if (!primaryRange)
    {
        ranges = getLocalRanges(keyspace);
    }
    else if (isLocal)
    {
        ranges = getPrimaryRangesWithinDC(keyspace);
    }
    else
    {
        ranges = getPrimaryRanges(keyspace);
    }
    return forceRepairAsync(keyspace, isSequential, isLocal, ranges, fullRepair, columnFamilies);
}
if (DatabaseDescriptor.isReplacing() && !(Boolean.parseBoolean(System.getProperty("cassandra.join_ring", "true")))) throw new ConfigurationException("Cannot set both join_ring=false and attempt to replace a node"); if (DatabaseDescriptor.getReplaceTokens().size() > 0 || DatabaseDescriptor.getReplaceNode() != null) throw new RuntimeException("Replace method removed; use cassandra.replace_address instead"); if (DatabaseDescriptor.isReplacing()) if (!DatabaseDescriptor.isAutoBootstrap()) throw new RuntimeException("Trying to replace_address with auto_bootstrap disabled will not work, check your configuration"); bootstrapTokens = prepareReplacementInfo(); appStates.put(ApplicationState.TOKENS, valueFactory.tokens(bootstrapTokens)); appStates.put(ApplicationState.STATUS, valueFactory.hibernate(true)); else if (shouldBootstrap()) checkForEndpointCollision(); getTokenMetadata().updateHostId(localHostId, FBUtilities.getBroadcastAddress()); appStates.put(ApplicationState.NET_VERSION, valueFactory.networkVersion()); appStates.put(ApplicationState.HOST_ID, valueFactory.hostId(localHostId)); gossipSnitchInfo(); Schema.instance.updateVersionAndAnnounce(); // Ensure we know our own actual Schema UUID in preparation for updates MessagingService.instance().listen(FBUtilities.getLocalAddress()); LoadBroadcaster.instance.startBroadcasting();
InetAddress localAddress = FBUtilities.getBroadcastAddress(); if (getTokenMetadata().getTokens(localAddress).size() > 1) List<String> keyspacesToProcess = Schema.instance.getNonLocalStrategyKeyspaces(); setMode(Mode.MOVING, String.format("Moving %s from %s to %s.", localAddress, getLocalTokens().iterator().next(), newToken), true); setMode(Mode.MOVING, String.format("Sleeping %s ms before start streaming/fetching ranges", RING_DELAY), true); Uninterruptibles.sleepUninterruptibly(RING_DELAY, TimeUnit.MILLISECONDS); setMode(Mode.MOVING, "fetching new ranges and streaming old ranges", true); try setMode(Mode.MOVING, "No ranges to fetch/stream", true); setTokens(Collections.singleton(newToken)); // setting new token as we have everything settled logger.debug("Successfully moved to new token {}", getLocalTokens().iterator().next());
/**
 * Selects the endpoints that should store the batchlog for a batch coordinated
 * from this node, filtering this datacenter's replicas by the local rack.
 *
 * @param localDataCenter  name of the coordinator's datacenter
 * @param consistencyLevel requested consistency; ANY permits falling back to this node
 * @return the chosen batchlog endpoints
 * @throws UnavailableException when no candidate endpoint is available and CL is stricter than ANY
 */
private static BatchlogEndpoints getBatchlogEndpoints(String localDataCenter, ConsistencyLevel consistencyLevel) throws UnavailableException
{
    TokenMetadata.Topology topology = StorageService.instance.getTokenMetadata().cachedOnlyTokenMap().getTopology();
    Multimap<String, InetAddress> dcEndpoints = HashMultimap.create(topology.getDatacenterRacks().get(localDataCenter));
    String rack = DatabaseDescriptor.getEndpointSnitch().getRack(FBUtilities.getBroadcastAddress());

    Collection<InetAddress> candidates = new BatchlogManager.EndpointFilter(rack, dcEndpoints).filter();
    if (!candidates.isEmpty())
        return new BatchlogEndpoints(candidates);

    // No usable endpoint: CL.ANY may fall back to ourselves; anything stricter fails.
    if (consistencyLevel == ConsistencyLevel.ANY)
        return new BatchlogEndpoints(Collections.singleton(FBUtilities.getBroadcastAddress()));
    throw new UnavailableException(ConsistencyLevel.ONE, 1, 0);
}
/**
 * Looks up the live replicas for {@code pos} in {@code keyspace} and orders them
 * by snitch-determined proximity to this node, closest first.
 *
 * @param keyspace keyspace whose replication determines the replica set
 * @param pos      ring position to locate replicas for
 * @return live natural endpoints sorted by proximity to the broadcast address
 */
public static List<InetAddress> getLiveSortedEndpoints(Keyspace keyspace, RingPosition pos)
{
    List<InetAddress> live = StorageService.instance.getLiveNaturalEndpoints(keyspace, pos);
    // Sorting happens in place; the same list instance is returned.
    DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getBroadcastAddress(), live);
    return live;
}
if (Schema.instance.getCFMetaData(desc) == null) if (!StorageService.instance.isJoined()) Collection<Range<Token>> ranges = StorageService.instance.getLocalRanges(keyspace.getName()); boolean hasIndexes = cfs.indexManager.hasIndexes(); SSTableReader sstable = lookupSSTable(cfs, entry.getValue()); CleanupStrategy cleanupStrategy = CleanupStrategy.get(cfs, ranges, FBUtilities.nowInSeconds()); try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstable, OperationType.CLEANUP))
CLibrary.tryMlockall(); listenPort = DatabaseDescriptor.getRpcPort(); listenAddr = DatabaseDescriptor.getRpcAddress(); for (CFMetaData cfm : DatabaseDescriptor.getTableMetaData(Table.SYSTEM_TABLE).values()) { ColumnFamilyStore.scrubDataDirectories(Table.SYSTEM_TABLE, cfm.cfName); StorageService.instance.registerDaemon(this); StorageService.instance.initServer();
/**
 * Tells whether this node is a replica — natural or pending — for the given
 * mutation's token, i.e. whether the mutation should be applied locally.
 *
 * @param mutation mutation to test
 * @return true if this node's broadcast address is among the token's replicas
 */
public boolean appliesLocally(Mutation mutation)
{
    String keyspaceName = mutation.getKeyspaceName();
    Token token = mutation.key().getToken();
    InetAddress self = FBUtilities.getBroadcastAddress();

    if (StorageService.instance.getNaturalEndpoints(keyspaceName, token).contains(self))
        return true;
    return StorageService.instance.getTokenMetadata().pendingEndpointsFor(token, keyspaceName).contains(self);
}
InetAddress localAddress = FBUtilities.getBroadcastAddress(); if (getTokenMetadata().getTokens(localAddress).size() > 1) List<String> keyspacesToProcess = Schema.instance.getNonSystemKeyspaces(); setMode(Mode.MOVING, String.format("Moving %s from %s to %s.", localAddress, getLocalTokens().iterator().next(), newToken), true); setMode(Mode.MOVING, String.format("Sleeping %s ms before start streaming/fetching ranges", RING_DELAY), true); Uninterruptibles.sleepUninterruptibly(RING_DELAY, TimeUnit.MILLISECONDS); setMode(Mode.MOVING, "fetching new ranges and streaming old ranges", true); try setMode(Mode.MOVING, "No ranges to fetch/stream", true); setTokens(Collections.singleton(newToken)); // setting new token as we have everything settled logger.debug("Successfully moved to new token {}", getLocalTokens().iterator().next());
/**
 * Resolves which live node should serve the given sub-index: the sub-index name
 * is hashed to a ring position, and this node is preferred whenever it is itself
 * a replica; otherwise the snitch-closest live replica is chosen.
 *
 * @param subIndex sub-index identifier, hashed to locate its replicas
 * @return the endpoint to route index operations to
 * @throws RuntimeException when no live replica exists for the sub-index
 */
private InetAddress getIndexLocation(String subIndex)
{
    // NOTE(review): getBytes() uses the platform default charset; nodes with differing
    // defaults would hash the same sub-index to different positions — confirm intended.
    ByteBuffer indexName = CassandraUtils.hashBytes(subIndex.getBytes());

    List<InetAddress> live = StorageService.instance.getLiveNaturalEndpoints(CassandraUtils.keySpace, indexName);
    if (live.isEmpty())
        throw new RuntimeException("Unable to find a live endpoint for: " + subIndex);

    DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getLocalAddress(), live);

    InetAddress self = FBUtilities.getLocalAddress();
    return live.contains(self) ? self : live.get(0);
}
public void validate(String name) { // Attempt to instantiate the ARS, which will throw a ConfigurationException if the options aren't valid. TokenMetadata tmd = StorageService.instance.getTokenMetadata(); IEndpointSnitch eps = DatabaseDescriptor.getEndpointSnitch(); AbstractReplicationStrategy.validateReplicationStrategy(name, klass, tmd, eps, options); }
RangeStreamer streamer = new RangeStreamer(tokenMetadata, FBUtilities.getBroadcastAddress(), "Rebuild"); streamer.addSourceFilter(new RangeStreamer.FailureDetectorSourceFilter(FailureDetector.instance)); if (sourceDc != null) streamer.addSourceFilter(new RangeStreamer.SingleDatacenterFilter(DatabaseDescriptor.getEndpointSnitch(), sourceDc)); for (String keyspaceName : Schema.instance.getNonSystemKeyspaces()) streamer.addRanges(keyspaceName, getLocalRanges(keyspaceName));