static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
{
    Descriptor newdesc = tmpdesc.asType(Descriptor.Type.FINAL);
    rename(tmpdesc, newdesc, components);
    return newdesc;
}
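/*
 * Hedged usage sketch (not from the source): once a writer has finished all of
 * its components, its TEMP descriptor can be promoted to FINAL in one call.
 * 'writer' and 'components' are assumed placeholders for what the caller holds.
 */
Descriptor finaldesc = rename(writer.descriptor, components);
// finaldesc.filenameFor(Component.DATA) now names the renamed, final on-disk file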
public void create(Collection<SSTableWriter> sstables)
{
    List<String> sstablePaths = new ArrayList<>(sstables.size());
    for (SSTableWriter writer : sstables)
    {
        /* Write out the file names *without* the 'tmp' flag in the file name.
           This class does not need to clean up tmp files on restart; CassandraDaemon
           already does that. We only need to make sure we delete the fully-formed SSTables. */
        sstablePaths.add(writer.descriptor.asType(Descriptor.Type.FINAL).baseFilename());
    }

    try
    {
        Files.write(lockfile.toPath(), sstablePaths, Charsets.UTF_8,
                    StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, StandardOpenOption.DSYNC);
    }
    catch (IOException e)
    {
        logger.warn(String.format("Could not create lockfile %s for stream session, nothing to worry too much about", lockfile), e);
    }
}
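/*
 * Hedged sketch of the complementary cleanup path (assumed, not shown here):
 * read the lockfile back line by line and delete the fully-formed SSTables it
 * names. The lines hold baseFilename() output with no component suffix, hence
 * the skip-component flag; treat the exact calls as assumptions.
 */
public void cleanup() throws IOException
{
    for (String line : Files.readAllLines(lockfile.toPath(), Charsets.UTF_8))
    {
        Descriptor desc = Descriptor.fromFilename(line, true);
        SSTable.delete(desc, SSTable.componentsFor(desc));
    }
}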
public static Ref<DescriptorTypeTidy> get(SSTableReader sstable)
{
    Descriptor desc = sstable.descriptor;
    if (sstable.openReason == OpenReason.EARLY)
        desc = desc.asType(Descriptor.Type.TEMPLINK);
    Ref<DescriptorTypeTidy> refc = lookup.get(desc);
    if (refc != null)
        return refc.ref();
    final DescriptorTypeTidy tidy = new DescriptorTypeTidy(desc, sstable);
    refc = new Ref<>(tidy, tidy);
    Ref<?> ex = lookup.putIfAbsent(desc, refc);
    assert ex == null;
    return refc;
}
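/*
 * Hedged usage sketch (assumption): every reader over the same on-disk files
 * shares one DescriptorTypeTidy through the lookup above, so cleanup runs only
 * when the last outstanding reference is released.
 */
Ref<DescriptorTypeTidy> ref = DescriptorTypeTidy.get(sstable);
// ... reader is in use ...
ref.release(); // when the shared count reaches zero, the tidy runs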
private Descriptor makeTmpLinks()
{
    // create temp links if they don't already exist
    Descriptor link = descriptor.asType(Descriptor.Type.TEMPLINK);
    if (!new File(link.filenameFor(Component.PRIMARY_INDEX)).exists())
    {
        FileUtils.createHardLink(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)),
                                 new File(link.filenameFor(Component.PRIMARY_INDEX)));
        FileUtils.createHardLink(new File(descriptor.filenameFor(Component.DATA)),
                                 new File(link.filenameFor(Component.DATA)));
    }
    return link;
}
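/*
 * Hedged sketch (assumption): once the finished reader replaces the early-opened
 * one, the TEMPLINK hard links can simply be deleted; the data stays reachable
 * under the FINAL file names because hard links share the same inode.
 */
Descriptor link = descriptor.asType(Descriptor.Type.TEMPLINK);
FileUtils.deleteWithConfirm(new File(link.filenameFor(Component.PRIMARY_INDEX)));
FileUtils.deleteWithConfirm(new File(link.filenameFor(Component.DATA)));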
private void rewriteSSTableMetadata(Descriptor descriptor, Map<MetadataType, MetadataComponent> currentComponents) throws IOException
{
    Descriptor tmpDescriptor = descriptor.asType(Descriptor.Type.TEMP);

    try (DataOutputStreamAndChannel out = new DataOutputStreamAndChannel(new FileOutputStream(tmpDescriptor.filenameFor(Component.STATS))))
    {
        serialize(currentComponents, out);
        out.flush();
    }
    // we can't move a file on top of another file on Windows:
    if (FBUtilities.isWindows())
        FileUtils.delete(descriptor.filenameFor(Component.STATS));
    FileUtils.renameWithConfirm(tmpDescriptor.filenameFor(Component.STATS), descriptor.filenameFor(Component.STATS));
}
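/*
 * Hedged sketch of a typical caller (assumed to mirror the surrounding
 * serializer): deserialize all metadata components, swap in a mutated STATS
 * entry, and rewrite the -Statistics file via the temp-then-rename dance above.
 * 'deserialize' and 'StatsMetadata.mutateRepairedAt' are assumptions here.
 */
public void mutateRepairedAt(Descriptor descriptor, long newRepairedAt) throws IOException
{
    Map<MetadataType, MetadataComponent> currentComponents = deserialize(descriptor, EnumSet.allOf(MetadataType.class));
    StatsMetadata stats = (StatsMetadata) currentComponents.remove(MetadataType.STATS);
    currentComponents.put(MetadataType.STATS, stats.mutateRepairedAt(newRepairedAt));
    rewriteSSTableMetadata(descriptor, currentComponents);
}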
public SSTableReader openEarly(long maxDataAge)
{
    StatsMetadata sstableMetadata = (StatsMetadata) sstableMetadataCollector.finalizeMetadata(partitioner.getClass().getCanonicalName(),
                                                                                              metadata.getBloomFilterFpChance(),
                                                                                              repairedAt).get(MetadataType.STATS);
    // find the max (exclusive) readable key
    IndexSummaryBuilder.ReadableBoundary boundary = iwriter.getMaxReadable();
    if (boundary == null)
        return null;
    assert boundary.indexLength > 0 && boundary.dataLength > 0;

    Descriptor link = makeTmpLinks();
    // open the reader early, giving it a FINAL descriptor type so that it is indistinguishable to other consumers
    SegmentedFile ifile = iwriter.builder.complete(link.filenameFor(Component.PRIMARY_INDEX), boundary.indexLength);
    SegmentedFile dfile = dbuilder.complete(link.filenameFor(Component.DATA), boundary.dataLength);
    SSTableReader sstable = SSTableReader.internalOpen(descriptor.asType(Descriptor.Type.FINAL),
                                                       components, metadata, partitioner,
                                                       ifile, dfile,
                                                       iwriter.summary.build(partitioner, boundary),
                                                       iwriter.bf.sharedCopy(),
                                                       maxDataAge, sstableMetadata,
                                                       SSTableReader.OpenReason.EARLY);

    // now that it's open, find the ACTUAL last readable key (i.e. one for which the data file has also been flushed)
    sstable.first = getMinimalKey(first);
    sstable.last = getMinimalKey(boundary.lastKey);
    return sstable;
}
SSTableReader sstable = SSTableReader.internalOpen(desc.asType(Descriptor.Type.FINAL), components, this.metadata,