List of usage examples for the java.util.UUID constructor UUID(long, long)
public UUID(long mostSigBits, long leastSigBits)
From source file:org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java
/**
 * Builds the graph of segments reachable from an initial set of segments.
 * Each reachable data segment's outgoing references are reported to the
 * visitor and added back to {@code roots} so they are followed in turn.
 *
 * @param roots   the initial set of segment ids; mutated during traversal
 *                (matched ids are removed, discovered references are added)
 * @param visitor visitor receiving a callback for every traversed edge; the
 *                second argument is {@code null} when the segment has no
 *                followable references
 * @throws IOException if the pre-compiled segment graph cannot be read
 */
public void traverseSegmentGraph(@Nonnull Set<UUID> roots, @Nonnull SegmentGraphVisitor visitor)
        throws IOException {
    checkNotNull(roots);
    checkNotNull(visitor);
    Map<UUID, List<UUID>> graph = getGraph();
    TarEntry[] entries = getEntries();
    // Iterate newest-to-oldest so a segment is visited before the (older)
    // segments it references.
    for (int i = entries.length - 1; i >= 0; i--) {
        TarEntry entry = entries[i];
        UUID id = new UUID(entry.msb(), entry.lsb());
        if (roots.remove(id) && isDataSegmentId(entry.lsb())) {
            // this is a referenced data segment, so follow the graph
            List<UUID> refIds = getReferences(entry, id, graph);
            if (refIds != null) {
                for (UUID refId : refIds) {
                    visitor.accept(id, refId);
                    roots.add(refId);
                }
            } else {
                // no reference information available for this segment
                visitor.accept(id, null);
            }
        } else {
            // this segment is not referenced anywhere
            visitor.accept(id, null);
        }
    }
}
From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java
/**
 * Encodes a BlobDB delete command for the notification identified by
 * {@code id}. The notification's UUID is derived deterministically from the
 * Gadgetbridge UUID mask and the id.
 *
 * @param id the Gadgetbridge-side notification id
 * @return the encoded BlobDB delete message
 */
@Override
public byte[] encodeDeleteNotification(int id) {
    UUID notificationUuid = new UUID(GB_UUID_MASK, id);
    return encodeBlobdb(notificationUuid, BLOBDB_DELETE, BLOBDB_NOTIFICATION, null);
}
From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java
/**
 * Builds the graph of segments reachable from an initial set of segments.
 * Each reachable data segment's outgoing references are reported to the
 * visitor and added back to {@code roots} so they are followed in turn.
 *
 * @param roots   the initial set of segment ids; mutated during traversal
 *                (matched ids are removed, discovered references are added)
 * @param visitor visitor receiving a callback for every traversed edge; the
 *                second argument is {@code null} for unreferenced segments
 * @throws IOException if the pre-compiled segment graph cannot be read
 */
public void traverseSegmentGraph(@Nonnull Set<UUID> roots, @Nonnull SegmentGraphVisitor visitor)
        throws IOException {
    checkNotNull(roots);
    checkNotNull(visitor);
    // false: bulk references are not included in the graph
    Map<UUID, List<UUID>> graph = getGraph(false);
    TarEntry[] entries = getEntries();
    // Iterate newest-to-oldest so a segment is visited before the (older)
    // segments it references.
    for (int i = entries.length - 1; i >= 0; i--) {
        TarEntry entry = entries[i];
        UUID id = new UUID(entry.msb(), entry.lsb());
        if (roots.remove(id) && isDataSegmentId(entry.lsb())) {
            // this is a referenced data segment, so follow the graph
            for (UUID refId : getReferences(entry, id, graph)) {
                visitor.accept(id, refId);
                roots.add(refId);
            }
        } else {
            // this segment is not referenced anywhere
            visitor.accept(id, null);
        }
    }
}
From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java
@Override public byte[] encodeAddCalendarEvent(CalendarEventSpec calendarEventSpec) { long id = calendarEventSpec.id != -1 ? calendarEventSpec.id : mRandom.nextLong(); int iconId;//from w w w .j a v a2 s .co m switch (calendarEventSpec.type) { case CalendarEventSpec.TYPE_SUNRISE: iconId = PebbleIconID.SUNRISE; break; case CalendarEventSpec.TYPE_SUNSET: iconId = PebbleIconID.SUNSET; break; default: iconId = PebbleIconID.TIMELINE_CALENDAR; } return encodeTimelinePin(new UUID(GB_UUID_MASK | calendarEventSpec.type, id), calendarEventSpec.timestamp, (short) calendarEventSpec.durationInSeconds, iconId, calendarEventSpec.title, calendarEventSpec.description); }
From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java
/**
 * Encodes a BlobDB delete command for the timeline pin of a calendar event.
 * The pin's UUID is derived from the Gadgetbridge UUID mask, the event type
 * and the event id, mirroring how the pin was created.
 *
 * @param type the calendar event type
 * @param id   the event id
 * @return the encoded BlobDB delete message
 */
@Override
public byte[] encodeDeleteCalendarEvent(byte type, long id) {
    UUID pinUuid = new UUID(GB_UUID_MASK | type, id);
    return encodeBlobdb(pinUuid, BLOBDB_DELETE, BLOBDB_PIN, null);
}
From source file:org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java
/**
 * Calculate the ids of the segments directly referenced from {@code referencedIds}
 * through forward references.
 *
 * @param referencedIds The initial set of ids to start from. On return it
 *                      contains the set of direct forward references.
 * @throws IOException if the pre-compiled segment graph cannot be read
 */
void calculateForwardReferences(Set<UUID> referencedIds) throws IOException {
    Map<UUID, List<UUID>> graph = getGraph();
    TarEntry[] entries = getEntries();
    // Iterate newest-to-oldest so references (which point to older entries)
    // are collected before those entries are examined.
    for (int i = entries.length - 1; i >= 0; i--) {
        TarEntry entry = entries[i];
        UUID id = new UUID(entry.msb(), entry.lsb());
        if (referencedIds.remove(id)) {
            if (isDataSegmentId(entry.lsb())) {
                // this is a referenced data segment, so follow the graph
                List<UUID> refIds = getReferences(entry, id, graph);
                if (refIds != null) {
                    referencedIds.addAll(refIds);
                }
            }
        }
    }
}
From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java
/**
 * Calculate the ids of the segments directly referenced from {@code referencedIds}
 * through forward references.
 *
 * @param referencedIds The initial set of ids to start from. On return it
 *                      contains the set of direct forward references.
 * @throws IOException if the pre-compiled segment graph cannot be read
 */
void calculateForwardReferences(Set<UUID> referencedIds) throws IOException {
    // false: bulk references are not included in the graph
    Map<UUID, List<UUID>> graph = getGraph(false);
    TarEntry[] entries = getEntries();
    // Iterate newest-to-oldest so references (which point to older entries)
    // are collected before those entries are examined.
    for (int i = entries.length - 1; i >= 0; i--) {
        TarEntry entry = entries[i];
        UUID id = new UUID(entry.msb(), entry.lsb());
        if (referencedIds.remove(id)) {
            if (isDataSegmentId(entry.lsb())) {
                // follow the forward references of this data segment
                referencedIds.addAll(getReferences(entry, id, graph));
            }
        }
    }
}
From source file:org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java
/** * Garbage collects segments in this file. First it collects the set of * segments that are referenced / reachable, then (if more than 25% is * garbage) creates a new generation of the file. * <p>/*w w w . j a va 2s . c o m*/ * The old generation files are not removed (they can't easily be removed, * for memory mapped files). * * @param referencedIds the referenced segment ids (input and output). * @param removed a set which will receive the uuids of all segments that * have been cleaned. * @return this (if the file is kept as is), or the new generation file, or * null if the file is fully garbage */ synchronized TarReader cleanup(Set<UUID> referencedIds, Set<UUID> removed) throws IOException { String name = file.getName(); log.debug("Cleaning up {}", name); Set<UUID> cleaned = newHashSet(); Map<UUID, List<UUID>> graph = getGraph(); TarEntry[] entries = getEntries(); int size = 0; int count = 0; for (int i = entries.length - 1; i >= 0; i--) { TarEntry entry = entries[i]; UUID id = new UUID(entry.msb(), entry.lsb()); if (!referencedIds.remove(id)) { // this segment is not referenced anywhere cleaned.add(id); entries[i] = null; } else { size += getEntrySize(entry.size()); count += 1; if (isDataSegmentId(entry.lsb())) { // this is a referenced data segment, so follow the graph List<UUID> refIds = getReferences(entry, id, graph); if (refIds != null) { referencedIds.addAll(refIds); } } } } size += getEntrySize(24 * count + 16); size += 2 * BLOCK_SIZE; if (count == 0) { log.debug("None of the entries of {} are referenceable.", name); removed.addAll(cleaned); logCleanedSegments(cleaned); return null; } else if (size >= access.length() * 3 / 4 && graph != null) { // the space savings are not worth it at less than 25%, // unless this tar file lacks a pre-compiled segment graph // in which case we'll always generate a new tar file with // the graph to speed up future garbage collection runs. log.debug("Not enough space savings. ({}/{}). 
Skipping clean up of {}", access.length() - size, access.length(), name); return this; } int pos = name.length() - "a.tar".length(); char generation = name.charAt(pos); if (generation == 'z') { log.debug("No garbage collection after reaching generation z: {}", name); return this; } File newFile = new File(file.getParentFile(), name.substring(0, pos) + (char) (generation + 1) + ".tar"); log.debug("Writing new generation {}", newFile.getName()); TarWriter writer = new TarWriter(newFile); for (TarEntry entry : entries) { if (entry != null) { byte[] data = new byte[entry.size()]; access.read(entry.offset(), entry.size()).get(data); writer.writeEntry(entry.msb(), entry.lsb(), data, 0, entry.size()); } } writer.close(); TarReader reader = openFirstFileWithValidIndex(singletonList(newFile), access.isMemoryMapped()); if (reader != null) { logCleanedSegments(cleaned); removed.addAll(cleaned); return reader; } else { log.warn("Failed to open cleaned up tar file {}", file); return this; } }
From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java
/**
 * Collect reclaimable segments.
 * A data segment is reclaimable iff its generation is in the {@code reclaimGeneration}
 * predicate.
 * A bulk segment is reclaimable if it is in {@code bulkRefs} or if it is transitively
 * reachable through a non reclaimable data segment.
 *
 * @param bulkRefs          bulk segment gc roots; mutated during the scan
 * @param reclaim           receives the ids of reclaimable segments
 * @param reclaimGeneration reclaim generation predicate for data segments
 * @throws IOException if the pre-compiled segment graph cannot be read
 */
void mark(Set<UUID> bulkRefs, Set<UUID> reclaim, Predicate<Integer> reclaimGeneration)
        throws IOException {
    // true: include bulk references so transitively reachable bulk segments
    // can be traced through the graph
    Map<UUID, List<UUID>> graph = getGraph(true);
    TarEntry[] entries = getEntries();
    // Iterate newest-to-oldest so a data segment's references are seen
    // before the (older) bulk segments they protect from reclamation.
    for (int i = entries.length - 1; i >= 0; i--) {
        TarEntry entry = entries[i];
        UUID id = new UUID(entry.msb(), entry.lsb());
        if ((!isDataSegmentId(entry.lsb()) && !bulkRefs.remove(id))
                || (isDataSegmentId(entry.lsb()) && reclaimGeneration.apply(entry.generation()))) {
            // non referenced bulk segment or old data segment
            reclaim.add(id);
        } else {
            if (isDataSegmentId(entry.lsb())) {
                for (UUID refId : getReferences(entry, id, graph)) {
                    if (!isDataSegmentId(refId.getLeastSignificantBits())) {
                        // keep the extra check for bulk segments for the case where a
                        // pre-compiled graph is not available and getReferences also
                        // includes data references
                        // NOTE(review): removing `id` (the current segment) rather
                        // than `refId` from `reclaim` looks suspicious here --
                        // confirm against upstream history before changing.
                        if (!reclaim.remove(id)) {
                            bulkRefs.add(refId);
                        }
                    }
                }
            }
        }
    }
}
From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java
/** * Remove reclaimable segments and collect actually reclaimed segments. * @param reclaim segments to reclaim * @param reclaimed actually reclaimed segments * @return reader resulting from the reclamation process * @throws IOException//from w w w .j a v a 2 s . co m */ TarReader sweep(@Nonnull Set<UUID> reclaim, @Nonnull Set<UUID> reclaimed) throws IOException { String name = file.getName(); log.debug("Cleaning up {}", name); Set<UUID> cleaned = newHashSet(); int size = 0; int count = 0; TarEntry[] entries = getEntries(); for (int i = 0; i < entries.length; i++) { TarEntry entry = entries[i]; UUID id = new UUID(entry.msb(), entry.lsb()); if (reclaim.contains(id)) { cleaned.add(id); entries[i] = null; } else { size += getEntrySize(entry.size()); count += 1; } } size += getEntrySize(TarEntry.SIZE * count + 16); size += 2 * BLOCK_SIZE; if (count == 0) { log.debug("None of the entries of {} are referenceable.", name); logCleanedSegments(cleaned); return null; } if (size >= access.length() * 3 / 4 && hasGraph()) { // the space savings are not worth it at less than 25%, // unless this tar file lacks a pre-compiled segment graph // in which case we'll always generate a new tar file with // the graph to speed up future garbage collection runs. log.debug("Not enough space savings. ({}/{}). 
Skipping clean up of {}", access.length() - size, access.length(), name); return this; } if (!hasGraph()) { log.warn("Recovering {}, which is missing its graph.", name); } int pos = name.length() - "a.tar".length(); char generation = name.charAt(pos); if (generation == 'z') { log.debug("No garbage collection after reaching generation z: {}", name); return this; } File newFile = new File(file.getParentFile(), name.substring(0, pos) + (char) (generation + 1) + ".tar"); log.debug("Writing new generation {}", newFile.getName()); TarWriter writer = new TarWriter(newFile); for (TarEntry entry : entries) { if (entry != null) { byte[] data = new byte[entry.size()]; access.read(entry.offset(), entry.size()).get(data); writer.writeEntry(entry.msb(), entry.lsb(), data, 0, entry.size(), entry.generation()); } } writer.close(); TarReader reader = openFirstFileWithValidIndex(singletonList(newFile), access.isMemoryMapped()); if (reader != null) { logCleanedSegments(cleaned); reclaimed.addAll(cleaned); return reader; } else { log.warn("Failed to open cleaned up tar file {}", file); return this; } }