List of usage examples for com.google.common.primitives Longs toByteArray
public static byte[] toByteArray(long value)
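Guava's Longs.toByteArray returns the big-endian 8-byte representation of the value; Longs.fromByteArray is its inverse. A minimal sketch of the round trip (assumes only Guava on the classpath):

import com.google.common.primitives.Longs;
import java.util.Arrays;

public class LongsToByteArrayDemo {
    public static void main(String[] args) {
        // toByteArray returns the 8-byte big-endian representation of the value
        byte[] bytes = Longs.toByteArray(0x0102030405060708L);
        System.out.println(Arrays.toString(bytes)); // [1, 2, 3, 4, 5, 6, 7, 8]

        // fromByteArray is the inverse for any array of at least 8 bytes
        long value = Longs.fromByteArray(bytes);
        System.out.println(value == 0x0102030405060708L); // true
    }
}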
From source file: org.opendaylight.openflowplugin.extension.vendor.nicira.convertor.match.MatchUtil.java
public static Ipv4Address longToIpv4Address(Long l) {
    byte[] bytes = Longs.toByteArray(l);
    String[] strArray = new String[4];
    for (int i = 4; i < bytes.length; i++) {
        strArray[i - 4] = UnsignedBytes.toString(bytes[i]);
    }
    String str = JOINER.join(strArray);
    Ipv4Address result = new Ipv4Address(str);
    return result;
}
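The helper above keeps only the last four bytes of the big-endian array, i.e. the low 32 bits of the long, and formats each as an unsigned octet. A minimal standalone sketch of the same idea, using Joiner.on('.') in place of the class's JOINER constant and a plain String instead of the OpenDaylight Ipv4Address type:

import com.google.common.base.Joiner;
import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;

public class LongToIpv4Demo {
    public static void main(String[] args) {
        long l = 0xC0A80001L; // 192.168.0.1 as an unsigned 32-bit value
        byte[] bytes = Longs.toByteArray(l);
        String[] octets = new String[4];
        for (int i = 4; i < bytes.length; i++) {
            // bytes[0..3] hold the high 32 bits and are ignored here
            octets[i - 4] = UnsignedBytes.toString(bytes[i]);
        }
        System.out.println(Joiner.on('.').join(octets)); // 192.168.0.1
    }
}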
From source file: co.paralleluniverse.galaxy.berkeleydb.BerkeleyDB.java
@Override
public MainMemoryEntry read(long id) {
    final DatabaseEntry dbEntry = new DatabaseEntry();
    OperationStatus status = mainStore.get(null, new DatabaseEntry(Longs.toByteArray(id)), dbEntry,
            LockMode.READ_COMMITTED);
    if (status == OperationStatus.SUCCESS) {
        final MainMemoryEntry entry = entryBinding.entryToObject(dbEntry);
        return entry;
    } else
        return null;
}
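Because toByteArray is big-endian, byte-wise (lexicographic) comparison of such keys matches numeric order for non-negative ids, which is why an 8-byte key works well in an ordered key-value store. A minimal sketch of that property using only Guava (no BerkeleyDB dependency):

import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
import java.util.Comparator;

public class KeyOrderDemo {
    public static void main(String[] args) {
        Comparator<byte[]> byteOrder = UnsignedBytes.lexicographicalComparator();
        byte[] k1 = Longs.toByteArray(41L);
        byte[] k2 = Longs.toByteArray(42L);
        // For non-negative values, lexicographic byte order equals numeric order
        System.out.println(byteOrder.compare(k1, k2) < 0); // true
    }
}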
From source file: co.paralleluniverse.galaxy.berkeleydb.BerkeleyDB.java
@Override
public void delete(long id, Object txn) {
    mainStore.delete((Transaction) txn, new DatabaseEntry(Longs.toByteArray(id)));
    ownerDirectory.delete((Transaction) txn, new DatabaseEntry(Longs.toByteArray(id)));
}
From source file: org.jooby.Asset.java
/**
 * @return Generate a weak Etag using the {@link #path()}, {@link #lastModified()} and
 *         {@link #length()}.
 */
@Nonnull
default String etag() {
    StringBuilder b = new StringBuilder(32);
    b.append("W/\"");
    BaseEncoding b64 = BaseEncoding.base64();
    int lhash = resource().hashCode();
    b.append(b64.encode(Longs.toByteArray(lastModified() ^ lhash)));
    b.append(b64.encode(Longs.toByteArray(length() ^ lhash)));
    b.append('"');
    return b.toString();
}
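A minimal standalone sketch of the same ETag construction, with placeholder values standing in for lastModified(), length(), and the resource hash:

import com.google.common.io.BaseEncoding;
import com.google.common.primitives.Longs;

public class WeakEtagDemo {
    public static void main(String[] args) {
        long lastModified = 1_690_000_000_000L;    // placeholder timestamp
        long length = 4096L;                       // placeholder content length
        int lhash = "/assets/app.js".hashCode();   // placeholder resource hash

        BaseEncoding b64 = BaseEncoding.base64();
        String etag = "W/\""
                + b64.encode(Longs.toByteArray(lastModified ^ lhash))
                + b64.encode(Longs.toByteArray(length ^ lhash))
                + "\"";
        // Prints a weak ETag of the form W/"<base64><base64>"
        System.out.println(etag);
    }
}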
From source file: org.apache.guacamole.totp.TOTPGenerator.java
/**
 * Generates a TOTP code of the given length using the given absolute
 * timestamp rather than the current system time.
 *
 * @param time
 *     The absolute timestamp to use to generate the TOTP code, in seconds
 *     since midnight, 1970-01-01, UTC (UNIX epoch).
 *
 * @return
 *     The TOTP code which corresponds to the given timestamp, having
 *     exactly the given length.
 *
 * @throws IllegalArgumentException
 *     If the given length is invalid as defined by the TOTP specification.
 */
public String generate(long time) {

    // Calculate HOTP counter value based on provided time
    long counter = (time - startTime) / timeStep;
    byte[] hash = getHMAC(Longs.toByteArray(counter));

    // Calculate HOTP value as defined by section 5.2 of RFC 4226:
    // https://tools.ietf.org/html/rfc4226#section-5.2
    int offset = hash[hash.length - 1] & 0xF;
    int binary = ((hash[offset] & 0x7F) << 24) | ((hash[offset + 1] & 0xFF) << 16)
            | ((hash[offset + 2] & 0xFF) << 8) | (hash[offset + 3] & 0xFF);

    // Truncate or pad the value accordingly
    return toCode(binary, length);
}
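RFC 4226 defines the HOTP counter as an 8-byte big-endian value, which is exactly what Longs.toByteArray produces. A minimal sketch of the counter-to-code path using the standard javax.crypto HMAC API and the RFC 4226 test key; getHMAC, startTime, timeStep and toCode from the class above are replaced by inline placeholders:

import com.google.common.primitives.Longs;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class TotpCounterDemo {
    public static void main(String[] args) throws Exception {
        byte[] key = "12345678901234567890".getBytes("US-ASCII"); // RFC 4226 test key
        long time = System.currentTimeMillis() / 1000L;
        long timeStep = 30L;               // typical TOTP step
        long counter = time / timeStep;    // startTime assumed to be 0 here

        // HMAC-SHA1 over the 8-byte big-endian counter
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(new SecretKeySpec(key, "HmacSHA1"));
        byte[] hash = mac.doFinal(Longs.toByteArray(counter));

        // Dynamic truncation as defined by RFC 4226
        int offset = hash[hash.length - 1] & 0xF;
        int binary = ((hash[offset] & 0x7F) << 24) | ((hash[offset + 1] & 0xFF) << 16)
                | ((hash[offset + 2] & 0xFF) << 8) | (hash[offset + 3] & 0xFF);
        System.out.printf("%06d%n", binary % 1_000_000); // 6-digit code
    }
}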
From source file: com.linkedin.pinot.integration.tests.ClusterIntegrationTestUtils.java
/**
 * Push the records from the given Avro files into a Kafka stream.
 *
 * @param avroFiles List of Avro files
 * @param kafkaBroker Kafka broker config
 * @param kafkaTopic Kafka topic
 * @param maxNumKafkaMessagesPerBatch Maximum number of Kafka messages per batch
 * @param header Optional Kafka message header
 * @param partitionColumn Optional partition column
 * @throws Exception
 */
public static void pushAvroIntoKafka(@Nonnull List<File> avroFiles, @Nonnull String kafkaBroker,
        @Nonnull String kafkaTopic, int maxNumKafkaMessagesPerBatch, @Nullable byte[] header,
        @Nullable String partitionColumn) throws Exception {
    Properties properties = new Properties();
    properties.put("metadata.broker.list", kafkaBroker);
    properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
    properties.put("request.required.acks", "1");
    properties.put("partitioner.class", "kafka.producer.ByteArrayPartitioner");

    ProducerConfig producerConfig = new ProducerConfig(properties);
    Producer<byte[], byte[]> producer = new Producer<>(producerConfig);

    try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream(65536)) {
        for (File avroFile : avroFiles) {
            try (DataFileStream<GenericRecord> reader = AvroUtils.getAvroReader(avroFile)) {
                BinaryEncoder binaryEncoder = new EncoderFactory().directBinaryEncoder(outputStream, null);
                GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(reader.getSchema());
                List<KeyedMessage<byte[], byte[]>> messagesToWrite = new ArrayList<>(
                        maxNumKafkaMessagesPerBatch);

                for (GenericRecord genericRecord : reader) {
                    outputStream.reset();
                    if (header != null && 0 < header.length) {
                        outputStream.write(header);
                    }
                    datumWriter.write(genericRecord, binaryEncoder);
                    binaryEncoder.flush();

                    byte[] keyBytes = (partitionColumn == null) ? Longs.toByteArray(System.currentTimeMillis())
                            : (genericRecord.get(partitionColumn)).toString().getBytes();
                    byte[] bytes = outputStream.toByteArray();
                    KeyedMessage<byte[], byte[]> data = new KeyedMessage<>(kafkaTopic, keyBytes, bytes);
                    messagesToWrite.add(data);

                    // Send a batch of messages
                    if (messagesToWrite.size() == maxNumKafkaMessagesPerBatch) {
                        producer.send(messagesToWrite);
                        messagesToWrite.clear();
                    }
                }

                // Send last batch of messages
                producer.send(messagesToWrite);
            }
        }
    }
}
From source file: org.voltdb.SnapshotSaveAPI.java
private void createSetup(String file_path, String file_nonce, SnapshotFormat format, long txnId, String data,
        SystemProcedureExecutionContext context, String hostname, final VoltTable result) {
    {
        SiteTracker tracker = context.getSiteTrackerForSnapshot();
        final int numLocalSites = (tracker.getLocalSites().length - recoveringSiteCount.get());

        // non-null if targeting only one site (used for rejoin)
        // set later from the "data" JSON string
        Long targetHSid = null;

        MessageDigest digest;
        try {
            digest = MessageDigest.getInstance("SHA-1");
        } catch (NoSuchAlgorithmException e) {
            throw new AssertionError(e);
        }

        /*
         * List of partitions to include if this snapshot is
         * going to be deduped. Attempts to break up the work
         * by seeding an RNG selecting
         * a random replica to do the work. Will not work in failure
         * cases, but we don't use dedupe when we want durability.
         *
         * Originally used the partition id as the seed, but it turns out
         * that nextInt(2) returns a 1 for seeds 0-4095. Now use SHA-1
         * on the txnid + partition id.
         */
        List<Integer> partitionsToInclude = new ArrayList<Integer>();
        List<Long> sitesToInclude = new ArrayList<Long>();
        for (long localSite : tracker.getLocalSites()) {
            final int partitionId = tracker.getPartitionForSite(localSite);
            List<Long> sites = new ArrayList<Long>(
                    tracker.getSitesForPartition(tracker.getPartitionForSite(localSite)));
            Collections.sort(sites);

            digest.update(Longs.toByteArray(txnId));
            final long seed = Longs
                    .fromByteArray(Arrays.copyOf(digest.digest(Ints.toByteArray(partitionId)), 8));

            int siteIndex = new java.util.Random(seed).nextInt(sites.size());
            if (localSite == sites.get(siteIndex)) {
                partitionsToInclude.add(partitionId);
                sitesToInclude.add(localSite);
            }
        }
        assert (partitionsToInclude.size() == sitesToInclude.size());

        /*
         * Used to close targets on failure
         */
        final ArrayList<SnapshotDataTarget> targets = new ArrayList<SnapshotDataTarget>();
        try {
            final ArrayDeque<SnapshotTableTask> partitionedSnapshotTasks = new ArrayDeque<SnapshotTableTask>();
            final ArrayList<SnapshotTableTask> replicatedSnapshotTasks = new ArrayList<SnapshotTableTask>();
            assert (SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.get() == -1);

            final List<Table> tables = SnapshotUtil.getTablesToSave(context.getDatabase());

            if (format.isFileBased()) {
                Runnable completionTask = SnapshotUtil.writeSnapshotDigest(txnId, context.getCatalogCRC(),
                        file_path, file_nonce, tables, context.getHostId(),
                        SnapshotSiteProcessor.getExportSequenceNumbers());
                if (completionTask != null) {
                    SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
                }
                completionTask = SnapshotUtil.writeSnapshotCatalog(file_path, file_nonce);
                if (completionTask != null) {
                    SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
                }
            }

            final AtomicInteger numTables = new AtomicInteger(tables.size());
            final SnapshotRegistry.Snapshot snapshotRecord = SnapshotRegistry.startSnapshot(txnId,
                    context.getHostId(), file_path, file_nonce, format, tables.toArray(new Table[0]));

            SnapshotDataTarget sdt = null;
            if (!format.isTableBased()) {
                // table schemas for all the tables we'll snapshot on this partition
                Map<Integer, byte[]> schemas = new HashMap<Integer, byte[]>();
                for (final Table table : SnapshotUtil.getTablesToSave(context.getDatabase())) {
                    VoltTable schemaTable = CatalogUtil.getVoltTable(table);
                    schemas.put(table.getRelativeIndex(), schemaTable.getSchemaBytes());
                }

                if (format == SnapshotFormat.STREAM && data != null) {
                    JSONObject jsObj = new JSONObject(data);
                    long hsId = jsObj.getLong("hsId");

                    // if a target_hsid exists, set it for filtering a snapshot for a specific site
                    try {
                        targetHSid = jsObj.getLong("target_hsid");
                    } catch (JSONException e) {
                        // leave value as null on exception
                    }

                    // if this snapshot targets a specific site...
                    if (targetHSid != null) {
                        // get the list of sites on this node
                        List<Long> localHSids = tracker.getSitesForHost(context.getHostId());
                        // if the target site is local to this node...
                        if (localHSids.contains(targetHSid)) {
                            sdt = new StreamSnapshotDataTarget(hsId, schemas);
                        } else {
                            sdt = new DevNullSnapshotTarget();
                        }
                    }
                }
            }

            for (final Table table : SnapshotUtil.getTablesToSave(context.getDatabase())) {
                /*
                 * For a deduped csv snapshot, only produce the replicated tables on the "leader"
                 * host.
                 */
                if (format == SnapshotFormat.CSV && table.getIsreplicated() && !tracker.isFirstHost()) {
                    snapshotRecord.removeTable(table.getTypeName());
                    continue;
                }
                String canSnapshot = "SUCCESS";
                String err_msg = "";

                File saveFilePath = null;
                if (format.isFileBased()) {
                    saveFilePath = SnapshotUtil.constructFileForTable(table, file_path, file_nonce, format,
                            context.getHostId());
                }

                try {
                    if (format == SnapshotFormat.CSV) {
                        sdt = new SimpleFileSnapshotDataTarget(saveFilePath);
                    } else if (format == SnapshotFormat.NATIVE) {
                        sdt = constructSnapshotDataTargetForTable(context, saveFilePath, table,
                                context.getHostId(), tracker.m_numberOfPartitions, txnId);
                    }

                    if (sdt == null) {
                        throw new IOException("Unable to create snapshot target");
                    }

                    targets.add(sdt);
                    final SnapshotDataTarget sdtFinal = sdt;
                    final Runnable onClose = new Runnable() {
                        @SuppressWarnings("synthetic-access")
                        @Override
                        public void run() {
                            snapshotRecord.updateTable(table.getTypeName(),
                                    new SnapshotRegistry.Snapshot.TableUpdater() {
                                        @Override
                                        public SnapshotRegistry.Snapshot.Table update(
                                                SnapshotRegistry.Snapshot.Table registryTable) {
                                            return snapshotRecord.new Table(registryTable,
                                                    sdtFinal.getBytesWritten(),
                                                    sdtFinal.getLastWriteException());
                                        }
                                    });
                            int tablesLeft = numTables.decrementAndGet();
                            if (tablesLeft == 0) {
                                final SnapshotRegistry.Snapshot completed = SnapshotRegistry
                                        .finishSnapshot(snapshotRecord);
                                final double duration = (completed.timeFinished
                                        - org.voltdb.TransactionIdManager
                                                .getTimestampFromTransactionId(completed.txnId)) / 1000.0;
                                HOST_LOG.info("Snapshot " + snapshotRecord.nonce + " finished at "
                                        + completed.timeFinished + " and took " + duration + " seconds ");
                            }
                        }
                    };
                    sdt.setOnCloseHandler(onClose);

                    List<SnapshotDataFilter> filters = new ArrayList<SnapshotDataFilter>();
                    if (format == SnapshotFormat.CSV) {
                        /*
                         * Don't need to do filtering on a replicated table.
                         */
                        if (!table.getIsreplicated()) {
                            filters.add(new PartitionProjectionSnapshotFilter(
                                    Ints.toArray(partitionsToInclude), 0));
                        }
                        filters.add(new CSVSnapshotFilter(CatalogUtil.getVoltTable(table), ',', null));
                    }

                    // if this snapshot targets a specific site...
                    if (targetHSid != null) {
                        // get the list of sites on this node
                        List<Long> localHSids = tracker.getSitesForHost(context.getHostId());
                        // if the target site is local to this node...
                        if (localHSids.contains(targetHSid)) {
                            // ...get its partition id...
                            int partitionId = tracker.getPartitionForSite(targetHSid);
                            // ...and build a filter to only get that partition
                            filters.add(new PartitionProjectionSnapshotFilter(new int[] { partitionId },
                                    sdt.getHeaderSize()));
                        } else {
                            // filter EVERYTHING because the site we want isn't local
                            filters.add(new PartitionProjectionSnapshotFilter(new int[0],
                                    sdt.getHeaderSize()));
                        }
                    }

                    final SnapshotTableTask task = new SnapshotTableTask(table.getRelativeIndex(), sdt,
                            filters.toArray(new SnapshotDataFilter[filters.size()]), table.getIsreplicated(),
                            table.getTypeName());

                    if (table.getIsreplicated()) {
                        replicatedSnapshotTasks.add(task);
                    } else {
                        partitionedSnapshotTasks.offer(task);
                    }
                } catch (IOException ex) {
                    /*
                     * Creation of this specific target failed. Close it if it was created.
                     * Continue attempting the snapshot anyways so that at least some of the data
                     * can be retrieved.
                     */
                    try {
                        if (sdt != null) {
                            targets.remove(sdt);
                            sdt.close();
                        }
                    } catch (Exception e) {
                        HOST_LOG.error(e);
                    }

                    StringWriter sw = new StringWriter();
                    PrintWriter pw = new PrintWriter(sw);
                    ex.printStackTrace(pw);
                    pw.flush();
                    canSnapshot = "FAILURE";
                    err_msg = "SNAPSHOT INITIATION OF " + file_nonce + "RESULTED IN IOException: \n"
                            + sw.toString();
                }

                result.addRow(context.getHostId(), hostname, table.getTypeName(), canSnapshot, err_msg);
            }

            synchronized (SnapshotSiteProcessor.m_taskListsForSites) {
                boolean aborted = false;
                if (!partitionedSnapshotTasks.isEmpty() || !replicatedSnapshotTasks.isEmpty()) {
                    SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.set(numLocalSites);
                    for (int ii = 0; ii < numLocalSites; ii++) {
                        SnapshotSiteProcessor.m_taskListsForSites.add(new ArrayDeque<SnapshotTableTask>());
                    }
                } else {
                    SnapshotRegistry.discardSnapshot(snapshotRecord);
                    aborted = true;
                }

                /**
                 * Distribute the writing of replicated tables to exactly one partition.
                 */
                for (int ii = 0; ii < numLocalSites && !partitionedSnapshotTasks.isEmpty(); ii++) {
                    SnapshotSiteProcessor.m_taskListsForSites.get(ii).addAll(partitionedSnapshotTasks);
                    if (!format.isTableBased()) {
                        SnapshotSiteProcessor.m_taskListsForSites.get(ii).addAll(replicatedSnapshotTasks);
                    }
                }

                if (format.isTableBased()) {
                    int siteIndex = 0;
                    for (SnapshotTableTask t : replicatedSnapshotTasks) {
                        SnapshotSiteProcessor.m_taskListsForSites.get(siteIndex++ % numLocalSites).offer(t);
                    }
                }
                if (!aborted) {
                    logSnapshotStartToZK(txnId, context, file_nonce);
                }
            }
        } catch (Exception ex) {
            /*
             * Close all the targets to release the threads. Don't let sites get any tasks.
             */
            SnapshotSiteProcessor.m_taskListsForSites.clear();
            for (SnapshotDataTarget sdt : targets) {
                try {
                    sdt.close();
                } catch (Exception e) {
                    HOST_LOG.error(ex);
                }
            }

            StringWriter sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            ex.printStackTrace(pw);
            pw.flush();
            result.addRow(context.getHostId(), hostname, "", "FAILURE", "SNAPSHOT INITIATION OF " + file_path
                    + file_nonce + "RESULTED IN Exception: \n" + sw.toString());
            HOST_LOG.error(ex);
        } finally {
            SnapshotSiteProcessor.m_snapshotPermits.release(numLocalSites);
        }
    }
}
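The deduplication logic above seeds a Random by hashing the transaction id and partition id with SHA-1 and taking the first 8 bytes of the digest as a long. A minimal sketch of that seed derivation with placeholder values (plain JDK plus Guava, no VoltDB types):

import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.Random;

public class SnapshotSeedDemo {
    public static void main(String[] args) throws Exception {
        long txnId = 123456789L; // placeholder transaction id
        int partitionId = 3;     // placeholder partition id

        // SHA-1 over the big-endian txnId followed by the big-endian partitionId
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        digest.update(Longs.toByteArray(txnId));
        long seed = Longs.fromByteArray(Arrays.copyOf(digest.digest(Ints.toByteArray(partitionId)), 8));

        // Use the seed to pick one replica out of e.g. 3
        int replicaIndex = new Random(seed).nextInt(3);
        System.out.println(replicaIndex);
    }
}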
From source file: org.apache.apex.malhar.lib.wal.FSWindowDataManager.java
/**
 * Save writes 2 entries to the wal: <br/>
 * <ol>
 * <li>window id</li>
 * <li>artifact</li>
 * </ol>
 * Note: The wal is being used in batch mode so the part file will never be rotated between the 2 entries.<br/>
 * The wal part file may be rotated after both the entries, when
 * {@link FileSystemWAL.FileSystemWALWriter#rotateIfNecessary()} is triggered.
 *
 * @param object state
 * @param windowId window id
 * @throws IOException
 */
@Override
public void save(Object object, long windowId) throws IOException {
    closeReaders();
    FileSystemWAL.FileSystemWALWriter writer = wal.getWriter();

    byte[] windowIdBytes = Longs.toByteArray(windowId);
    writer.append(new Slice(windowIdBytes));

    /*
     * writer.append() copies the data to the file output stream, so the data in the buffer is not
     * needed any more and it is safe to reset the serializationBuffer.
     *
     * Since the data in stream memory can be cleaned all at once, there is no need to separate data
     * by window, so beginWindow() and endWindow() don't need to be called.
     */
    writer.append(toSlice(object));
    serializationBuffer.reset();

    wal.beforeCheckpoint(windowId);
    wal.windowWalParts.put(windowId, writer.getCurrentPointer().getPartNum());
    writer.rotateIfNecessary();
}
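The save path above writes an 8-byte big-endian window id entry followed by the serialized artifact. A minimal sketch of the same framing with plain streams and placeholder data (no Apex/Malhar Slice or WAL types):

import com.google.common.primitives.Longs;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;

public class WindowFramingDemo {
    public static void main(String[] args) throws IOException {
        long windowId = 42L;
        byte[] artifact = "operator state".getBytes("UTF-8"); // placeholder payload

        ByteArrayOutputStream wal = new ByteArrayOutputStream();
        wal.write(Longs.toByteArray(windowId)); // entry 1: the window id
        wal.write(artifact);                    // entry 2: the artifact

        // A reader recovers the window id from the first 8 bytes
        byte[] bytes = wal.toByteArray();
        long recovered = Longs.fromByteArray(Arrays.copyOf(bytes, 8));
        System.out.println(recovered); // 42
    }
}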
From source file: com.linkedin.pinot.integration.tests.ClusterIntegrationTestUtils.java
/**
 * Push randomly generated records (based on the schema of the given Avro file) into a Kafka stream.
 *
 * @param avroFile Sample Avro file used to extract the Avro schema
 * @param kafkaBroker Kafka broker config
 * @param kafkaTopic Kafka topic
 * @param numKafkaMessagesToPush Number of Kafka messages to push
 * @param maxNumKafkaMessagesPerBatch Maximum number of Kafka messages per batch
 * @param header Optional Kafka message header
 * @param partitionColumn Optional partition column
 * @throws Exception
 */
@SuppressWarnings("unused")
public static void pushRandomAvroIntoKafka(@Nonnull File avroFile, @Nonnull String kafkaBroker,
        @Nonnull String kafkaTopic, int numKafkaMessagesToPush, int maxNumKafkaMessagesPerBatch,
        @Nullable byte[] header, @Nullable String partitionColumn) throws Exception {
    Properties properties = new Properties();
    properties.put("metadata.broker.list", kafkaBroker);
    properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
    properties.put("request.required.acks", "1");
    properties.put("partitioner.class", "kafka.producer.ByteArrayPartitioner");

    ProducerConfig producerConfig = new ProducerConfig(properties);
    Producer<byte[], byte[]> producer = new Producer<>(producerConfig);

    try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream(65536)) {
        try (DataFileStream<GenericRecord> reader = AvroUtils.getAvroReader(avroFile)) {
            BinaryEncoder binaryEncoder = new EncoderFactory().directBinaryEncoder(outputStream, null);
            Schema avroSchema = reader.getSchema();
            GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(avroSchema);
            List<KeyedMessage<byte[], byte[]>> messagesToWrite = new ArrayList<>(maxNumKafkaMessagesPerBatch);
            GenericRecord genericRecord = new GenericData.Record(avroSchema);

            while (numKafkaMessagesToPush > 0) {
                generateRandomRecord(genericRecord, avroSchema);

                outputStream.reset();
                if (header != null && 0 < header.length) {
                    outputStream.write(header);
                }
                datumWriter.write(genericRecord, binaryEncoder);
                binaryEncoder.flush();

                byte[] keyBytes = (partitionColumn == null) ? Longs.toByteArray(System.currentTimeMillis())
                        : (genericRecord.get(partitionColumn)).toString().getBytes();
                byte[] bytes = outputStream.toByteArray();
                KeyedMessage<byte[], byte[]> data = new KeyedMessage<>(kafkaTopic, keyBytes, bytes);
                messagesToWrite.add(data);

                // Send a batch of messages
                if (messagesToWrite.size() == maxNumKafkaMessagesPerBatch) {
                    producer.send(messagesToWrite);
                    messagesToWrite.clear();
                }

                numKafkaMessagesToPush--;
            }

            // Send last batch of messages
            producer.send(messagesToWrite);
        }
    }
}
From source file: se.sics.caracaldb.global.LUTManager.java
private void askForUpdatesTo(long version) {
    try {
        Key startKey = LookupTable.RESERVED_LUTUPDATES.append(new Key(Longs.toByteArray(lut.versionId))).get();
        Key endKey = LookupTable.RESERVED_LUTUPDATES.append(new Key(Longs.toByteArray(version))).get();
        KeyRange range = KeyRange.open(startKey).open(endKey);
        UUID id = TimestampIdFactory.get().newId();
        RangeQuery.Request r = new RangeQuery.Request(id, range, null, null, null, RangeQuery.Type.SEQUENTIAL);
        Address dest = lut.findDest(startKey, self, RAND);
        CaracalMsg msg = new CaracalMsg(self, dest, r);
        trigger(msg, net);
        collectors.put(id, new RangeQuery.SeqCollector(r));
    } catch (LookupTable.NoResponsibleForKeyException ex) {
        LOG.error("{}: Apparently noone is responsible for the reserved range -.-: {}", self, ex);
    } catch (LookupTable.NoSuchSchemaException ex) {
        LOG.error("{}: Apparently the reserved range doesn't have a schema!!! -.-: {}", self, ex);
    }
}
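The range keys above are formed by appending the 8-byte big-endian version to a reserved key prefix, so versions stay in order under byte-wise comparison. A minimal sketch of that key layout with a hypothetical prefix (Guava only, no Caracal types):

import com.google.common.primitives.Bytes;
import com.google.common.primitives.Longs;
import java.util.Arrays;

public class VersionKeyDemo {
    public static void main(String[] args) {
        byte[] reservedPrefix = {0x00, 0x01}; // hypothetical reserved prefix
        byte[] startKey = Bytes.concat(reservedPrefix, Longs.toByteArray(7L));
        byte[] endKey = Bytes.concat(reservedPrefix, Longs.toByteArray(12L));
        // Big-endian suffixes keep the keys in version order under byte-wise comparison
        System.out.println(Arrays.toString(startKey));
        System.out.println(Arrays.toString(endKey));
    }
}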