Example usage for com.google.common.primitives Longs toArray

List of usage examples for com.google.common.primitives Longs toArray

Introduction

On this page you can find example usages for com.google.common.primitives Longs toArray.

Prototype

public static long[] toArray(Collection<? extends Number> collection) 

Document

Returns an array containing each value of collection, converted to a long value in the manner of Number#longValue.
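
For reference, below is a minimal, self-contained sketch (not taken from any of the projects listed under Usage; the class name is arbitrary) showing the conversion semantics: any Collection of Number subtypes is accepted, and each element is converted via Number#longValue.

import com.google.common.primitives.Longs;

import java.util.Arrays;
import java.util.List;

public class LongsToArrayExample {
    public static void main(String[] args) {
        // Any Collection<? extends Number> is accepted; each element is
        // converted with Number#longValue, so fractional parts are truncated.
        List<Number> values = Arrays.asList(1L, 2, 3.7d);
        long[] array = Longs.toArray(values);
        System.out.println(Arrays.toString(array)); // prints [1, 2, 3]
    }
}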

Usage

From source file:org.apache.calcite.jdbc.CalciteMetaImpl.java

@Override
public ExecuteBatchResult executeBatch(StatementHandle h, List<List<TypedValue>> parameterValueLists)
        throws NoSuchStatementException {
    final List<Long> updateCounts = new ArrayList<>();
    for (List<TypedValue> parameterValueList : parameterValueLists) {
        ExecuteResult executeResult = execute(h, parameterValueList, -1);
        final long updateCount = executeResult.resultSets.size() == 1
                ? executeResult.resultSets.get(0).updateCount
                : -1L;
        updateCounts.add(updateCount);
    }
    return new ExecuteBatchResult(Longs.toArray(updateCounts));
}

From source file:org.apache.calcite.jdbc.CalciteMetaImpl.java

@Override
public ExecuteBatchResult prepareAndExecuteBatch(final StatementHandle h, List<String> sqlCommands)
        throws NoSuchStatementException {
    final CalciteConnectionImpl calciteConnection = getConnection();
    final CalciteServerStatement statement = calciteConnection.server.getStatement(h);
    final List<Long> updateCounts = new ArrayList<>();
    final Meta.PrepareCallback callback = new Meta.PrepareCallback() {
        long updateCount;
        Signature signature;

        public Object getMonitor() {
            return statement;
        }

        public void clear() throws SQLException {
        }

        public void assign(Meta.Signature signature, Meta.Frame firstFrame, long updateCount)
                throws SQLException {
            this.signature = signature;
            this.updateCount = updateCount;
        }

        public void execute() throws SQLException {
            if (signature.statementType.canUpdate()) {
                final Iterable<Object> iterable = _createIterable(h, signature, ImmutableList.<TypedValue>of(),
                        null);
                final Iterator<Object> iterator = iterable.iterator();
                updateCount = ((Number) iterator.next()).longValue();
            }
            updateCounts.add(updateCount);
        }
    };
    for (String sqlCommand : sqlCommands) {
        Util.discard(prepareAndExecute(h, sqlCommand, -1L, -1, callback));
    }
    return new ExecuteBatchResult(Longs.toArray(updateCounts));
}

From source file:key.secretkey.crypto.PgpHandler.java

/**
 * Encrypts a password file
 *
 * @param data
 */
public void encrypt(Intent data) {
    data.setAction(OpenPgpApi.ACTION_ENCRYPT);

    ArrayList<Long> longKeys = new ArrayList<>();
    for (String keyId : keyIDs)
        longKeys.add(Long.valueOf(keyId));
    data.putExtra(OpenPgpApi.EXTRA_KEY_IDS, Longs.toArray(longKeys));

    data.putExtra(OpenPgpApi.EXTRA_REQUEST_ASCII_ARMOR, true);

    String name = ((EditText) findViewById(R.id.crypto_password_file_edit)).getText().toString();
    String pass = ((EditText) findViewById(R.id.crypto_password_edit)).getText().toString();
    String extra = ((EditText) findViewById(R.id.crypto_extra_edit)).getText().toString();

    if (name.isEmpty()) {
        showToast(this.getResources().getString(R.string.file_toast_text));
        return;
    }

    if (pass.isEmpty() && extra.isEmpty()) {
        showToast(this.getResources().getString(R.string.empty_toast_text));
        return;
    }

    ByteArrayInputStream is;

    try {
        is = new ByteArrayInputStream((pass + "\n" + extra).getBytes("UTF-8"));

        ByteArrayOutputStream os = new ByteArrayOutputStream();

        OpenPgpApi api = new OpenPgpApi(this, mServiceConnection.getService());
        api.executeApiAsync(data, is, os, new PgpCallback(true, os, REQUEST_CODE_ENCRYPT));

    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:io.atomix.protocols.raft.impl.RaftServiceManager.java

/**
 * Applies a session keep alive entry to the state machine.
 * <p>
 * Keep alive entries are applied to the internal state machine to reset the timeout for a specific session. If the
 * session indicated by the KeepAliveEntry is still held in memory, we mark the session as trusted, indicating that
 * the client has committed a keep alive within the required timeout. Additionally, we check all other sessions for
 * expiration based on the timestamp provided by this KeepAliveEntry. Note that sessions are never completely expired
 * via this method. Leaders must explicitly commit an UnregisterEntry to expire a session.
 * <p>
 * When a KeepAliveEntry is committed to the internal state machine, two specific fields provided in the entry are
 * used to update server-side session state. The {@code commandSequence} indicates the highest command for which the
 * session has received a successful response in the proper sequence. By applying the {@code commandSequence} to the
 * server session, we clear command output held in memory up to that point. The {@code eventVersion} indicates the
 * index up to which the client has received event messages in sequence for the session. Applying the {@code
 * eventVersion} to the server-side session results in events up to that index being removed from memory as they were
 * acknowledged by the client. It's essential that both of these fields be applied via entries committed to the Raft
 * log to ensure they're applied on all servers in sequential order.
 * <p>
 * Keep alive entries are retained in the log until the next time the client sends a keep alive entry or until the
 * client's session is expired. This ensures that, for sessions with long timeouts, keep alive entries cannot be
 * cleaned from the log before they're replicated to some servers.
 */
private long[] applyKeepAlive(Indexed<KeepAliveEntry> entry) {

    // Store the session/command/event sequence and event index instead of acquiring a reference to the entry.
    long[] sessionIds = entry.entry().sessionIds();
    long[] commandSequences = entry.entry().commandSequenceNumbers();
    long[] eventIndexes = entry.entry().eventIndexes();

    // Iterate through session identifiers and keep sessions alive.
    List<Long> successfulSessionIds = new ArrayList<>(sessionIds.length);
    Set<RaftServiceContext> services = new HashSet<>();
    for (int i = 0; i < sessionIds.length; i++) {
        long sessionId = sessionIds[i];
        long commandSequence = commandSequences[i];
        long eventIndex = eventIndexes[i];

        RaftSession session = raft.getSessions().getSession(sessionId);
        if (session != null) {
            if (session.getService().keepAlive(entry.index(), entry.entry().timestamp(), session,
                    commandSequence, eventIndex)) {
                successfulSessionIds.add(sessionId);
                services.add(session.getService());
            }
        }
    }

    // Iterate through services and complete keep-alives, causing sessions to be expired if necessary.
    for (RaftServiceContext service : services) {
        service.completeKeepAlive(entry.index(), entry.entry().timestamp());
    }

    expireOrphanSessions(entry.entry().timestamp());

    return Longs.toArray(successfulSessionIds);
}

From source file:org.voltcore.messaging.HostMessenger.java

public void send(long[] destinationHSIds, final VoltMessage message) {
    assert (message != null);
    assert (destinationHSIds != null);
    final HashMap<ForeignHost, ArrayList<Long>> foreignHosts = new HashMap<ForeignHost, ArrayList<Long>>(32);
    for (long hsId : destinationHSIds) {
        ForeignHost host = presend(hsId, message);
        if (host == null)
            continue;
        ArrayList<Long> bundle = foreignHosts.get(host);
        if (bundle == null) {
            bundle = new ArrayList<Long>();
            foreignHosts.put(host, bundle);
        }
        bundle.add(hsId);
    }

    if (foreignHosts.size() == 0)
        return;

    for (Entry<ForeignHost, ArrayList<Long>> e : foreignHosts.entrySet()) {
        e.getKey().send(Longs.toArray(e.getValue()), message);
    }
}

From source file:org.voltdb.sysprocs.SnapshotRestore.java

private final DigestScanResult performRestoreDigestScanWork() {
    SynthesizedPlanFragment[] pfs = new SynthesizedPlanFragment[2];

    // This fragment causes each execution site to confirm the likely
    // success of writing tables to disk
    pfs[0] = new SynthesizedPlanFragment();
    pfs[0].fragmentId = SysProcFragmentId.PF_restoreDigestScan;
    pfs[0].outputDepId = DEP_restoreDigestScan;
    pfs[0].inputDepIds = new int[] {};
    pfs[0].multipartition = true;
    pfs[0].parameters = ParameterSet.emptyParameterSet();

    // This fragment aggregates the save-to-disk sanity check results
    pfs[1] = new SynthesizedPlanFragment();
    pfs[1].fragmentId = SysProcFragmentId.PF_restoreDigestScanResults;
    pfs[1].outputDepId = DEP_restoreDigestScanResults;
    pfs[1].inputDepIds = new int[] { DEP_restoreDigestScan };
    pfs[1].multipartition = false;
    pfs[1].parameters = ParameterSet.emptyParameterSet();

    VoltTable[] results;
    results = executeSysProcPlanFragments(pfs, DEP_restoreDigestScanResults);

    HashMap<String, Map<Integer, Long>> exportSequenceNumbers = new HashMap<String, Map<Integer, Long>>();

    Long digestTxnId = null;
    ArrayList<JSONObject> digests = new ArrayList<JSONObject>();
    Set<Long> perPartitionTxnIds = new HashSet<Long>();

    /*
     * Retrieve and aggregate the per table per partition sequence numbers from
     * all the digest files retrieved across the cluster
     */
    try {
        while (results[0].advanceRow()) {
            if (results[0].getString("RESULT").equals("FAILURE")) {
                throw new VoltAbortException(results[0].getString("ERR_MSG"));
            }
            JSONObject digest = new JSONObject(results[0].getString(0));
            digests.add(digest);

            /*
             * Validate that the digests are all from the same snapshot
             */
            if (digestTxnId == null) {
                digestTxnId = digest.getLong("txnId");
            } else {
                if (digest.getLong("txnId") != digestTxnId) {
                    throw new VoltAbortException("Retrieved a digest with txnId " + digest.getLong("txnId")
                            + " that doesn't match the txnId seen previously " + digestTxnId
                            + " inspect the digests"
                            + " with the provided nonce and ensure that they are all really from the same snapshot");
                }
            }

            /*
             * Snapshots from pre-1.3 VoltDB won't have sequence numbers.
             * Doing nothing will default them to zero.
             */
            if (digest.has("exportSequenceNumbers")) {
                /*
                 * An array of entries for each table
                 */
                JSONArray sequenceNumbers = digest.getJSONArray("exportSequenceNumbers");
                for (int ii = 0; ii < sequenceNumbers.length(); ii++) {
                    /*
                     * An object containing all the sequence numbers for its partitions
                     * in this table. This will be a subset since it is from a single digest
                     */
                    JSONObject tableSequenceNumbers = sequenceNumbers.getJSONObject(ii);
                    String tableName = tableSequenceNumbers.getString("exportTableName");

                    Map<Integer, Long> partitionSequenceNumbers = exportSequenceNumbers.get(tableName);
                    if (partitionSequenceNumbers == null) {
                        partitionSequenceNumbers = new HashMap<Integer, Long>();
                        exportSequenceNumbers.put(tableName, partitionSequenceNumbers);
                    }

                    /*
                     * Array of objects containing partition and sequence number pairs
                     */
                    JSONArray sourcePartitionSequenceNumbers = tableSequenceNumbers
                            .getJSONArray("sequenceNumberPerPartition");
                    for (int zz = 0; zz < sourcePartitionSequenceNumbers.length(); zz++) {
                        int partition = sourcePartitionSequenceNumbers.getJSONObject(zz).getInt("partition");
                        long sequenceNumber = sourcePartitionSequenceNumbers.getJSONObject(zz)
                                .getInt("exportSequenceNumber");
                        partitionSequenceNumbers.put(partition, sequenceNumber);
                    }
                }
            }
            if (digest.has("partitionTransactionIds")) {
                JSONObject partitionTxnIds = digest.getJSONObject("partitionTransactionIds");
                @SuppressWarnings("unchecked")
                Iterator<String> keys = partitionTxnIds.keys();
                while (keys.hasNext()) {
                    perPartitionTxnIds.add(partitionTxnIds.getLong(keys.next()));
                }
            }
        }
    } catch (JSONException e) {
        throw new VoltAbortException(e);
    }
    DigestScanResult result = new DigestScanResult();
    result.digests = digests;
    result.exportSequenceNumbers = exportSequenceNumbers;
    result.perPartitionTxnIds = Longs.toArray(perPartitionTxnIds);
    return result;
}

From source file:org.eclipse.xtend.core.macro.declaration.CompilationUnitImpl.java

private Object toArrayOfType(final Iterable<?> iterable, final Class<?> componentType) {
    Collection<?> _xifexpression = null;
    if ((iterable instanceof Collection<?>)) {
        _xifexpression = ((Collection<?>) iterable);
    } else {
        _xifexpression = IterableExtensions.toList(iterable);
    }
    final Collection<?> collection = _xifexpression;
    Object _switchResult = null;
    boolean _matched = false;
    if (Objects.equal(componentType, int.class)) {
        _matched = true;
        _switchResult = Ints.toArray(((List<Integer>) collection));
    }
    if (!_matched) {
        if (Objects.equal(componentType, long.class)) {
            _matched = true;
            _switchResult = Longs.toArray(((List<Long>) collection));
        }
    }
    if (!_matched) {
        if (Objects.equal(componentType, char.class)) {
            _matched = true;
            _switchResult = Chars.toArray(((List<Character>) collection));
        }
    }
    if (!_matched) {
        if (Objects.equal(componentType, boolean.class)) {
            _matched = true;
            _switchResult = Booleans.toArray(((List<Boolean>) collection));
        }
    }
    if (!_matched) {
        if (Objects.equal(componentType, byte.class)) {
            _matched = true;
            _switchResult = Bytes.toArray(((List<Byte>) collection));
        }
    }
    if (!_matched) {
        if (Objects.equal(componentType, short.class)) {
            _matched = true;
            _switchResult = Shorts.toArray(((List<Short>) collection));
        }
    }
    if (!_matched) {
        if (Objects.equal(componentType, float.class)) {
            _matched = true;
            _switchResult = Floats.toArray(((List<Float>) collection));
        }
    }
    if (!_matched) {
        if (Objects.equal(componentType, double.class)) {
            _matched = true;
            _switchResult = Doubles.toArray(((List<Double>) collection));
        }
    }
    if (!_matched) {
        _switchResult = Iterables.<Object>toArray(collection, ((Class<Object>) componentType));
    }
    return _switchResult;
}

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

/**
 * Construct a batch of KiWiTriples from the result of an SQL query. This query differs from constructTripleFromDatabase
 * in that it does a batch-prefetching for optimized performance
 *
 * @param row a database result containing the columns described above
 * @return a KiWiTriple representation of the database result
 */
protected List<KiWiTriple> constructTriplesFromDatabase(ResultSet row, int maxPrefetch) throws SQLException {
    int count = 0;

    // declare variables to optimize stack allocation
    KiWiTriple triple;
    long id;

    List<KiWiTriple> result = new ArrayList<>();
    Map<Long, Long[]> tripleIds = new HashMap<>();
    Set<Long> nodeIds = new HashSet<>();
    while (count < maxPrefetch && row.next()) {
        count++;

        if (row.isClosed()) {
            throw new ResultInterruptedException("retrieving results has been interrupted");
        }

        // columns: id,subject,predicate,object,context,deleted,inferred,creator,createdAt,deletedAt
        //          1 ,2      ,3        ,4     ,5      ,6      ,7       ,8      ,9        ,10

        id = row.getLong(1);

        triple = tripleCache.get(id);

        // lookup element in cache first, so we can avoid reconstructing it if it is already there
        if (triple != null) {
            result.add(triple);
        } else {

            triple = new KiWiTriple();
            triple.setId(id);

            // collect node ids for batch retrieval
            nodeIds.add(row.getLong(2));
            nodeIds.add(row.getLong(3));
            nodeIds.add(row.getLong(4));

            if (row.getLong(5) != 0) {
                nodeIds.add(row.getLong(5));
            }

            if (row.getLong(8) != 0) {
                nodeIds.add(row.getLong(8));
            }

            // remember which node ids were relevant for the triple
            tripleIds.put(id, new Long[] { row.getLong(2), row.getLong(3), row.getLong(4), row.getLong(5),
                    row.getLong(8) });

            triple.setDeleted(row.getBoolean(6));
            triple.setInferred(row.getBoolean(7));
            triple.setCreated(new Date(row.getTimestamp(9).getTime()));
            try {
                if (row.getDate(10) != null) {
                    triple.setDeletedAt(new Date(row.getTimestamp(10).getTime()));
                }
            } catch (SQLException ex) {
                // work around a MySQL problem with null dates
                // (see http://stackoverflow.com/questions/782823/handling-datetime-values-0000-00-00-000000-in-jdbc)
            }

            result.add(triple);
        }
    }

    KiWiNode[] nodes = loadNodesByIds(Longs.toArray(nodeIds));
    Map<Long, KiWiNode> nodeMap = new HashMap<>();
    for (int i = 0; i < nodes.length; i++) {
        nodeMap.put(nodes[i].getId(), nodes[i]);
    }

    for (KiWiTriple t : result) {
        if (tripleIds.containsKey(t.getId())) {
            // need to set subject, predicate, object, context and creator
            Long[] ids = tripleIds.get(t.getId());
            t.setSubject((KiWiResource) nodeMap.get(ids[0]));
            t.setPredicate((KiWiUriResource) nodeMap.get(ids[1]));
            t.setObject(nodeMap.get(ids[2]));

            if (ids[3] != 0) {
                t.setContext((KiWiResource) nodeMap.get(ids[3]));
            }

            if (ids[4] != 0) {
                t.setCreator((KiWiResource) nodeMap.get(ids[4]));
            }

        }

        cacheTriple(t);
    }

    return result;
}

From source file:org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.java

private HopsTransactionalRequestHandler processBucketInternal(final DatanodeStorageInfo storage,
        final int bucketId, final Collection<BlockInfoContiguous> toAdd, final Collection<Block> toInvalidate,
        final Collection<BlockToMarkCorrupt> toCorrupt, final Collection<StatefulBlockInfo> toUC,
        final boolean firstBlockReport, final Map<Long, Long> mismatchedBlocksAndInodes,
        final Set<Long> aggregatedSafeBlocks, final Map<Long, Long> invalidatedReplicas,
        final BlockListAsLongs reportedBlocks) {

    return new HopsTransactionalRequestHandler(HDFSOperationType.PROCESS_REPORT) {
        @Override
        public void acquireLock(TransactionLocks locks) throws IOException {
            LockFactory lf = LockFactory.getInstance();
            if (reportedBlocks.getNumberOfBlocks() != 0) {
                List<Long> resolvedBlockIds = new ArrayList<>();
                List<Long> inodeIds = new ArrayList<>();
                List<Long> unResolvedBlockIds = new ArrayList<>();

                for (BlockListAsLongs.BlockReportReplica reportedBlock : reportedBlocks) {
                    Long inodeId = mismatchedBlocksAndInodes.get(reportedBlock.getBlockId());
                    if (inodeId != null) {
                        resolvedBlockIds.add(reportedBlock.getBlockId());
                        inodeIds.add(inodeId);
                    } else {
                        unResolvedBlockIds.add(reportedBlock.getBlockId());
                    }
                }

                locks.add(lf.getBlockReportingLocks(Longs.toArray(resolvedBlockIds), Longs.toArray(inodeIds),
                        Longs.toArray(unResolvedBlockIds), storage.getSid()));
            }
            locks.add(lf.getIndividualHashBucketLock(storage.getSid(), bucketId));
        }

        @Override
        public Object performTask() throws IOException {
            // scan the report and process newly reported blocks
            byte[] hash = HashBuckets.initalizeHash(); // Our updated hash should only consider
            // finalized, stored blocks
            for (BlockListAsLongs.BlockReportReplica brb : reportedBlocks) {
                Block block = new Block();
                block.setNoPersistance(brb.getBlockId(), brb.getBytesOnDisk(), brb.getGenerationStamp());
                BlockInfoContiguous storedBlock = processReportedBlock(storage, block, brb.getState(), toAdd,
                        toInvalidate, toCorrupt, toUC, aggregatedSafeBlocks, firstBlockReport,
                        mismatchedBlocksAndInodes.containsKey(brb.getBlockId()), invalidatedReplicas);
                if (storedBlock != null) {
                    mismatchedBlocksAndInodes.remove(storedBlock.getBlockId());
                    if (brb.getState() == ReplicaState.FINALIZED) {
                        // Only update hash with blocks that should not
                        // be removed and are finalized. This helps catch excess
                        // replicas as well.
                        HashBuckets.XORHashes(hash, BlockReport.hashAsFinalized(storedBlock));
                    }
                }
            }

            //update bucket hash
            HashBucket bucket = HashBuckets.getInstance().getBucket(storage.getSid(), bucketId);
            bucket.setHash(hash);
            return null;
        }
    };
}

From source file:org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.java

private void addStoredBlockTx(final List<BlockInfoContiguous> blocks, final List<Long> blockIds,
        final List<Long> inodeIds, final DatanodeStorageInfo storage, final DatanodeDescriptor delNodeHint,
        final boolean logEveryBlock) throws IOException {
    new HopsTransactionalRequestHandler(HDFSOperationType.AFTER_PROCESS_REPORT_ADD_BLK) {
        List<INodeIdentifier> inodeIdentifiers = new ArrayList<>();

        @Override
        public void setUp() throws StorageException {
            Set<Long> addedInodeIds = new HashSet<>();
            for (long id : inodeIds) {
                if (!addedInodeIds.contains(id)) {
                    inodeIdentifiers.add(INodeUtil.resolveINodeFromId(id));
                    addedInodeIds.add(id);
                }
            }
        }

        @Override
        public void acquireLock(TransactionLocks locks) throws IOException {
            LockFactory lf = LockFactory.getInstance();
            locks.add(lf.getINodesLocks(INodeLockType.WRITE, inodeIdentifiers))
                    .add(lf.getBlockReportingLocks(Longs.toArray(blockIds), Longs.toArray(inodeIds),
                            new long[0], 0))
                    .add(lf.getBlockRelated(BLK.RE, BLK.ER, BLK.CR, BLK.PE, BLK.IV, BLK.UR));
            if (((FSNamesystem) namesystem).isErasureCodingEnabled() && !inodeIdentifiers.isEmpty()) {
                locks.add(lf.getBatchedEncodingStatusLock(LockType.WRITE, inodeIdentifiers));
            }
        }

        @Override
        public Object performTask() throws IOException {
            for (BlockInfoContiguous block : blocks) {
                Block b = addStoredBlock(block, storage, delNodeHint, logEveryBlock);
            }
            return null;
        }
    }.handle();
}