Example usage for com.google.common.primitives Longs asList

Introduction

On this page you can find example usages of com.google.common.primitives.Longs.asList, collected from open-source projects.

Prototype

public static List<Long> asList(long... backingArray) 

Document

Returns a fixed-size list backed by the specified array, similar to Arrays.asList(Object[]).
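
Before the project examples below, here is a minimal, self-contained sketch of the documented semantics (the class name LongsAsListDemo is purely illustrative): the returned list is a view, so set(...) writes through to the backing array, while size-changing operations such as add(...) throw UnsupportedOperationException.

import com.google.common.primitives.Longs;

import java.util.List;

public class LongsAsListDemo {
    public static void main(String[] args) {
        long[] backing = { 3L, 1L, 2L };
        List<Long> view = Longs.asList(backing);

        // The list is backed by the array: set(...) writes through.
        view.set(0, 42L);
        System.out.println(backing[0]); // prints 42

        // The list is fixed-size: structural changes are rejected.
        try {
            view.add(7L);
        } catch (UnsupportedOperationException expected) {
            System.out.println("add() is unsupported on the fixed-size view");
        }
    }
}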

Usage

From source file:org.apache.accumulo.tracer.AsyncSpanReceiver.java

@Override
public void receiveSpan(Span s) {
    if (s.getStopTimeMillis() - s.getStartTimeMillis() < minSpanSize) {
        return;
    }

    Map<String, String> data = s.getKVAnnotations();

    SpanKey dest = getSpanKey(data);
    if (dest != null) {
        List<Annotation> annotations = convertToAnnotations(s.getTimelineAnnotations());
        if (sendQueueSize.get() > maxQueueSize) {
            long now = System.currentTimeMillis();
            if (now - lastNotificationOfDroppedSpans > 60 * 1000) {
                log.warn("Tracing spans are being dropped because there are already"
                        + " {} spans queued for delivery.\n"
                        + "This does not affect performance, security or data integrity,"
                        + " but distributed tracing information is being lost.", maxQueueSize);
                lastNotificationOfDroppedSpans = now;
            }
            return;
        }
        sendQueue.add(new RemoteSpan(host, service == null ? processId : service, s.getTraceId(), s.getSpanId(),
                Longs.asList(s.getParents()), s.getStartTimeMillis(), s.getStopTimeMillis(), s.getDescription(),
                data, annotations));
        sendQueueSize.incrementAndGet();
    }
}
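
In this Accumulo span receiver, Longs.asList(s.getParents()) wraps the primitive array of parent span IDs as the boxed list the RemoteSpan constructor takes, with no explicit copying loop.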

From source file:org.glowroot.agent.embedded.repo.GaugeValueDao.java

private static AtomicLongArray initData(ImmutableList<RollupConfig> rollupConfigs, DataSource dataSource)
        throws Exception {
    List<String> columnNames = Lists.newArrayList();
    for (int i = 1; i <= rollupConfigs.size(); i++) {
        columnNames.add("last_rollup_" + i + "_time");
    }
    Joiner joiner = Joiner.on(", ");
    String selectClause = castUntainted(joiner.join(columnNames));
    long[] lastRollupTimes = dataSource.query(new LastRollupTimesQuery(selectClause));
    if (lastRollupTimes.length == 0) {
        long[] values = new long[rollupConfigs.size()];
        String valueClause = castUntainted(joiner.join(Longs.asList(values)));
        dataSource.update("insert into gauge_value_last_rollup_times (" + selectClause + ") values ("
                + valueClause + ")");
        return new AtomicLongArray(values);
    } else {
        return new AtomicLongArray(lastRollupTimes);
    }
}
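
Here the boxed view feeds Guava's Joiner when building the comma-separated SQL values clause; Joiner joins Iterables and object arrays but has no overload for a primitive long[], so Longs.asList bridges the gap.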

From source file:org.apache.hive.service.cli.Column.java

public TColumn toTColumn() {
    TColumn value = new TColumn();
    ByteBuffer nullMasks = ByteBuffer.wrap(toBinary(nulls));
    switch (type) {
    case BOOLEAN_TYPE:
        value.setBoolVal(new TBoolColumn(Booleans.asList(Arrays.copyOfRange(boolVars, 0, size)), nullMasks));
        break;
    case TINYINT_TYPE:
        value.setByteVal(new TByteColumn(Bytes.asList(Arrays.copyOfRange(byteVars, 0, size)), nullMasks));
        break;
    case SMALLINT_TYPE:
        value.setI16Val(new TI16Column(Shorts.asList(Arrays.copyOfRange(shortVars, 0, size)), nullMasks));
        break;
    case INT_TYPE:
        value.setI32Val(new TI32Column(Ints.asList(Arrays.copyOfRange(intVars, 0, size)), nullMasks));
        break;
    case BIGINT_TYPE:
        value.setI64Val(new TI64Column(Longs.asList(Arrays.copyOfRange(longVars, 0, size)), nullMasks));
        break;
    case DOUBLE_TYPE:
        value.setDoubleVal(
                new TDoubleColumn(Doubles.asList(Arrays.copyOfRange(doubleVars, 0, size)), nullMasks));
        break;
    case STRING_TYPE:
        value.setStringVal(new TStringColumn(stringVars, nullMasks));
        break;
    case BINARY_TYPE:
        value.setBinaryVal(new TBinaryColumn(binaryVars, nullMasks));
        break;
    }
    return value;
}
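
Arrays.copyOfRange trims each primitive buffer to its logical size, and the Guava primitives helpers (Booleans, Bytes, Shorts, Ints, Longs, Doubles) then wrap those copies as the boxed lists the Thrift column constructors take; the wrapping itself adds no further copy.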

From source file:net.conquiris.lucene.search.SortBuilder.java

/**
 * Creates a sort field specified by the provided value list.
 * @param field Field identified by a schema item.
 * @param values Value list to use.
 * @throws IllegalArgumentException if the provided schema item is not indexed.
 */
public static SortField field(LongSchemaItem field, long... values) {
    return field(field, Longs.asList(values));
}
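
This overload simply wraps the varargs long array with Longs.asList and delegates to the List-based field(...) variant, so both entry points share one implementation.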

From source file:org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB.java

@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId, long[] blockIds,
        List<Token<BlockTokenIdentifier>> tokens) throws IOException {
    List<TokenProto> tokensProtos = new ArrayList<>(tokens.size());
    for (Token<BlockTokenIdentifier> t : tokens) {
        tokensProtos.add(PBHelper.convert(t));
    }
    // Build the request
    GetHdfsBlockLocationsRequestProto request = GetHdfsBlockLocationsRequestProto.newBuilder()
            .setBlockPoolId(blockPoolId).addAllBlockIds(Longs.asList(blockIds)).addAllTokens(tokensProtos)
            .build();
    // Send the RPC
    GetHdfsBlockLocationsResponseProto response;
    try {
        response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
    } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
    }
    // List of volumes in the response
    List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
    List<byte[]> volumeIds = new ArrayList<>(volumeIdsByteStrings.size());
    for (ByteString bs : volumeIdsByteStrings) {
        volumeIds.add(bs.toByteArray());
    }
    // Array of indexes into the list of volumes, one per block
    List<Integer> volumeIndexes = response.getVolumeIndexesList();
    // Parsed HdfsVolumeId values, one per block
    return new HdfsBlocksMetadata(blockPoolId, blockIds, volumeIds, volumeIndexes);
}
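
The protobuf builder's addAllBlockIds(...) takes an Iterable of boxed longs, so Longs.asList adapts the caller's primitive block ID array directly; contrast the explicit conversion loop needed for the token protos above.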

From source file:org.apache.hadoop.hive.serde2.thrift.ColumnBuffer.java

public TColumn toTColumn() {
    TColumn value = new TColumn();
    ByteBuffer nullMasks = ByteBuffer.wrap(toBinary(nulls));
    switch (type) {
    case BOOLEAN_TYPE:
        value.setBoolVal(new TBoolColumn(Booleans.asList(Arrays.copyOfRange(boolVars, 0, size)), nullMasks));
        break;
    case TINYINT_TYPE:
        value.setByteVal(new TByteColumn(Bytes.asList(Arrays.copyOfRange(byteVars, 0, size)), nullMasks));
        break;
    case SMALLINT_TYPE:
        value.setI16Val(new TI16Column(Shorts.asList(Arrays.copyOfRange(shortVars, 0, size)), nullMasks));
        break;
    case INT_TYPE:
        value.setI32Val(new TI32Column(Ints.asList(Arrays.copyOfRange(intVars, 0, size)), nullMasks));
        break;
    case BIGINT_TYPE:
        value.setI64Val(new TI64Column(Longs.asList(Arrays.copyOfRange(longVars, 0, size)), nullMasks));
        break;
    case FLOAT_TYPE:
    case DOUBLE_TYPE:
        value.setDoubleVal(
                new TDoubleColumn(Doubles.asList(Arrays.copyOfRange(doubleVars, 0, size)), nullMasks));
        break;
    case STRING_TYPE:
        value.setStringVal(new TStringColumn(stringVars, nullMasks));
        break;
    case BINARY_TYPE:
        value.setBinaryVal(new TBinaryColumn(binaryVars, nullMasks));
        break;
    }
    return value;
}
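
This is the same boxing pattern as the org.apache.hive.service.cli.Column example above; the notable difference is that FLOAT_TYPE and DOUBLE_TYPE share the TDoubleColumn branch here.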

From source file:org.iq80.snappy.SnappyBench.java

private static long getMedianValue(long[] benchmarkRuns) {
    ArrayList<Long> list = new ArrayList<Long>(Longs.asList(benchmarkRuns));
    Collections.sort(list);
    return list.get(benchmarkRuns.length / 2);
}

From source file:io.airlift.compress.SnappyBench.java

private long getMedianValue(long[] benchmarkRuns) {
    ArrayList<Long> list = new ArrayList<Long>(Longs.asList(benchmarkRuns));
    Collections.sort(list);
    return list.get(benchmarkRuns.length / 2);
}
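
Both benchmark examples copy the view into a fresh ArrayList before sorting. The list returned by Longs.asList supports set(...) and writes through to its backing array, so sorting the view directly would reorder the caller's measurements. A minimal sketch of the difference (variable names are illustrative; assumes the usual java.util imports):

long[] runs = { 30L, 10L, 20L };
List<Long> copy = new ArrayList<>(Longs.asList(runs));
Collections.sort(copy);                  // runs stays {30, 10, 20}
Collections.sort(Longs.asList(runs));    // reorders the backing array in place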

From source file:org.glowroot.agent.weaving.Weaver.java

private void checkForDeadlockedActiveWeaving(List<Long> activeWeavingThreadIds) {
    ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
    long[] deadlockedThreadIds = threadBean.findDeadlockedThreads();
    if (deadlockedThreadIds == null
            || Collections.disjoint(Longs.asList(deadlockedThreadIds), activeWeavingThreadIds)) {
        return;
    }
    // need to disable weaving, otherwise getThreadInfo can trigger class loading and itself get
    // blocked by the deadlocked threads
    weavingDisabledForLoggingDeadlock = true;
    try {
        @Nullable
        ThreadInfo[] threadInfos = threadBean.getThreadInfo(deadlockedThreadIds,
                threadBean.isObjectMonitorUsageSupported(), false);
        StringBuilder sb = new StringBuilder();
        for (ThreadInfo threadInfo : threadInfos) {
            if (threadInfo != null) {
                sb.append('\n');
                appendThreadInfo(sb, threadInfo);
            }
        }
        logger.error("deadlock detected in class weaving, please report to the Glowroot" + " project:\n{}", sb);
        // no need to keep checking for (and logging) deadlocked active weaving
        throw new TerminateSubsequentExecutionsException();
    } finally {
        weavingDisabledForLoggingDeadlock = false;
    }
}
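
Collections.disjoint(...) accepts any two collections, so wrapping the long[] of deadlocked thread IDs from ThreadMXBean with Longs.asList lets it be compared against the List<Long> of active weaving thread IDs without copying.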

From source file:io.atomix.protocols.raft.session.impl.RaftSessionManager.java

/**
 * Sends a keep-alive request to the cluster.
 */
private synchronized void keepAliveSessions(long lastKeepAliveTime, long sessionTimeout) {
    // Filter the list of sessions by timeout.
    List<RaftSessionState> needKeepAlive = sessions.values().stream()
            .filter(session -> session.getSessionTimeout() == sessionTimeout).collect(Collectors.toList());

    // If no sessions need keep-alives to be sent, skip and reschedule the keep-alive.
    if (needKeepAlive.isEmpty()) {
        return;
    }

    // Allocate session IDs, command response sequence numbers, and event index arrays.
    long[] sessionIds = new long[needKeepAlive.size()];
    long[] commandResponses = new long[needKeepAlive.size()];
    long[] eventIndexes = new long[needKeepAlive.size()];

    // For each session that needs to be kept alive, populate batch request arrays.
    int i = 0;
    for (RaftSessionState sessionState : needKeepAlive) {
        sessionIds[i] = sessionState.getSessionId().id();
        commandResponses[i] = sessionState.getCommandResponse();
        eventIndexes[i] = sessionState.getEventIndex();
        i++;
    }

    log.trace("Keeping {} sessions alive", sessionIds.length);

    KeepAliveRequest request = KeepAliveRequest.builder().withSessionIds(sessionIds)
            .withCommandSequences(commandResponses).withEventIndexes(eventIndexes).build();

    long keepAliveTime = System.currentTimeMillis();
    connection.keepAlive(request).whenComplete((response, error) -> {
        if (open.get()) {
            long delta = System.currentTimeMillis() - keepAliveTime;
            if (error == null) {
                // If the request was successful, update the address selector and schedule the next keep-alive.
                if (response.status() == RaftResponse.Status.OK) {
                    selectorManager.resetAll(response.leader(), response.members());

                    // Iterate through sessions and close sessions that weren't kept alive by the request (have already been closed).
                    Set<Long> keptAliveSessions = Sets.newHashSet(Longs.asList(response.sessionIds()));
                    for (RaftSessionState session : needKeepAlive) {
                        if (keptAliveSessions.contains(session.getSessionId().id())) {
                            session.setState(PrimitiveState.CONNECTED);
                        } else {
                            session.setState(PrimitiveState.EXPIRED);
                        }
                    }
                    scheduleKeepAlive(System.currentTimeMillis(), sessionTimeout, delta);
                }
                // If the timeout has not been passed, attempt to keep the session alive again with no delay.
                // We will continue to retry until the session expiration has passed.
                else if (System.currentTimeMillis() - lastKeepAliveTime < sessionTimeout) {
                    selectorManager.resetAll(null, connection.members());
                    keepAliveSessions(lastKeepAliveTime, sessionTimeout);
                }
                // If no leader was set, set the session state to unstable and schedule another keep-alive.
                else {
                    needKeepAlive.forEach(s -> s.setState(PrimitiveState.SUSPENDED));
                    selectorManager.resetAll();
                    scheduleKeepAlive(lastKeepAliveTime, sessionTimeout, delta);
                }
            }
            // If the timeout has not been passed, reset the connection and attempt to keep the session alive
            // again with no delay.
            else if (System.currentTimeMillis() - lastKeepAliveTime < sessionTimeout
                    && connection.leader() != null) {
                selectorManager.resetAll(null, connection.members());
                keepAliveSessions(lastKeepAliveTime, sessionTimeout);
            }
            // If no leader was set, set the session state to unstable and schedule another keep-alive.
            else {
                needKeepAlive.forEach(s -> s.setState(PrimitiveState.SUSPENDED));
                selectorManager.resetAll();
                scheduleKeepAlive(lastKeepAliveTime, sessionTimeout, delta);
            }
        }
    });
}
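
Here Longs.asList feeds Guava's Sets.newHashSet(Iterable), turning the primitive array of kept-alive session IDs from the response into a HashSet<Long> for constant-time membership checks inside the loop.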