Example usage for java.lang Long MAX_VALUE

Introduction

On this page you can find examples of how java.lang.Long.MAX_VALUE is used.

Prototype

public static final long MAX_VALUE

Document

A constant holding the maximum value a long can have, 2^63 - 1.
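
Before the examples, a quick demonstration of the constant itself and of the silent wrap-around that long arithmetic exhibits at this boundary (a minimal standalone sketch, not taken from any of the projects below):

public class LongMaxValueDemo {
    public static void main(String[] args) {
        System.out.println(Long.MAX_VALUE);                    // 9223372036854775807
        System.out.println(Long.MAX_VALUE == (1L << 63) - 1);  // true: 2^63 - 1

        // Overflow wraps silently to Long.MIN_VALUE...
        System.out.println(Long.MAX_VALUE + 1);                // -9223372036854775808

        // ...unless you ask for a checked operation, which throws instead.
        try {
            Math.addExact(Long.MAX_VALUE, 1L);
        } catch (ArithmeticException e) {
            System.out.println("overflow detected");
        }
    }
}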

Usage

From source file:org.lendingclub.mercator.aws.GraphNodeGarbageCollector.java

/**
 * This is a simplistic way to "garbage collect" nodes that have not been
 * updated in the current scan. It simply looks for nodes matching a given
 * label+account+region tuple that have an updateTs before the given
 * timestamp.
 * 
 * This could prove to be problematic if something goes wrong in the
 * scanning process. A better approach might be a mark-and-sweep system that
 * marks nodes as potentially deleted, then re-attempts to locate them in
 * EC2, and only then purges them.
 * 
 * This simplistic approach is probably OK for now.
 * 
 * @param label
 * @param account
 * @param region
 * @param ts
 */
private void invokeNodeGarbageCollector(String label, String account, String region, long ts) {

    if (ts == 0 || ts == Long.MAX_VALUE) {
        // nothing to do
        return;
    }

    if (exception != null) {
        return;
    }

    if (ScannerContext.getScannerContext().isPresent()) {
        if (ScannerContext.getScannerContext().get().hasExceptions()) {
            logger.info("refusing to garbage collect because of prior exceptions");
            return;
        }
    }

    Preconditions.checkArgument(!Strings.isNullOrEmpty(label), "label not set");
    Preconditions.checkArgument(!Strings.isNullOrEmpty(account), "account not set");
    Preconditions.checkArgument(!Strings.isNullOrEmpty(region), "region not set");

    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        String cypher = "match (x:" + label
                + " {aws_account: {account}, aws_region: {region}}) where x.updateTs<{ts} detach delete x";
        getNeoRxClient().execCypher(cypher, "account", account, "region", region, "ts", ts);
    } finally {
        logger.info("purging all {} nodes in aws_account={} in region={} updated before {} - elapsed={} ms",
                label, account, region, ts, stopwatch.stop().elapsed(TimeUnit.MILLISECONDS));
    }

}

From source file:info.archinnov.achilles.it.TestEntityWithComplexTuple.java

@Test
public void should_insert() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Tuple2<Integer, Map<Integer, List<String>>> tuple = Tuple2.of(10,
            ImmutableMap.of(20, asList("10", "20")));

    final EntityWithComplexTuple entity = new EntityWithComplexTuple(id, tuple);

    //When
    manager.crud().insert(entity).execute();

    //Then
    final Row actual = session.execute("SELECT * FROM complex_tuple WHERE id = " + id).one();

    assertThat(actual).isNotNull();
    final TupleValue tupleValue = actual.getTupleValue("tuple");
    assertThat(tupleValue.getInt(0)).isEqualTo(10);
    final Map<Integer, List<String>> map = tupleValue.getMap(1, new TypeToken<Integer>() {
    }, new TypeToken<List<String>>() {
    });
    assertThat(map).containsEntry(20, asList("10", "20"));

}
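
The id generation above, RandomUtils.nextLong(0L, Long.MAX_VALUE) from commons-lang3, draws a uniformly random non-negative long; the upper bound is exclusive. The same idea with only the JDK (a sketch using ThreadLocalRandom, which is not what this test uses):

import java.util.concurrent.ThreadLocalRandom;

// Random non-negative id in [0, Long.MAX_VALUE); the bound is exclusive,
// so Long.MAX_VALUE itself is never returned.
static long randomId() {
    return ThreadLocalRandom.current().nextLong(0L, Long.MAX_VALUE);
}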

From source file:com.blockwithme.time.internal.NTPClockSynchronizer.java

@Override
public long getLocalToUTCTimeOffset() throws Exception {
    final NTPUDPClient client = new NTPUDPClient();
    //      final Calendar cal = Calendar.getInstance(DEFAULT_LOCAL);
    // We want to timeout if a response takes longer than 3 seconds
    client.setDefaultTimeout(TIMEOUT);
    long offsetSum = 0L;
    int offsetCount = 0;
    long bestDelay = Long.MAX_VALUE;
    long bestOffset = Long.MAX_VALUE;
    Throwable lastException = null;
    try {
        client.open();
        for (int i = 0; i < ntpPool.length; i++) {
            try {
                final InetAddress hostAddr = InetAddress.getByName(ntpPool[i]);
                final TimeInfo info = client.getTime(hostAddr);
                info.computeDetails();
                final Long offsetValue = info.getOffset();
                final Long delayValue = info.getDelay();
                if ((delayValue != null) && (offsetValue != null)) {
                    //                      cal.setTimeInMillis(offsetValue
                    //                              + System.currentTimeMillis());
                    //                      final long local2UTC = -(cal.get(Calendar.ZONE_OFFSET) + cal
                    //                              .get(Calendar.DST_OFFSET));
                    if (delayValue <= 100L) {
                        offsetSum += offsetValue;// + local2UTC;
                        offsetCount++;
                    }
                    if (delayValue < bestDelay) {
                        bestDelay = delayValue;
                        bestOffset = offsetValue;// + local2UTC;
                    }
                }
            } catch (final Throwable t) {
                LOG.error("Error reading tiem through NTP", t);
                lastException = t;
            }
        }
    } catch (final Throwable t) {
        LOG.error("Error reading tiem through NTP", t);
        lastException = t;
        // NTPUDPClient can't even open at all!?!
    } finally {
        client.close();
    }
    if (offsetCount > 0) {
        return offsetSum / offsetCount;
    }
    // OK, not good result. Any result at all?
    if (bestDelay != Long.MAX_VALUE) {
        return bestOffset;
    }
    // FAIL!
    throw new Exception("Failed to get NTP time", lastException);
}
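
Note the idiom: bestDelay starts at Long.MAX_VALUE as a "nothing seen yet" sentinel, every real sample compares smaller, and bestDelay != Long.MAX_VALUE afterwards distinguishes "some result" from "total failure". The pattern in isolation (a standalone sketch, not part of the NTP code):

// Running minimum with Long.MAX_VALUE as the sentinel for "no sample yet".
static long bestOf(long[] delays) {
    long best = Long.MAX_VALUE;
    for (long delay : delays) {
        if (delay < best) {
            best = delay;
        }
    }
    if (best == Long.MAX_VALUE) {
        throw new IllegalStateException("no samples at all");
    }
    return best;
}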

From source file:com.questdb.net.http.handlers.QueryHandlerSmallBufferTest.java

@Test(expected = MalformedChunkCodingException.class)
public void testColumnValueTooLargeForBuffer() throws Exception {
    StringBuilder allChars = new StringBuilder();
    for (char c = Character.MIN_VALUE; c < 0xD800; c++) {
        allChars.append(c);
    }

    String allCharString = allChars.toString();
    QueryHandlerTest.generateJournal("xyz", allCharString, 1.900232E-10, 2.598E20, Long.MAX_VALUE,
            Integer.MIN_VALUE, new Timestamp(-102023));
    String query = "select x, id from xyz \n limit 1";
    QueryHandlerTest.download(query, temp);
}

From source file:info.archinnov.achilles.it.TestTypedQueries.java

@Test
public void should_perform_regular_typed_query() throws Exception {
    //Given
    final Long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final SimpleStatement statement = new SimpleStatement("SELECT * FROM simple WHERE id = " + id);

    //When
    final SimpleEntity actual = manager.query().typedQueryForSelect(statement).getOne();

    assertThat(actual).isNotNull();
    assertThat(actual.getValue()).contains("0 AM");
}

From source file:edu.usc.goffish.gopher.sample.N_Hop_Stat_Collector.java

@Override
public void compute(List<SubGraphMessage> subGraphMessages) {

    /**
     * We do this in following steps.
     * Calculate stats for each subgraph.
     * Calculate aggregate stats for partition.
     * In this case a single sub-graph will do the aggregation
     * Aggregate partition level stats and combine at the smallest partition.
     */

    if (superStep == 0) {
        SubGraphMessage msg = subGraphMessages.get(0);
        String data = new String(msg.getData());

        String[] dataSplit = data.split("#");
        N = Integer.parseInt(dataSplit[0]);
        String[] vps = dataSplit[1].split(",");
        for (String vp : vps) {
            vantagePoints.add(vp.trim());
        }

        try {

            Iterable<? extends ISubgraphInstance> subgraphInstances = subgraph.getInstances(Long.MIN_VALUE,
                    Long.MAX_VALUE, PropertySet.EmptyPropertySet, subgraph.getEdgeProperties(), false);

            //                        sliceManager.readInstances(subgraph,
            //                        Long.MIN_VALUE, Long.MAX_VALUE,
            //                        PropertySet.EmptyPropertySet, subgraph.getEdgeProperties());

            for (ISubgraphInstance instance : subgraphInstances) {

                Map<String, DescriptiveStatistics> statsMap = new HashMap<String, DescriptiveStatistics>();

                for (TemplateEdge edge : subgraph.edges()) {

                    ISubgraphObjectProperties edgeProps = instance.getPropertiesForEdge(edge.getId());

                    Integer isExist = (Integer) edgeProps.getValue(IS_EXIST_PROP);
                    if (isExist == 1) {
                        String[] vantageIps = ((String) edgeProps.getValue(VANTAGE_IP_PROP)).split(",");
                        String[] latencies = ((String) edgeProps.getValue(LATENCY_PROP)).split(",");
                        String[] hops = ((String) edgeProps.getValue(HOP_PROP)).split(",");

                        Integer[] vantangeIdx = vantageIpIndex(vantageIps);
                        if (vantangeIdx == null) {
                            continue;
                        }

                        for (int i : vantangeIdx) {

                            String vantage = vantageIps[i];
                            String latency = latencies[i];
                            String hop = hops[i];

                            double latency_num = Double.parseDouble(latency);
                            int hop_num = Integer.parseInt(hop);

                            if (latency_num >= 0 && hop_num == N) {
                                if (statsMap.containsKey(vantage)) {

                                    statsMap.get(vantage).addValue(latency_num);

                                } else {

                                    DescriptiveStatistics statistics = new DescriptiveStatistics();
                                    statistics.addValue(latency_num);
                                    statsMap.put(vantage, statistics);

                                }
                            }

                        }

                    }

                }

                int c = 0;
                StringBuffer msgBuffer = new StringBuffer();

                for (String v : statsMap.keySet()) {
                    c++;
                    DescriptiveStatistics statistics = statsMap.get(v);
                    String m = createMessageString(v, instance.getTimestampStart(), instance.getTimestampEnd(),
                            statistics.getStandardDeviation(), statistics.getMean(), statistics.getN());

                    if (c == statsMap.keySet().size()) {
                        msgBuffer.append(m);
                    } else {

                        msgBuffer.append(m).append("|");
                    }

                }

                SubGraphMessage subMsg = new SubGraphMessage(msgBuffer.toString().getBytes());

                sentMessage(partition.getId(), subMsg);

            }

        } catch (IOException e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }

    } else if (superStep == 1) {
        //OK, here every sub-graph will receive messages from its own partition.
        //Each message belongs to a given time span.
        Map<String, List<String[]>> vantageGroup = new HashMap<String, List<String[]>>();

        for (SubGraphMessage subGraphMessage : subGraphMessages) {

            String msgData = new String(subGraphMessage.getData());
            // String.split takes a regex, so a literal pipe must be escaped;
            // split("|") would split between every character.
            String[] dataParts = msgData.split("\\|");

            for (String data : dataParts) {
                String[] vantageParts = data.split(",");
                //Group by vantage point and startTime
                if (vantageGroup.containsKey(vantageParts[0] + "|" + vantageParts[1])) {
                    vantageGroup.get(vantageParts[0] + "|" + vantageParts[1]).add(vantageParts);
                } else {
                    ArrayList<String[]> arrayList = new ArrayList<String[]>();
                    arrayList.add(vantageParts);
                    vantageGroup.put(vantageParts[0] + "|" + vantageParts[1], arrayList);
                }

            }

        }

        for (String key : vantageGroup.keySet()) {

            if (!acquireLock(key)) {
                continue;
            }

            List<String[]> data = vantageGroup.get(key);

            double totalN = 0;
            double totalAvgVal = 0;

            double totalVar = 0;
            for (String[] d : data) {

                //average
                double mean = Double.parseDouble(d[4]);
                long sN = Long.parseLong(d[5]);
                totalN += sN;
                totalAvgVal += mean * sN;

                double sd = Double.parseDouble(d[3]);
                totalVar += ((double) sd * sd) / ((double) sN);

            }

            double avg = totalAvgVal / totalN;
            double newSD = Math.sqrt(totalVar);

            //create message
            //sent to all the partitions except me.
            String msg = key + "," + newSD + "," + avg + "," + totalN;

            for (int pid : partitions) {
                sentMessage(pid, new SubGraphMessage(msg.getBytes()));
            }

        }

    } else if (superStep >= 2) {

        if (partition.getId() == Collections.min(partitions)) {

            Map<String, List<String[]>> group = new HashMap<String, List<String[]>>();

            for (SubGraphMessage msg : subGraphMessages) {

                String data = new String(msg.getData());

                String[] dataParts = data.split(",");

                if (group.containsKey(dataParts[0])) {
                    group.get(dataParts[0]).add(dataParts);
                } else {
                    List<String[]> list = new ArrayList<String[]>();
                    list.add(dataParts);
                    group.put(dataParts[0], list);
                }

            }

            if (!acquireLock("" + partition.getId())) {
                voteToHalt();
                return;
            }

            PrintWriter writer;
            try {

                writer = new PrintWriter(new FileWriter("TimeSeriesStats.csv"));
            } catch (IOException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            }
            for (String key : group.keySet()) {

                List<String[]> data = group.get(key);

                double totalN = 0;
                double totalAvgVal = 0;

                double totalVar = 0;
                for (String[] d : data) {

                    //average

                    //key + "," + newSD + "," + avg + "," + totalN;
                    double mean = Double.parseDouble(d[2]);
                    long sN = Long.parseLong(d[3]);
                    totalN += sN;
                    totalAvgVal += mean * sN;

                    double sd = Double.parseDouble(d[1]);
                    totalVar += ((double) sd * sd) / ((double) sN);

                }

                double avg = totalAvgVal / totalN;
                double newSD = Math.sqrt(totalVar);

                String vantage = key.split("\\|")[0]; // escape the pipe: split takes a regex
                String timeStamp = key.split("\\|")[1];

                log(writer, vantage, timeStamp, avg, newSD);

            }
            writer.flush();
            voteToHalt();

        }
    }

}

From source file:info.archinnov.achilles.test.integration.tests.bugs.WrongConsistencyForSliceQueryIT.java

@Test
public void should_slice_query_with_runtime_consistency_level() throws Exception {
    //Given
    Long id = RandomUtils.nextLong(0, Long.MAX_VALUE);
    Date date1 = new Date(1);

    final ClusteredEntityWithConsistencyLevel entity1 = new ClusteredEntityWithConsistencyLevel(id, date1, "1");

    manager.insert(entity1);

    //When
    logAsserter.prepareLogLevelForDriverConnection();

    final ClusteredEntityWithConsistencyLevel found = manager
            .sliceQuery(ClusteredEntityWithConsistencyLevel.class).forSelect().withPartitionComponents(id)
            .fromClusterings(date1).withConsistency(ConsistencyLevel.ALL).getOne();

    //Then
    assertThat(found.getId().getDate()).isEqualTo(date1);
    logAsserter.assertConsistencyLevels(ConsistencyLevel.ALL);

}

From source file:com.linkedin.drelephant.tez.TezMetricsAggregator.java

@Override
public void aggregate(HadoopApplicationData hadoopData) {

    TezApplicationData data = (TezApplicationData) hadoopData;

    long mapTaskContainerSize = getMapContainerSize(data);
    long reduceTaskContainerSize = getReducerContainerSize(data);

    int reduceTaskSlowStartPercentage = (int) (Double
            .parseDouble(data.getConf().getProperty(REDUCER_SLOW_START_CONFIG)) * 100);

    //overwrite reduceTaskSlowStartPercentage to 100%. TODO: make use of the slow start percent
    reduceTaskSlowStartPercentage = 100;

    _mapTasks = new TezTaskLevelAggregatedMetrics(data.getMapTaskData(), mapTaskContainerSize,
            data.getStartTime());

    long reduceIdealStartTime = _mapTasks.getNthPercentileFinishTime(reduceTaskSlowStartPercentage);

    // Mappers list is empty
    if (reduceIdealStartTime == -1) {
        // ideal start time for reducer is infinite since it cannot start
        reduceIdealStartTime = Long.MAX_VALUE;
    }

    _reduceTasks = new TezTaskLevelAggregatedMetrics(data.getReduceTaskData(), reduceTaskContainerSize,
            reduceIdealStartTime);

    _hadoopAggregatedData.setResourceUsed(_mapTasks.getResourceUsed() + _reduceTasks.getResourceUsed());
    _hadoopAggregatedData.setTotalDelay(_mapTasks.getDelay() + _reduceTasks.getDelay());
    _hadoopAggregatedData.setResourceWasted(_mapTasks.getResourceWasted() + _reduceTasks.getResourceWasted());
}
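
Here Long.MAX_VALUE plays the role of "infinitely late": with no finished mappers, the reducers' ideal start time is unreachable. One caveat with such a sentinel is that naive arithmetic on it overflows, so guard before computing (a hypothetical helper sketching the idea, not code from Dr. Elephant):

// Delay of a task relative to an ideal start that may be the Long.MAX_VALUE
// "infinity" sentinel. Adding to the sentinel would wrap around, so test first.
static long delayAgainstIdeal(long actualStartMs, long idealStartMs) {
    if (idealStartMs == Long.MAX_VALUE) {
        return 0L; // ideal start unreachable: attribute no delay to this task
    }
    return Math.max(0L, actualStartMs - idealStartMs);
}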

From source file:c5db.client.ProtobufUtil.java

/**
 * Create a protocol buffer Get based on a client Get.
 *
 * @param get           The client Get.
 * @param existenceOnly Is this only an existence check.
 * @return a protocol buffer Get
 * @throws IOException
 */
@NotNull
public static c5db.client.generated.Get toGet(final Get get, boolean existenceOnly) throws IOException {

    c5db.client.generated.TimeRange timeRange;

    ByteBuffer row = ByteBuffer.wrap(get.getRow());
    boolean cacheBlocks = get.getCacheBlocks();
    int maxVersions = get.getMaxVersions();
    List<Column> columns = new ArrayList<>();
    List<NameBytesPair> attributes = new ArrayList<>();

    int storeLimit;
    int storeOffset;

    c5db.client.generated.Filter filter = get.getFilter() == null ? null
            : ProtobufUtil.toFilter(get.getFilter());

    if (!get.getTimeRange().isAllTime()) {
        timeRange = new c5db.client.generated.TimeRange(get.getTimeRange().getMin(),
                get.getTimeRange().getMax());
    } else {
        timeRange = new c5db.client.generated.TimeRange(0, Long.MAX_VALUE);
    }
    Map<String, byte[]> attributesMap = get.getAttributesMap();
    if (!attributesMap.isEmpty()) { // check the source map; the target list is still empty here
        for (Map.Entry<String, byte[]> attribute : attributesMap.entrySet()) {
            NameBytesPair updatedAttribute = new NameBytesPair(attribute.getKey(),
                    ByteBuffer.wrap(attribute.getValue()));
            attributes.add(updatedAttribute);
        }
    }
    if (get.hasFamilies()) {
        Map<byte[], NavigableSet<byte[]>> families = get.getFamilyMap();
        for (Map.Entry<byte[], NavigableSet<byte[]>> family : families.entrySet()) {
            List<ByteBuffer> qualifiers = new ArrayList<>();
            if (family.getValue() != null) {
                for (byte[] qualifier : family.getValue()) {
                    qualifiers.add(ByteBuffer.wrap(qualifier));
                }
            }

            Column column = new Column(ByteBuffer.wrap(family.getKey()), qualifiers);
            columns.add(column);
        }

    }
    storeLimit = get.getMaxResultsPerColumnFamily();
    storeOffset = get.getRowOffsetPerColumnFamily();

    return new c5db.client.generated.Get(row, columns, attributes, filter, timeRange, maxVersions, cacheBlocks,
            storeLimit, storeOffset, existenceOnly, false);
}

From source file:com.zlk.bigdemo.android.volley.toolbox.HttpHeaderParser.java

public static Cache.Entry parseImageCacheHeaders(NetworkResponse response) {
    Map<String, String> headers = response.headers;
    long serverDate = 0;
    String headerValue = headers.get("Date");
    if (headerValue != null) {
        serverDate = parseDateAsEpoch(headerValue);
    }

    Cache.Entry entry = new Cache.Entry();
    entry.data = response.data;
    entry.etag = headers.get("ETag");
    entry.softTtl = Long.MAX_VALUE;
    entry.ttl = entry.softTtl;
    entry.serverDate = serverDate;
    entry.responseHeaders = headers;

    return entry;
}
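
Setting softTtl (and hence ttl) to Long.MAX_VALUE makes the cached image effectively immortal: Volley decides expiry by comparing the TTL against the current wall-clock time, and System.currentTimeMillis() can never exceed Long.MAX_VALUE. The comparison in isolation (a sketch of the idea, not Volley's own source):

// With ttlMillis == Long.MAX_VALUE this can never return true, because the
// epoch-millisecond clock cannot reach Long.MAX_VALUE.
static boolean isExpired(long ttlMillis) {
    return ttlMillis < System.currentTimeMillis();
}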