List of usage examples for java.lang.Long.MIN_VALUE

Field signature: public static final long MIN_VALUE. This constant holds the smallest value a long can represent, -2^63 (-9223372036854775808).
From source file:dk.netarkivet.harvester.harvesting.frontier.FrontierReportCsvExport.java
private static String getDisplayValue(long val) {
    return Long.MIN_VALUE == val ? FrontierReportLine.EMPTY_VALUE_TOKEN : "" + val;
}
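Here Long.MIN_VALUE serves as an in-band sentinel meaning "no value recorded". A self-contained sketch of the same pattern; the token constant and the sample values are illustrative stand-ins, not taken from the NetarchiveSuite source:

public class SentinelDemo {
    // Hypothetical stand-in for FrontierReportLine.EMPTY_VALUE_TOKEN.
    private static final String EMPTY_VALUE_TOKEN = "-";

    private static String getDisplayValue(long val) {
        // Long.MIN_VALUE is reserved to mean "not set"; anything else is printed as-is.
        return Long.MIN_VALUE == val ? EMPTY_VALUE_TOKEN : String.valueOf(val);
    }

    public static void main(String[] args) {
        System.out.println(getDisplayValue(Long.MIN_VALUE)); // prints "-"
        System.out.println(getDisplayValue(42L));            // prints "42"
    }
}

The usual trade-off of an in-band sentinel applies: a genuine value of Long.MIN_VALUE can no longer be displayed.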
From source file:ac.elements.parser.SimpleDBConverter.java
/**
 * Decodes a zero-padded positive long value from its string representation.
 * Based on com.xerox.amazonws.sdb.DataUtils.
 *
 * @param value zero-padded string representation of the long
 * @return the original long value
 */
private static long decodeLong(String value) {
    BigInteger bi = new BigInteger(value, RADIX);
    bi = bi.add(BigInteger.valueOf(Long.MIN_VALUE));
    return bi.longValue();
}
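Adding Long.MIN_VALUE shifts the decoded offset back into the signed range; the matching encoder subtracts it, mapping every long onto [0, 2^64) so that fixed-width, zero-padded strings sort in numeric order. A round-trip sketch under assumed values for RADIX and the pad width; the real DataUtils constants may differ:

import java.math.BigInteger;

public class LongCodecSketch {
    // Assumed constants; the original class defines its own RADIX and padding width.
    private static final int RADIX = 10;
    private static final int WIDTH = 20; // 2^64 needs 20 decimal digits

    // Shift the full long range onto [0, 2^64) so the encoded value is non-negative.
    static String encodeLong(long value) {
        BigInteger bi = BigInteger.valueOf(value).subtract(BigInteger.valueOf(Long.MIN_VALUE));
        String s = bi.toString(RADIX);
        // Left-pad with zeros so lexicographic order matches numeric order.
        StringBuilder sb = new StringBuilder();
        for (int i = s.length(); i < WIDTH; i++) {
            sb.append('0');
        }
        return sb.append(s).toString();
    }

    static long decodeLong(String value) {
        return new BigInteger(value, RADIX).add(BigInteger.valueOf(Long.MIN_VALUE)).longValue();
    }

    public static void main(String[] args) {
        System.out.println(encodeLong(Long.MIN_VALUE));   // twenty zeros
        System.out.println(decodeLong(encodeLong(-42L))); // -42
    }
}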
From source file:com.yahoo.glimmer.indexing.generator.TermReduce.java
@Override
public void reduce(TermKey key, Iterable<TermValue> values, Context context)
        throws IOException, InterruptedException {
    if (key == null || key.equals("")) {
        return;
    }
    if (termKeysProcessed % 10000 == 0) {
        String statusString = "Reducing " + key.toString();
        context.setStatus(statusString);
        LOG.info(statusString);
    }

    writerKey.set(key.getIndex());

    if (key.getIndex() == DocumentMapper.ALIGNMENT_INDEX) {
        // Long.MIN_VALUE is a sentinel: no predicate id has been seen yet.
        long lastPredicateId = Long.MIN_VALUE;
        for (TermValue value : values) {
            if (value.getType() != Type.INDEX_ID) {
                throw new IllegalStateException(
                        "Got a " + value.getType() + " value when expecting only " + Type.INDEX_ID);
            }
            if (lastPredicateId != value.getV1()) {
                lastPredicateId = value.getV1();
                predicatedIds.add(lastPredicateId);
            }
        }
        writerTermValue.setTerm(key.getTerm());
        writerTermValue.setOccurrenceCount(0);
        writerTermValue.setTermFrequency(predicatedIds.size());
        writerTermValue.setSumOfMaxTermPositions(0);
        context.write(writerKey, writerTermValue);

        for (Long predicateId : predicatedIds) {
            writerDocValue.setDocument(predicateId);
            context.write(writerKey, writerDocValue);
        }
        predicatedIds.clear();
    } else if (TermKey.DOC_SIZE_TERM.equals(key.getTerm())) {
        // Write .sizes files
        Iterator<TermValue> valuesIt = values.iterator();
        while (valuesIt.hasNext()) {
            TermValue value = valuesIt.next();
            if (Type.DOC_SIZE != value.getType()) {
                throw new IllegalStateException(
                        "Got a " + value.getType() + " value when expecting only " + Type.DOC_SIZE);
            }
            writerSizeValue.setDocument(value.getV1());
            writerSizeValue.setSize(value.getV2());
            context.write(writerKey, writerSizeValue);
        }
    } else {
        int termFrequency = 0;
        int termCount = 0;
        int sumOfMaxTermPositions = 0;

        TermValue value = null;
        Iterator<TermValue> valuesIt = values.iterator();
        while (valuesIt.hasNext()) {
            value = valuesIt.next();
            if (Type.TERM_STATS != value.getType()) {
                break;
            }
            termFrequency++;
            termCount += value.getV1();
            sumOfMaxTermPositions += value.getV2();
        }

        if (Type.OCCURRENCE != value.getType()) {
            throw new IllegalStateException(
                    "Got a " + value.getType() + " value when expecting only " + Type.OCCURRENCE);
        }

        writerTermValue.setTerm(key.getTerm());
        writerTermValue.setOccurrenceCount(termCount);
        writerTermValue.setTermFrequency(termFrequency);
        writerTermValue.setSumOfMaxTermPositions(sumOfMaxTermPositions);
        context.write(writerKey, writerTermValue);

        TermValue prevValue = new TermValue();
        prevValue.set(value);

        while (value != null && value.getType() == Type.OCCURRENCE) {
            long docId = value.getV1();
            if (docId < 0) {
                throw new IllegalStateException("Negative DocID. Key:" + key + "\nValue:" + value);
            }
            if (docId != prevValue.getV1()) {
                // New document, write out previous postings
                writerDocValue.setDocument(prevValue.getV1());
                context.write(writerKey, writerDocValue);

                // The first occurrence of this docId.
                writerDocValue.clearOccerrences();
                writerDocValue.addOccurrence(value.getV2());
            } else {
                writerDocValue.addOccurrence(value.getV2());
            }
            prevValue.set(value);

            boolean last = false;
            if (valuesIt.hasNext()) {
                value = valuesIt.next();
                // LOG.warn("Value:" + value.toString());
                // Skip equivalent occurrences
                if (value.equals(prevValue)) {
                    // This should never happen. Is it legacy code?
                    throw new IllegalStateException("For indexId " + key.getIndex() + " and term "
                            + key.getTerm() + " got a duplicate occurrence " + value.toString());
                }
                while (value.equals(prevValue) && valuesIt.hasNext()) {
                    value = valuesIt.next();
                }
                if (value.equals(prevValue) && !valuesIt.hasNext()) {
                    last = true;
                }
            } else {
                last = true;
            }

            if (last) {
                // This is the last occurrence: write out the remaining positions
                writerDocValue.setDocument(prevValue.getV1());
                if (writerDocValue.getDocument() < 0) {
                    throw new IllegalStateException("Negative DocID. Key:" + key + "\nprevValue:" + prevValue
                            + "\nValue:" + value + "\nwriterDocValue:" + writerDocValue);
                }
                context.write(writerKey, writerDocValue);
                writerDocValue.clearOccerrences();
                value = null;
            }
        }
    }
    termKeysProcessed++;
}
From source file:nl.gridline.zieook.inx.movielens.hbase.CollectionFilterMap.java
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    // Get the configured filter key and values.
    filterKey = context.getConfiguration().get(TaskConfig.FILTER_KEY);
    String values = context.getConfiguration().get(TaskConfig.FILTER_VALUE);
    if (values != null) {
        String[] array = values.split("\n");
        filter = new HashSet<String>(array.length);
        for (String v : array) {
            filter.add(v.trim());
        }
    }
    String startStr = context.getConfiguration().get(TaskConfig.FILTER_STARTDATE);
    if (startStr != null) {
        startdate = Long.parseLong(startStr);
    } else {
        startdate = Long.MIN_VALUE;
    }
    String endStr = context.getConfiguration().get(TaskConfig.FILTER_ENDDATE);
    if (endStr != null) {
        enddate = Long.parseLong(endStr);
    } else {
        enddate = Long.MAX_VALUE;
    }
    LOG.info("Collection filter map config: filterKey:" + filterKey + " filter:" + filter + " startdate:"
            + startdate + " enddate:" + enddate);
}
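When either bound is missing from the configuration, the window widens to [Long.MIN_VALUE, Long.MAX_VALUE], so an unconfigured filter accepts every timestamp. A compact sketch of the same defaulting idea outside Hadoop; the class and field names are invented for illustration:

public class TimeWindow {
    final long start;
    final long end;

    TimeWindow(String startStr, String endStr) {
        // Missing bounds widen to the full long range, so the range
        // test below accepts every timestamp by default.
        this.start = (startStr != null) ? Long.parseLong(startStr) : Long.MIN_VALUE;
        this.end = (endStr != null) ? Long.parseLong(endStr) : Long.MAX_VALUE;
    }

    boolean contains(long timestamp) {
        return timestamp >= start && timestamp <= end;
    }

    public static void main(String[] args) {
        TimeWindow open = new TimeWindow(null, null);
        System.out.println(open.contains(0L));             // true
        System.out.println(open.contains(Long.MAX_VALUE)); // true
    }
}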
From source file:net.aksingh.owmjapis.AbstractWeather.java
AbstractWeather(JSONObject jsonObj) {
    super(jsonObj);

    // Long.MIN_VALUE marks a missing date-time field.
    long sec = (jsonObj != null) ? jsonObj.optLong(JSON_DATE_TIME, Long.MIN_VALUE) : Long.MIN_VALUE;
    if (sec != Long.MIN_VALUE) {
        // converting seconds to Date object
        this.dateTime = new Date(sec * 1000);
    } else {
        this.dateTime = null;
    }

    JSONArray weatherArray = (jsonObj != null) ? jsonObj.optJSONArray(JSON_WEATHER) : new JSONArray();
    this.weatherList = (weatherArray != null) ? new ArrayList<Weather>(weatherArray.length())
            : Collections.EMPTY_LIST;
    if (weatherArray != null && this.weatherList != Collections.EMPTY_LIST) {
        for (int i = 0; i < weatherArray.length(); i++) {
            JSONObject weatherObj = weatherArray.optJSONObject(i);
            if (weatherObj != null) {
                this.weatherList.add(new Weather(weatherObj));
            }
        }
    }
    this.weatherCount = this.weatherList.size();
}
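optLong returns the supplied default instead of throwing when the key is absent, so Long.MIN_VALUE acts as a "field missing" marker (at the cost of ambiguity if the payload ever legitimately contains that value). A standalone sketch against org.json; the "dt" key mirrors the OpenWeatherMap field this class reads, but the payloads are made up:

import java.util.Date;
import org.json.JSONObject;

public class OptLongDemo {
    public static void main(String[] args) throws Exception {
        JSONObject withDate = new JSONObject("{\"dt\": 1500000000}");
        JSONObject withoutDate = new JSONObject("{}");

        // optLong never throws; Long.MIN_VALUE flags "key not present".
        long sec = withoutDate.optLong("dt", Long.MIN_VALUE);
        Date dateTime = (sec != Long.MIN_VALUE) ? new Date(sec * 1000) : null;
        System.out.println(dateTime); // null

        sec = withDate.optLong("dt", Long.MIN_VALUE);
        System.out.println(new Date(sec * 1000)); // Fri Jul 14 2017 (UTC)
    }
}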
From source file:edu.usc.goffish.gopher.sample.N_Hop_Stat_Collector.java
@Override
public void compute(List<SubGraphMessage> subGraphMessages) {
    /*
     * We do this in the following steps:
     * 1. Calculate stats for each subgraph.
     * 2. Calculate aggregate stats for the partition; a single sub-graph does the aggregation.
     * 3. Aggregate partition-level stats and combine at the smallest partition.
     */
    if (superStep == 0) {
        SubGraphMessage msg = subGraphMessages.get(0);
        String data = new String(msg.getData());

        String[] dataSplit = data.split("#");
        N = Integer.parseInt(dataSplit[0]);
        String[] vps = dataSplit[1].split(",");
        for (String vp : vps) {
            vantagePoints.add(vp.trim());
        }

        try {
            // The range Long.MIN_VALUE..Long.MAX_VALUE selects every available instance.
            Iterable<? extends ISubgraphInstance> subgraphInstances = subgraph.getInstances(Long.MIN_VALUE,
                    Long.MAX_VALUE, PropertySet.EmptyPropertySet, subgraph.getEdgeProperties(), false);
            // sliceManager.readInstances(subgraph, Long.MIN_VALUE, Long.MAX_VALUE,
            //         PropertySet.EmptyPropertySet, subgraph.getEdgeProperties());

            for (ISubgraphInstance instance : subgraphInstances) {
                Map<String, DescriptiveStatistics> statsMap = new HashMap<String, DescriptiveStatistics>();
                for (TemplateEdge edge : subgraph.edges()) {
                    ISubgraphObjectProperties edgeProps = instance.getPropertiesForEdge(edge.getId());
                    Integer isExist = (Integer) edgeProps.getValue(IS_EXIST_PROP);
                    if (isExist == 1) {
                        String[] vantageIps = ((String) edgeProps.getValue(VANTAGE_IP_PROP)).split(",");
                        String[] latencies = ((String) edgeProps.getValue(LATENCY_PROP)).split(",");
                        String[] hops = ((String) edgeProps.getValue(HOP_PROP)).split(",");

                        Integer[] vantangeIdx = vantageIpIndex(vantageIps);
                        if (vantangeIdx == null) {
                            continue;
                        }
                        for (int i : vantangeIdx) {
                            String vantage = vantageIps[i];
                            String latency = latencies[i];
                            String hop = hops[i];

                            double latency_num = Double.parseDouble(latency);
                            int hop_num = Integer.parseInt(hop);
                            if (latency_num >= 0 && hop_num == N) {
                                if (statsMap.containsKey(vantage)) {
                                    statsMap.get(vantage).addValue(latency_num);
                                } else {
                                    DescriptiveStatistics statistics = new DescriptiveStatistics();
                                    statistics.addValue(latency_num);
                                    statsMap.put(vantage, statistics);
                                }
                            }
                        }
                    }
                }

                int c = 0;
                StringBuffer msgBuffer = new StringBuffer();
                for (String v : statsMap.keySet()) {
                    c++;
                    DescriptiveStatistics statistics = statsMap.get(v);
                    String m = createMessageString(v, instance.getTimestampStart(), instance.getTimestampEnd(),
                            statistics.getStandardDeviation(), statistics.getMean(), statistics.getN());
                    if (c == statsMap.keySet().size()) {
                        msgBuffer.append(m);
                    } else {
                        msgBuffer.append(m).append("|");
                    }
                }
                SubGraphMessage subMsg = new SubGraphMessage(msgBuffer.toString().getBytes());
                sentMessage(partition.getId(), subMsg);
            }
        } catch (IOException e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }

    } else if (superStep == 1) {
        // Here every sub-graph receives messages from its own partition.
        // Each message belongs to a given time span.
        Map<String, List<String[]>> vantageGroup = new HashMap<String, List<String[]>>();
        for (SubGraphMessage subGraphMessage : subGraphMessages) {
            String msgData = new String(subGraphMessage.getData());
            // Note: split() takes a regex, so the literal pipe must be escaped.
            String[] dataParts = msgData.split("\\|");
            for (String data : dataParts) {
                String[] vantageParts = data.split(",");
                // Group by vantage point and start time.
                if (vantageGroup.containsKey(vantageParts[0] + "|" + vantageParts[1])) {
                    vantageGroup.get(vantageParts[0] + "|" + vantageParts[1]).add(vantageParts);
                } else {
                    ArrayList<String[]> arrayList = new ArrayList<String[]>();
                    arrayList.add(vantageParts);
                    vantageGroup.put(vantageParts[0] + "|" + vantageParts[1], arrayList);
                }
            }
        }
        for (String key : vantageGroup.keySet()) {
            if (!acquireLock(key)) {
                continue;
            }
            List<String[]> data = vantageGroup.get(key);
            double totalN = 0;
            double totalAvgVal = 0;
            double totalVar = 0;
            for (String[] d : data) {
                // Weighted average over the sub-graph means.
                double mean = Double.parseDouble(d[4]);
                long sN = Long.parseLong(d[5]);
                totalN += sN;
                totalAvgVal += mean * sN;
                double sd = Double.parseDouble(d[3]);
                totalVar += ((double) sd * sd) / ((double) sN);
            }
            double avg = totalAvgVal / totalN;
            double newSD = Math.sqrt(totalVar);
            // Create the message and send it to the partitions.
            String msg = key + "," + newSD + "," + avg + "," + totalN;
            for (int pid : partitions) {
                sentMessage(pid, new SubGraphMessage(msg.getBytes()));
            }
        }

    } else if (superStep >= 2) {
        if (partition.getId() == Collections.min(partitions)) {
            Map<String, List<String[]>> group = new HashMap<String, List<String[]>>();
            for (SubGraphMessage msg : subGraphMessages) {
                String data = new String(msg.getData());
                String[] dataParts = data.split(",");
                if (group.containsKey(dataParts[0])) {
                    group.get(dataParts[0]).add(dataParts);
                } else {
                    List<String[]> list = new ArrayList<String[]>();
                    list.add(dataParts);
                    group.put(dataParts[0], list);
                }
            }
            if (!acquireLock("" + partition.getId())) {
                voteToHalt();
                return;
            }
            PrintWriter writer;
            try {
                writer = new PrintWriter(new FileWriter("TimeSeriesStats.csv"));
            } catch (IOException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            }
            for (String key : group.keySet()) {
                List<String[]> data = group.get(key);
                double totalN = 0;
                double totalAvgVal = 0;
                double totalVar = 0;
                for (String[] d : data) {
                    // Message layout: key,newSD,avg,totalN
                    double mean = Double.parseDouble(d[2]);
                    long sN = Long.parseLong(d[3]);
                    totalN += sN;
                    totalAvgVal += mean * sN;
                    double sd = Double.parseDouble(d[1]);
                    totalVar += ((double) sd * sd) / ((double) sN);
                }
                double avg = totalAvgVal / totalN;
                double newSD = Math.sqrt(totalVar);
                String vantage = key.split("\\|")[0];
                String timeStamp = key.split("\\|")[1];
                log(writer, vantage, timeStamp, avg, newSD);
            }
            writer.flush();
            voteToHalt();
        }
    }
}
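One detail worth calling out from superStep 1: String.split() takes a regular expression, so the pipe separator must be escaped as "\\|". An unescaped "|" is an alternation of two empty patterns, matches the empty string, and splits between every character:

public class SplitPitfall {
    public static void main(String[] args) {
        String msg = "a,b|c,d";
        // "|" alone matches the empty string: one element per character.
        System.out.println(msg.split("|").length);   // 7
        // Escaped, it splits on the literal pipe as intended.
        System.out.println(msg.split("\\|").length); // 2
    }
}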
From source file:org.cloudfoundry.metron.MetronMetricWriterTest.java
@Test
public void setNoSession() {
    this.metricWriter.set(new Metric<>("test-name", Long.MIN_VALUE));
    verifyZeroInteractions(this.async);
}
From source file:com.tilab.fiware.metaware.service.DiscoverObjServiceTest.java
@BeforeClass
public static void setUpClass() {
    testProperties = new Properties();
    testProperties.setProperty("db.host", "localhost");
    testProperties.setProperty("db.port", "27017");
    testProperties.setProperty("db.name", "MetadataRepoTest"); // test DB

    INSTANCE.setManualProperties(testProperties);
    INSTANCE.createCoreObjects();

    // Objects definition
    comp = new Company("company test name", "company test description", "company@test.com", "123456",
            "Via Reiss Romoli, 274 Torino", "company.test.one.com");
    dep = new Department("department test name", "department test description", null, // company id is set after
            "dep@test.com", "123456", "Via Reiss Romoli, 274 Torino", "http://dep.test.com");
    user1 = new User("user test name 1", "user test surname 1", "user1@test.com", "123456",
            "Via Reiss Romoli, 274 Torino", null, null, "usernametestalgo1", "secret", "");
    // company id and department id are set after
    user2 = new User("user test name 2", "user test surname 2", "user2@test.com", "654321",
            "Via Reiss Romoli, 274 Torino", null, null, "usernametestalgo2", "secret", "");
    // company id and department id are set after
    perm1 = new Permission(null, "rud"); // user id is set after
    perm2 = new Permission(null, "r"); // user id is set after
    // Long.MIN_VALUE is used as a placeholder for long-typed fields in these fixtures.
    algo1 = new Algorithm("algorithm test name 1", "algorithm test description 1", "test", Long.MIN_VALUE,
            Long.MIN_VALUE, null, null, "private", "model test 1", "sub-model test 1", "hive query test 1",
            Long.MIN_VALUE, 42, "algo.test.one.com"); // perm list and owner id are inserted after
    algo2 = new Algorithm("algorithm test name 2", "algorithm test description 2", "test", Long.MIN_VALUE,
            Long.MIN_VALUE, null, null, "private", "model test 2", "sub-model test 2", "hive query test 2",
            Long.MIN_VALUE, 42, "algo.test.two.com"); // perm list and owner id are inserted after
    data1 = new Dataset("dataset test name 1", "dataset test description 1", "test", Long.MIN_VALUE,
            Long.MIN_VALUE, null, null, "private", true, new DatasetStructure());
    // perm list and owner id are inserted after
    data2 = new Dataset("dataset test name 2", "dataset test description 2", "test", Long.MIN_VALUE,
            Long.MIN_VALUE, null, null, "private", true, new DatasetStructure());
    // perm list and owner id are inserted after
}
From source file:org.apache.directory.server.core.partition.impl.btree.jdbm.BTreeRedirectMarshallerTest.java
@Test
public void testLongMinValue() throws IOException {
    byte[] bites = createBites();
    bites[1] = (byte) 0x80;
    assertEquals(Long.MIN_VALUE, marshaller.deserialize(bites).getRecId());
    assertTrue(ArrayUtils.isEquals(bites, marshaller.serialize(new BTreeRedirect(Long.MIN_VALUE))));
}
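Flipping byte index 1 to 0x80 works because Long.MIN_VALUE in big-endian two's complement is the sign bit followed by seven zero bytes; index 0 presumably holds a one-byte marker in this marshaller's format (an assumption, since createBites() is not shown here). A quick check with ByteBuffer, which is big-endian by default:

import java.nio.ByteBuffer;

public class MinValueBytesDemo {
    public static void main(String[] args) {
        // Big-endian two's complement: Long.MIN_VALUE is the sign bit alone.
        byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(Long.MIN_VALUE).array();
        for (byte b : bytes) {
            System.out.printf("%02x ", b); // 80 00 00 00 00 00 00 00
        }
        System.out.println();
    }
}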
From source file:com.opengamma.util.timeseries.fast.longint.object.FastArrayLongObjectTimeSeries.java
private void init(final long[] times, final T[] values) {
    if (times.length != values.length) {
        throw new IllegalArgumentException(
                "Arrays are of different sizes: " + times.length + ", " + values.length);
    }
    System.arraycopy(times, 0, _times, 0, times.length);
    System.arraycopy(values, 0, _values, 0, values.length);
    // Check that the dates are ordered; Long.MIN_VALUE is below any real
    // time, so the first comparison always passes.
    long maxTime = Long.MIN_VALUE;
    for (final long time : _times) {
        if (time < maxTime) {
            throw new IllegalArgumentException("dates must be ordered");
        }
        maxTime = time;
    }
}
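Seeding the running maximum with Long.MIN_VALUE removes the need for a special first-iteration case, since no long compares below it. The same pattern in isolation, as a hypothetical helper:

public class RunningMaxDemo {
    // No explicit "first element" handling: every value, including
    // Long.MIN_VALUE itself, satisfies v >= Long.MIN_VALUE.
    static boolean isNonDecreasing(long[] values) {
        long max = Long.MIN_VALUE;
        for (long v : values) {
            if (v < max) {
                return false;
            }
            max = v;
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(isNonDecreasing(new long[] { 1, 2, 2, 5 })); // true
        System.out.println(isNonDecreasing(new long[] { 3, 1 }));       // false
    }
}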