List of usage examples for java.lang Long MIN_VALUE
public static final long MIN_VALUE
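Long.MIN_VALUE is the constant holding the minimum value a long can have: -2^63, i.e. -9223372036854775808. Because no real value compares lower, the examples below use it either as a "not set" sentinel or as the seed of a running maximum. A minimal illustration of the constant itself:

    System.out.println(Long.MIN_VALUE);                          // -9223372036854775808
    System.out.println(Long.MIN_VALUE == -1L - Long.MAX_VALUE);  // true
    // Beware: in two's complement, negating it overflows back to itself.
    System.out.println(-Long.MIN_VALUE == Long.MIN_VALUE);       // true
    System.out.println(Math.abs(Long.MIN_VALUE));                // -9223372036854775808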
From source file:org.apache.hadoop.hbase.quotas.FileArchiverNotifierImpl.java
@Override
public void addArchivedFiles(Set<Entry<String, Long>> fileSizes) throws IOException {
    long start = System.nanoTime();
    readLock.lock();
    try {
        // We want to catch the case where we got an archival request, but there was a full
        // re-computation in progress that was blocking us. Most likely, the full computation is going
        // to already include the changes we were going to make.
        //
        // Same as "start < lastFullCompute" but avoiding numeric overflow per the
        // System.nanoTime() javadoc
        if (lastFullCompute != Long.MIN_VALUE && start - lastFullCompute < 0) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("A full computation was performed after this request was received."
                        + " Ignoring requested updates: " + fileSizes);
            }
            return;
        }
        if (LOG.isTraceEnabled()) {
            LOG.trace("currentSnapshots: " + currentSnapshots + " fileSize: " + fileSizes);
        }
        // Write increment to quota table for the correct snapshot. Only do this if we have snapshots
        // and some files that were archived.
        if (!currentSnapshots.isEmpty() && !fileSizes.isEmpty()) {
            // We get back the files which no snapshot referenced (the files which will be deleted soon)
            groupArchivedFiledBySnapshotAndRecordSize(currentSnapshots, fileSizes);
        }
    } finally {
        readLock.unlock();
    }
}
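The HBase example treats Long.MIN_VALUE as "no full computation has happened yet" and compares System.nanoTime() readings by the sign of their difference, since nanoTime values may overflow. A minimal standalone sketch of the same pattern (class and method names are illustrative, not from HBase):

    class NanoTimeSentinel {
        // Long.MIN_VALUE means "no full computation has run yet"
        private volatile long lastFullCompute = Long.MIN_VALUE;

        void markComputed() {
            lastFullCompute = System.nanoTime();
        }

        boolean computedAfter(long start) {
            // Same as "start < lastFullCompute", but written as a difference so it
            // stays correct if nanoTime wraps (see the System.nanoTime() javadoc).
            return lastFullCompute != Long.MIN_VALUE && start - lastFullCompute < 0;
        }
    }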
From source file:com.cinnober.msgcodec.json.JsonValueHandlerTest.java
@Test
public void testInt64DecodeMinValue() throws IOException {
    JsonParser p = f.createParser("9223372036854775808");
    p.nextToken();
    assertEquals(Long.MIN_VALUE, JsonValueHandler.UINT64.readValue(p).longValue());
}
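The literal 9223372036854775808 is 2^63, one more than Long.MAX_VALUE; decoded as an unsigned 64-bit integer it occupies the same bit pattern as Long.MIN_VALUE, which is what the UINT64 handler asserts. The same mapping can be reproduced with the JDK's own unsigned helpers, independent of msgcodec:

    long v = Long.parseUnsignedLong("9223372036854775808"); // 2^63
    System.out.println(v == Long.MIN_VALUE);                // true
    System.out.println(Long.toUnsignedString(v));           // 9223372036854775808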
From source file:com.yoncabt.ebr.executor.BaseReport.java
public ReportDefinition loadDefinition(File reportFile, File jsonFile)
        throws AssertionError, IOException, JSONException {
    ReportDefinition ret = new ReportDefinition(reportFile);
    if (!jsonFile.exists()) {
        ret.setCaption(jsonFile.getName().replace(".ebr.json", ""));
        return ret;
    }
    String jsonComment = FileUtils.readFileToString(jsonFile, "utf-8");
    JSONObject jsonObject = new JSONObject(jsonComment);
    ret.setCaption(jsonObject.optString("title", "NOT ITTLE"));
    ret.setDataSource(jsonObject.optString("datasource", "default"));
    ret.setTextEncoding(jsonObject.optString("text-encoding", "utf-8"));
    ret.setTextTemplate(jsonObject.optString("text-template", "SUITABLE"));
    if (jsonObject.has("fields")) {
        JSONArray fieldsArray = jsonObject.getJSONArray("fields");
        for (int i = 0; i < fieldsArray.length(); i++) {
            JSONObject field = fieldsArray.getJSONObject(i);
            FieldType fieldType = FieldType.valueOfJSONName(field.getString("type"));
            switch (fieldType) {
            case DATE: {
                ReportParam<Date> rp = new ReportParam<>(Date.class);
                readCommon(ret, rp, field);
                if (field.has("default-value")) {
                    rp.setDefaultValue(new Date(field.getLong("default-value")));
                }
                break;
            }
            case STRING: {
                ReportParam<String> rp = new ReportParam<>(String.class);
                readCommon(ret, rp, field);
                if (field.has("default-value")) {
                    rp.setDefaultValue(field.getString("default-value"));
                }
                break;
            }
            case INTEGER: {
                ReportParam<Integer> rp = new ReportParam<>(Integer.class);
                readCommon(ret, rp, field);
                int min = field.has("min") ? field.getInt("min") : Integer.MIN_VALUE;
                int max = field.has("max") ? field.getInt("max") : Integer.MAX_VALUE;
                rp.setMax(max);
                rp.setMin(min);
                if (field.has("default-value")) {
                    rp.setDefaultValue(field.getInt("default-value"));
                }
                break;
            }
            case LONG: {
                ReportParam<Long> rp = new ReportParam<>(Long.class);
                readCommon(ret, rp, field);
                long min = field.has("min") ? field.getLong("min") : Long.MIN_VALUE;
                long max = field.has("max") ? field.getLong("max") : Long.MAX_VALUE;
                rp.setMax(max);
                rp.setMin(min);
                if (field.has("default-value")) {
                    rp.setDefaultValue(field.getLong("default-value"));
                }
                break;
            }
            case DOUBLE: {
                ReportParam<Double> rp = new ReportParam<>(Double.class);
                readCommon(ret, rp, field);
                double min = field.has("min") ? field.getLong("min") : Double.MIN_VALUE;
                double max = field.has("max") ? field.getLong("max") : Double.MAX_VALUE;
                rp.setMax(max);
                rp.setMin(min);
                if (field.has("default-value")) {
                    rp.setDefaultValue(field.getDouble("default-value"));
                }
                break;
            }
            default: {
                throw new AssertionError(fieldType);
            }
            }
        }
    }
    return ret;
}
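The INTEGER and LONG cases use MIN_VALUE/MAX_VALUE as "no bound configured" defaults. Note that this idiom does not transfer to the DOUBLE case: Double.MIN_VALUE is the smallest positive double, not the most negative one, so using it as a default lower bound silently excludes all negative values; -Double.MAX_VALUE is the true minimum. (The DOUBLE branch also reads its bounds with getLong rather than getDouble, so fractional bounds would be truncated.) A quick demonstration of the asymmetry:

    System.out.println(Long.MIN_VALUE);     // -9223372036854775808 (most negative long)
    System.out.println(Integer.MIN_VALUE);  // -2147483648 (most negative int)
    System.out.println(Double.MIN_VALUE);   // 4.9E-324 (smallest POSITIVE double)
    System.out.println(-Double.MAX_VALUE);  // -1.7976931348623157E308 (most negative double)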
From source file:com.taobao.weex.devtools.json.ObjectMapperTest.java
@Test
public void testObjectToPrimitive() throws JSONException {
    ArrayOfPrimitivesContainer container = new ArrayOfPrimitivesContainer();
    ArrayList<Object> primitives = container.primitives;
    primitives.add(Long.MIN_VALUE);
    primitives.add(Long.MAX_VALUE);
    primitives.add(Integer.MIN_VALUE);
    primitives.add(Integer.MAX_VALUE);
    primitives.add(Float.MIN_VALUE);
    primitives.add(Float.MAX_VALUE);
    primitives.add(Double.MIN_VALUE);
    primitives.add(Double.MAX_VALUE);
    String json = mObjectMapper.convertValue(container, JSONObject.class).toString();
    JSONObject obj = new JSONObject(json);
    JSONArray array = obj.getJSONArray("primitives");
    ArrayList<Object> actual = new ArrayList<>();
    for (int i = 0, N = array.length(); i < N; i++) {
        actual.add(array.get(i));
    }
    assertEquals(primitives.toString(), actual.toString());
}
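Round-tripping the extreme constants is a cheap way to catch narrowing or precision loss in a serializer, since any lossy conversion shows up first at the edges of the range. The same check for a single value, using org.json directly (a sketch, not part of the weex test suite):

    import org.json.JSONObject;

    JSONObject o = new JSONObject().put("v", Long.MIN_VALUE);
    String s = o.toString();                      // {"v":-9223372036854775808}
    long back = new JSONObject(s).getLong("v");
    System.out.println(back == Long.MIN_VALUE);   // true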
From source file:io.pravega.controller.store.stream.InMemoryStream.java
@Override
CompletableFuture<Void> storeCreationTimeIfAbsent(long timestamp) {
    creationTime.compareAndSet(Long.MIN_VALUE, timestamp);
    return CompletableFuture.completedFuture(null);
}
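Here Long.MIN_VALUE acts as the "not yet set" state of what is presumably an AtomicLong: compareAndSet stores the timestamp only while the field still holds the sentinel, making the write idempotent. A self-contained sketch under that assumption (names are illustrative):

    import java.util.concurrent.atomic.AtomicLong;

    class CreationTime {
        // Long.MIN_VALUE = "creation time not recorded yet"
        private final AtomicLong creationTime = new AtomicLong(Long.MIN_VALUE);

        void storeIfAbsent(long timestamp) {
            creationTime.compareAndSet(Long.MIN_VALUE, timestamp); // no-op after first call
        }

        boolean isSet() {
            return creationTime.get() != Long.MIN_VALUE;
        }
    }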
From source file:co.nubetech.apache.hadoop.DateSplitter.java
/**
 * Retrieve the value from the column in a type-appropriate manner and
 * return its timestamp since the epoch. If the column is null, then return
 * Long.MIN_VALUE. This will cause a special split to be generated for the
 * NULL case, but may also cause poorly-balanced splits if most of the
 * actual dates are positive time since the epoch, etc.
 */
private long resultSetColToLong(ResultSet rs, int colNum, int sqlDataType) throws SQLException {
    try {
        switch (sqlDataType) {
        case Types.DATE:
            return rs.getDate(colNum).getTime();
        case Types.TIME:
            return rs.getTime(colNum).getTime();
        case Types.TIMESTAMP:
            return rs.getTimestamp(colNum).getTime();
        default:
            throw new SQLException("Not a date-type field");
        }
    } catch (NullPointerException npe) {
        // null column. return minimum long value.
        LOG.warn("Encountered a NULL date in the split column. Splits may be poorly balanced.");
        return Long.MIN_VALUE;
    }
}
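Because every real epoch timestamp is greater than Long.MIN_VALUE, rows with a NULL date all collapse onto the sentinel and end up in one dedicated split, exactly as the javadoc above describes. The core of the idiom, reduced to a helper (the name is illustrative, not from the splitter code):

    static long epochOrNullMarker(java.sql.Date d) {
        // NULL dates map to Long.MIN_VALUE, which sorts below any real timestamp
        return (d == null) ? Long.MIN_VALUE : d.getTime();
    }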
From source file:org.apache.cassandra.db.CounterColumnTest.java
@Test
public void testReconcile() throws UnknownHostException {
    IColumn left;
    IColumn right;
    IColumn reconciled;
    ByteBuffer context;

    // tombstone + tombstone
    left = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 1L);
    right = new DeletedColumn(ByteBufferUtil.bytes("x"), 2, 2L);
    assert left.reconcile(right).getMarkedForDeleteAt() == right.getMarkedForDeleteAt();
    assert right.reconcile(left).getMarkedForDeleteAt() == right.getMarkedForDeleteAt();

    // tombstone > live
    left = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
    right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 1L);
    assert left.reconcile(right) == left;

    // tombstone < live last delete
    left = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 1L);
    right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
    assert left.reconcile(right) == right;

    // tombstone == live last delete
    left = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
    right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
    assert left.reconcile(right) == right;

    // tombstone > live last delete
    left = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 4L);
    right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 9L, 1L);
    reconciled = left.reconcile(right);
    assert reconciled.name() == right.name();
    assert reconciled.value() == right.value();
    assert reconciled.timestamp() == right.timestamp();
    assert ((CounterColumn) reconciled).timestampOfLastDelete() == left.getMarkedForDeleteAt();

    // live < tombstone
    left = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 1L);
    right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
    assert left.reconcile(right) == right;

    // live last delete > tombstone
    left = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
    right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 1L);
    assert left.reconcile(right) == left;

    // live last delete == tombstone
    left = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
    right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
    assert left.reconcile(right) == left;

    // live last delete < tombstone
    left = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 9L, 1L);
    right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 4L);
    reconciled = left.reconcile(right);
    assert reconciled.name() == left.name();
    assert reconciled.value() == left.value();
    assert reconciled.timestamp() == left.timestamp();
    assert ((CounterColumn) reconciled).timestampOfLastDelete() == right.getMarkedForDeleteAt();

    // live < live last delete
    left = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(1), 2L, 3L, false), 1L, Long.MIN_VALUE);
    right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(1), 1L, 1L, false), 4L, 3L);
    assert left.reconcile(right) == right;

    // live last delete > live
    left = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(1), 2L, 3L, false), 6L, 5L);
    right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(1), 1L, 1L, false), 4L, 3L);
    assert left.reconcile(right) == left;

    // live + live
    left = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(1), 1L, 1L, false), 4L, Long.MIN_VALUE);
    right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(1), 2L, 3L, false), 1L, Long.MIN_VALUE);
    reconciled = left.reconcile(right);
    assert reconciled.name().equals(left.name());
    assert ((CounterColumn) reconciled).total() == 3L;
    assert reconciled.timestamp() == 4L;

    left = reconciled;
    right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(2), 1L, 5L, false), 2L, Long.MIN_VALUE);
    reconciled = left.reconcile(right);
    assert reconciled.name().equals(left.name());
    assert ((CounterColumn) reconciled).total() == 8L;
    assert reconciled.timestamp() == 4L;

    left = reconciled;
    right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(NodeId.fromInt(2), 2L, 2L, false), 6L, Long.MIN_VALUE);
    reconciled = left.reconcile(right);
    assert reconciled.name().equals(left.name());
    assert ((CounterColumn) reconciled).total() == 5L;
    assert reconciled.timestamp() == 6L;

    context = reconciled.value();
    int hd = 2; // header
    assert hd + 2 * stepLength == context.remaining();
    assert Util.equalsNodeId(NodeId.fromInt(1), context, hd + 0 * stepLength);
    assert 2L == context.getLong(hd + 0 * stepLength + idLength);
    assert 3L == context.getLong(hd + 0 * stepLength + idLength + clockLength);
    assert Util.equalsNodeId(NodeId.fromInt(2), context, hd + 1 * stepLength);
    assert 2L == context.getLong(hd + 1 * stepLength + idLength);
    assert 2L == context.getLong(hd + 1 * stepLength + idLength + clockLength);
    assert ((CounterColumn) reconciled).timestampOfLastDelete() == Long.MIN_VALUE;
}
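Throughout this test, Long.MIN_VALUE passed as timestampOfLastDelete means "this counter has never been deleted": any real deletion timestamp compares strictly greater, so the reconcile logic needs no special case for it. The convention in miniature (the field name mirrors the test, the logic is illustrative):

    long timestampOfLastDelete = Long.MIN_VALUE; // never deleted
    long tombstoneAt = 4L;
    // a real tombstone always outranks the "never deleted" sentinel
    System.out.println(tombstoneAt > timestampOfLastDelete); // true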
From source file:com.cloudera.sqoop.mapreduce.db.DateSplitter.java
/**
 * Retrieve the value from the column in a type-appropriate manner and
 * return its timestamp since the epoch. If the column is null, then return
 * Long.MIN_VALUE. This will cause a special split to be generated for the
 * NULL case, but may also cause poorly-balanced splits if most of the
 * actual dates are positive time since the epoch, etc.
 */
private long resultSetColToLong(ResultSet rs, int colNum, int sqlDataType) throws SQLException {
    try {
        switch (sqlDataType) {
        case Types.DATE:
            return rs.getDate(colNum).getTime();
        case Types.TIME:
            return rs.getTime(colNum).getTime();
        case Types.TIMESTAMP:
            return rs.getTimestamp(colNum).getTime();
        default:
            throw new SQLException("Not a date-type field");
        }
    } catch (NullPointerException npe) {
        // null column. return minimum long value.
        LOG.warn("Encountered a NULL date in the split column. " + "Splits may be poorly balanced.");
        return Long.MIN_VALUE;
    }
}
From source file:com.intel.stl.ui.monitor.ChartScaleGroupManager.java
/**
 * Description: Calculates min/max for all datasets registered.
 */
protected void calculateRangeBounds() {
    long lower = Long.MAX_VALUE;
    long upper = Long.MIN_VALUE;
    for (E dataset : chartDataMap.values()) {
        long[] minMax = getMinMax(dataset);
        lower = Math.min(lower, minMax[0]);
        upper = Math.max(upper, minMax[1]);
    }
    this.lower = lower;
    this.upper = upper;
}
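Seeding the running bounds with the opposite extremes (lower with Long.MAX_VALUE, upper with Long.MIN_VALUE) guarantees that the first value examined always replaces the seeds. The pattern in isolation:

    long[] values = { 7L, -3L, 42L };
    long lower = Long.MAX_VALUE;
    long upper = Long.MIN_VALUE;
    for (long v : values) {
        lower = Math.min(lower, v);
        upper = Math.max(upper, v);
    }
    System.out.println(lower + " .. " + upper); // -3 .. 42
    // Caveat: with no input at all, the unreplaced seeds leak out as the "bounds".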
From source file:librec.data.DataDAO.java
/**
 * Read data from the database. Note that we don't take care of duplicate lines.
 *
 * @return a sparse matrix storing all the relevant data
 */
public SparseMatrix[] loadData(double binThold, int maxIds) throws Exception {
    Logs.info("Dataset: From database");

    // Table {row-id, col-id, rate}
    Table<Integer, Integer, Double> dataTable = HashBasedTable.create();
    // Table {row-id, col-id, timestamp}
    Table<Integer, Integer, Long> timeTable = null;
    // Map {col-id, multiple row-id}: used to fast build a rating matrix
    Multimap<Integer, Integer> colMap = HashMultimap.create();

    DatabaseManager dbm = new DatabaseManager();
    Connection conn = null;
    PreparedStatement stmnt = null;
    ResultSet rs = null;

    minTimestamp = Long.MAX_VALUE;
    maxTimestamp = Long.MIN_VALUE;

    try {
        conn = dbm.getConnection();
        if (maxIds > 0) {
            stmnt = conn.prepareStatement(
                    "SELECT UserId, ItemId, Time, Rating FROM Rating WHERE ItemId < ? AND UserId < ?;");
            stmnt.setInt(1, maxIds);
            stmnt.setInt(2, maxIds);
        } else {
            stmnt = conn.prepareStatement("SELECT UserId, ItemId, Time, Rating FROM Rating;");
        }
        rs = stmnt.executeQuery();
        while (rs.next()) {
            int user = rs.getInt("UserId");
            int item = rs.getInt("ItemId");

            // convert time to milliseconds
            long mms = rs.getTimestamp("Time").getTime();
            long timestamp = timeUnit.toMillis(mms);

            Double rate = rs.getDouble("Rating");

            // binarize the rating for item recommendation task
            if (binThold >= 0)
                rate = rate > binThold ? 1.0 : 0.0;

            scaleDist.add(rate);

            // inner id starting from 0
            int row = userIds.containsKey(user) ? userIds.get(user) : userIds.size();
            userIds.put(user, row);

            int col = itemIds.containsKey(item) ? itemIds.get(item) : itemIds.size();
            itemIds.put(item, col);

            dataTable.put(row, col, rate);
            colMap.put(col, row);

            // record rating's issuing time
            if (timeTable == null)
                timeTable = HashBasedTable.create();
            if (minTimestamp > timestamp)
                minTimestamp = timestamp;
            if (maxTimestamp < timestamp)
                maxTimestamp = timestamp;
            timeTable.put(row, col, timestamp);
        }
    } catch (SQLException e) {
        e.printStackTrace();
    } finally {
        DbUtils.closeQuietly(stmnt);
        DbUtils.closeQuietly(conn);
        DbUtils.closeQuietly(rs);
    }

    numRatings = scaleDist.size();
    ratingScale = new ArrayList<>(scaleDist.elementSet());
    Collections.sort(ratingScale);

    int numRows = numUsers(), numCols = numItems();

    // if min-rate = 0.0, shift upper a scale
    double minRate = ratingScale.get(0).doubleValue();
    double epsilon = minRate == 0.0 ? ratingScale.get(1).doubleValue() - minRate : 0;
    if (epsilon > 0) {
        // shift upper a scale
        for (int i = 0, im = ratingScale.size(); i < im; i++) {
            double val = ratingScale.get(i);
            ratingScale.set(i, val + epsilon);
        }
        // update data table
        for (int row = 0; row < numRows; row++) {
            for (int col = 0; col < numCols; col++) {
                if (dataTable.contains(row, col))
                    dataTable.put(row, col, dataTable.get(row, col) + epsilon);
            }
        }
    }

    String dateRange = String.format(", Timestamps = {%s, %s}", Dates.toString(minTimestamp),
            Dates.toString(maxTimestamp));
    Logs.debug("With Specs: {Users, {}} = {{}, {}, {}}, Scale = {{}}{}",
            (isItemAsUser ? "Users, Links" : "Items, Ratings"), numRows, numCols, numRatings,
            Strings.toString(ratingScale), dateRange);

    // build rating matrix
    rateMatrix = new SparseMatrix(numRows, numCols, dataTable, colMap);
    if (timeTable != null)
        timeMatrix = new SparseMatrix(numRows, numCols, timeTable, colMap);

    // release memory of data table
    dataTable = null;
    timeTable = null;

    return new SparseMatrix[] { rateMatrix, timeMatrix };
}