List of usage examples for java.lang.Long.MIN_VALUE

public static final long MIN_VALUE: a constant holding the minimum value a long can have, -2^63 (-9223372036854775808).
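Before the examples, one property of the constant is worth remembering: the two's-complement long range is asymmetric, so negating Long.MIN_VALUE overflows back to itself. A minimal standalone sketch:

public class LongMinValueDemo {
    public static void main(String[] args) {
        System.out.println(Long.MIN_VALUE);                    // -9223372036854775808
        System.out.println(Long.MAX_VALUE);                    //  9223372036854775807
        // Two's-complement asymmetry: negation overflows back to MIN_VALUE.
        System.out.println(-Long.MIN_VALUE == Long.MIN_VALUE); // true
        System.out.println(Math.abs(Long.MIN_VALUE));          // still negative!
    }
}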
From source file:com.palantir.atlasdb.keyvalue.impl.InMemoryKeyValueService.java
@Override
public Multimap<Cell, Long> getAllTimestamps(String tableName, Set<Cell> cells, long ts) {
    Multimap<Cell, Long> multimap = HashMultimap.create();
    ConcurrentSkipListMap<Key, byte[]> table = getTableMap(tableName).entries;
    for (Cell key : cells) {
        for (Key entry : table.subMap(new Key(key, Long.MIN_VALUE), new Key(key, ts)).keySet()) {
            multimap.put(key, entry.ts);
        }
    }
    return multimap;
}
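The subMap call above uses new Key(key, Long.MIN_VALUE) as the lowest possible version of a cell, so the scan covers every timestamp strictly below ts (subMap is inclusive-from, exclusive-to). A minimal sketch of the same range-scan idea on a plain ConcurrentSkipListMap keyed by timestamp (the map contents are made up for illustration):

import java.util.concurrent.ConcurrentSkipListMap;

public class TimestampRangeScan {
    public static void main(String[] args) {
        ConcurrentSkipListMap<Long, String> versions = new ConcurrentSkipListMap<>();
        versions.put(10L, "v1");
        versions.put(20L, "v2");
        versions.put(30L, "v3");
        long ts = 30L;
        // Long.MIN_VALUE as the inclusive lower bound selects every version
        // written strictly before ts; subMap(from, to) is [from, to).
        for (long t : versions.subMap(Long.MIN_VALUE, ts).keySet()) {
            System.out.println(t); // prints 10 and 20, not 30
        }
    }
}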
From source file:com.navercorp.pinpoint.common.buffer.FixedBufferTest.java
@Test
public void testPutVLong() throws Exception {
    checkVLong(1);
    checkVLong(-1);
    checkVLong(Long.MAX_VALUE);
    checkVLong(Long.MIN_VALUE);
    checkVLong(Long.MAX_VALUE / 2);
    checkVLong(Long.MIN_VALUE / 2);
    checkVLong(Long.MAX_VALUE / 128);
    checkVLong(Long.MAX_VALUE / 102400);
    checkVLong(900719925474L);
    checkVLong(9007199254L);
    checkVLong(225179981);
    checkVLong(1179981);
    checkVLong(9981);
    checkVLong(127);
    checkVLong(-127);
    checkVLong(0L);
    checkVLong(127L);
    checkVLong(128L);
    checkVLong(16383L);
    checkVLong(16384L);
    checkVLong(268435455L);
    checkVLong(268435456L);
    checkVLong(34359738367L);
    checkVLong(34359738368L);
}
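The values exercised here are classic boundaries for variable-length long encodings: MIN_VALUE and MAX_VALUE, plus values on either side of each 7-bit group limit (127/128, 16383/16384, ...). Pinpoint's putVLong is not shown here, but a zigzag mapping, which many such encoders use for signed values, illustrates why Long.MIN_VALUE is a critical case (the zigzag assumption is mine, not taken from Pinpoint's code):

public class ZigZagDemo {
    // ZigZag maps signed longs to unsigned so small magnitudes encode in few bytes.
    static long encode(long n) { return (n << 1) ^ (n >> 63); }
    static long decode(long n) { return (n >>> 1) ^ -(n & 1); }

    public static void main(String[] args) {
        long[] samples = { 0L, -1L, 127L, -127L, Long.MAX_VALUE, Long.MIN_VALUE };
        for (long v : samples) {
            // Negative printouts are unsigned encodings wrapping in a signed long;
            // Long.MIN_VALUE encodes to all 64 bits set (prints -1) and must round-trip.
            System.out.println(v + " -> " + encode(v) + " -> " + decode(encode(v)));
        }
    }
}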
From source file:eionet.cr.util.Util.java
/**
 * Calculates the estimated number of hashes from the observed extremes.
 *
 * @param minHash
 * @param maxHash
 * @return
 */
public static int calculateHashesCount(long minHash, long maxHash) {
    BigDecimal minValue = new BigDecimal(Long.MIN_VALUE);
    BigDecimal maxValue = new BigDecimal(Long.MAX_VALUE);
    BigDecimal lowKey = new BigDecimal(minHash);
    BigDecimal highKey = new BigDecimal(maxHash);
    BigDecimal distance = maxValue.subtract(highKey).add(lowKey).subtract(minValue);
    BigDecimal hitCount = new BigDecimal(2).pow(64).divide(distance, 0, BigDecimal.ROUND_HALF_UP);
    return hitCount.intValue();
}
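Here distance is the part of the 64-bit space left outside [minHash, maxHash], so 2^64 / distance grows as the observed extremes push toward the ends of the range; for uniformly distributed hashes this gives a rough, order-of-magnitude count estimate. A quick check of the arithmetic with hypothetical extremes that leave half the range uncovered:

import java.math.BigDecimal;

public class HashCountEstimate {
    public static void main(String[] args) {
        // Hypothetical observed extremes, chosen only to make the result easy to verify.
        long minHash = Long.MIN_VALUE / 2;
        long maxHash = Long.MAX_VALUE / 2;
        BigDecimal minValue = new BigDecimal(Long.MIN_VALUE);
        BigDecimal maxValue = new BigDecimal(Long.MAX_VALUE);
        // Gap left outside [minHash, maxHash] within the full 64-bit range.
        BigDecimal distance = maxValue.subtract(new BigDecimal(maxHash))
                .add(new BigDecimal(minHash)).subtract(minValue);
        BigDecimal estimate = new BigDecimal(2).pow(64)
                .divide(distance, 0, BigDecimal.ROUND_HALF_UP);
        System.out.println(estimate); // the gap is half the range, so this prints 2
    }
}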
From source file:com.jcwhatever.nucleus.internal.managed.commands.Arguments.java
@Override
public long getLong(String parameterName) throws InvalidArgumentException {
    return getLong(parameterName, Long.MIN_VALUE, Long.MAX_VALUE);
}
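Long.MIN_VALUE and Long.MAX_VALUE serve as "no bounds" here: the unbounded overload simply delegates to the range-checked one with the widest possible range. The same delegation pattern in a standalone form (the parsing body is a stand-in, not the NucleusFramework API):

public class RangeDelegation {
    // Unbounded lookup delegates to the bounded variant with the widest possible range.
    static long getLong(String rawValue) {
        return getLong(rawValue, Long.MIN_VALUE, Long.MAX_VALUE);
    }

    static long getLong(String rawValue, long min, long max) {
        long value = Long.parseLong(rawValue); // stand-in for a real argument lookup
        if (value < min || value > max) {
            throw new IllegalArgumentException(value + " outside [" + min + ", " + max + "]");
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println(getLong("-9223372036854775808")); // even MIN_VALUE passes the "no bounds" check
    }
}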
From source file:au.edu.anu.portal.portlets.tweetal.servlet.TweetalServlet.java
public void getTweets(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    response.setContentType("application/json");
    PrintWriter out = response.getWriter();
    String userToken = request.getParameter("u");
    String userSecret = request.getParameter("s");
    Twitter twitter = twitterLogic.getTwitterAuthForUser(userToken, userSecret);
    if (twitter == null) {
        // no connection
        response.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
        return;
    }
    String cacheKey = userToken;
    Element element = null;
    // force refresh
    boolean force = Boolean.parseBoolean(request.getParameter("force"));
    if (force) {
        log.debug("force refresh for " + userToken);
        // remove tweets cache
        tweetsCache.remove(cacheKey);
    } else {
        element = tweetsCache.get(cacheKey);
    }
    if (element == null) {
        synchronized (tweetsCache) {
            // if it is still null after acquiring lock
            element = tweetsCache.get(cacheKey);
            if (element == null) {
                log.debug("cache miss: getting tweets for " + userToken);
                System.out.println("Last refreshed: " + Calendar.getInstance().getTime());
                try {
                    ResponseList<Status> friendStatus = twitter.getFriendsTimeline();
                    long maxId = Long.MIN_VALUE;
                    JSONObject json = new JSONObject();
                    lastRefreshed = Calendar.getInstance().getTime().toString();
                    if (lastRefreshed == null) {
                        json.element("lastRefreshed", "unable to retrieve last refreshed");
                    } else {
                        json.element("lastRefreshed", lastRefreshed.toString());
                    }
                    User currentUser = twitter.showUser(twitter.getId());
                    Status lastUserStatus = currentUser.getStatus();
                    if (lastUserStatus == null) {
                        json.element("lastStatusUpdate", "unable to retrieve last status");
                    } else {
                        Date lastStatusUpdate = lastUserStatus.getCreatedAt();
                        json.element("lastStatusUpdate", lastStatusUpdate.toString());
                    }
                    for (Status status : friendStatus) {
                        maxId = Math.max(maxId, status.getId());
                        json.accumulate("statusList", getStatusJSON(twitter, status));
                    }
                    if (log.isDebugEnabled()) {
                        log.debug(json.toString(2));
                    }
                    out.print(json.toString());
                    element = new Element(cacheKey,
                            new TweetsCacheElement(System.currentTimeMillis(), maxId, json));
                    tweetsCache.put(element);
                    return;
                } catch (TwitterException e) {
                    log.error("GetTweets: " + e.getStatusCode() + ": " + e.getClass() + e.getMessage());
                    if (e.getStatusCode() == 401) {
                        // invalid credentials
                        response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
                    } else if (e.getStatusCode() == -1) {
                        // no connection
                        response.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
                    } else {
                        // general error
                        response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                    }
                    return;
                }
            }
        }
    }
    // tweets available in cache
    log.debug("cache hit: getting tweets for " + userToken);
    TweetsCacheElement tweets = (TweetsCacheElement) element.getObjectValue();
    // if just refreshed too quickly, don't request an update, just use whatever is in the cache
    long period = System.currentTimeMillis() - tweets.getLastRefresh();
    System.out.println("Already refreshed: " + (period / 1000) + " second(s) ago");
    if (period < 2 * 60 * 1000) {
        log.debug("refreshed too quickly: " + (period / 1000) + " seconds");
        JSONObject json = tweets.getResult();
        lastRefreshed = Calendar.getInstance().getTime().toString();
        json.element("lastRefreshed", lastRefreshed.toString());
        out.print(json.toString());
        return;
    }
    // get new updates since the last id
    long maxId = tweets.lastId;
    try {
        JSONObject json = tweets.getResult();
        ResponseList<Status> friendStatus = twitter.getFriendsTimeline(new Paging(maxId));
        tweets.setLastRefresh(System.currentTimeMillis());
        log.debug(String.format("Got %d new tweets", friendStatus.size()));
        if (friendStatus.size() > 0) {
            JSONArray newTweets = new JSONArray();
            lastRefreshed = Calendar.getInstance().getTime().toString();
            json.element("lastRefreshed", lastRefreshed.toString());
            for (Status status : friendStatus) {
                maxId = Math.max(maxId, status.getId());
                newTweets.add(getStatusJSON(twitter, status));
            }
            if (log.isDebugEnabled()) {
                log.debug("new tweets:\n" + newTweets.toString(2));
            }
            json.getJSONArray("statusList").addAll(0, newTweets);
            tweets.setLastId(maxId);
            User currentUser = twitter.showUser(twitter.getId());
            Status lastUserStatus = currentUser.getStatus();
            if (lastUserStatus == null) {
                json.element("lastStatusUpdate", "unable to retrieve last status");
            } else {
                Date lastStatusUpdate = lastUserStatus.getCreatedAt();
                json.element("lastStatusUpdate", lastStatusUpdate.toString());
            }
        }
        out.print(json.toString());
    } catch (TwitterException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    } catch (JSONException e) {
        log.error(e);
    }
}
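In both the cache-miss and refresh paths, the newest status id is tracked with maxId = Math.max(maxId, status.getId()), seeded with Long.MIN_VALUE so that any real id replaces the seed, and the seed itself doubles as an "empty timeline" marker. The reduction in isolation:

import java.util.List;

public class MaxIdScan {
    public static void main(String[] args) {
        List<Long> statusIds = List.of(42L, 7L, 99L);
        long maxId = Long.MIN_VALUE; // identity element for max over longs
        for (long id : statusIds) {
            maxId = Math.max(maxId, id);
        }
        // A surviving Long.MIN_VALUE means the input was empty.
        System.out.println(maxId == Long.MIN_VALUE ? "no tweets" : "newest id: " + maxId);
    }
}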
From source file:cn.edu.bjtu.cit.recommender.Recommender.java
@SuppressWarnings("unchecked") public int run(String[] args) throws Exception { if (args.length < 2) { System.err.println();/*from w w w .j a va 2 s . c o m*/ System.err.println("Usage: " + this.getClass().getName() + " [generic options] input output [profiling] [estimation] [clustersize]"); System.err.println(); printUsage(); GenericOptionsParser.printGenericCommandUsage(System.err); return 1; } OptionParser parser = new OptionParser(args); Pipeline pipeline = new MRPipeline(Recommender.class, getConf()); if (parser.hasOption(CLUSTER_SIZE)) { pipeline.getConfiguration().setInt(ClusterOracle.CLUSTER_SIZE, Integer.parseInt(parser.getOption(CLUSTER_SIZE).getValue())); } if (parser.hasOption(PROFILING)) { pipeline.getConfiguration().setBoolean(Profiler.IS_PROFILE, true); this.profileFilePath = parser.getOption(PROFILING).getValue(); } if (parser.hasOption(ESTIMATION)) { estFile = parser.getOption(ESTIMATION).getValue(); est = new Estimator(estFile, clusterSize); } if (parser.hasOption(OPT_REDUCE)) { pipeline.getConfiguration().setBoolean(OPT_REDUCE, true); } if (parser.hasOption(OPT_MSCR)) { pipeline.getConfiguration().setBoolean(OPT_MSCR, true); } if (parser.hasOption(ACTIVE_THRESHOLD)) { threshold = Integer.parseInt(parser.getOption("at").getValue()); } if (parser.hasOption(TOP)) { top = Integer.parseInt(parser.getOption("top").getValue()); } profiler = new Profiler(pipeline); /* * input node */ PCollection<String> lines = pipeline.readTextFile(args[0]); if (profiler.isProfiling() && lines.getSize() > 10 * 1024 * 1024) { lines = lines.sample(0.1); } /* * S0 + GBK */ PGroupedTable<Long, Long> userWithPrefs = lines.parallelDo(new MapFn<String, Pair<Long, Long>>() { @Override public Pair<Long, Long> map(String input) { String[] split = input.split(Estimator.DELM); long userID = Long.parseLong(split[0]); long itemID = Long.parseLong(split[1]); return Pair.of(userID, itemID); } @Override public float scaleFactor() { return est.getScaleFactor("S0").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S0").recsFactor; } }, Writables.tableOf(Writables.longs(), Writables.longs())).groupByKey(est.getClusterSize()); /* * S1 */ PTable<Long, Vector> userVector = userWithPrefs .parallelDo(new MapFn<Pair<Long, Iterable<Long>>, Pair<Long, Vector>>() { @Override public Pair<Long, Vector> map(Pair<Long, Iterable<Long>> input) { Vector userVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 100); for (long itemPref : input.second()) { userVector.set((int) itemPref, 1.0f); } return Pair.of(input.first(), userVector); } @Override public float scaleFactor() { return est.getScaleFactor("S1").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S1").recsFactor; } }, Writables.tableOf(Writables.longs(), Writables.vectors())); userVector = profiler.profile("S0-S1", pipeline, userVector, ProfileConverter.long_vector(), Writables.tableOf(Writables.longs(), Writables.vectors())); /* * S2 */ PTable<Long, Vector> filteredUserVector = userVector .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, Vector>>() { @Override public void process(Pair<Long, Vector> input, Emitter<Pair<Long, Vector>> emitter) { if (input.second().getNumNondefaultElements() > threshold) { emitter.emit(input); } } @Override public float scaleFactor() { return est.getScaleFactor("S2").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S2").recsFactor; } }, Writables.tableOf(Writables.longs(), Writables.vectors())); filteredUserVector = 
profiler.profile("S2", pipeline, filteredUserVector, ProfileConverter.long_vector(), Writables.tableOf(Writables.longs(), Writables.vectors())); /* * S3 + GBK */ PGroupedTable<Integer, Integer> coOccurencePairs = filteredUserVector .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, Integer>>() { @Override public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, Integer>> emitter) { Iterator<Vector.Element> it = input.second().iterateNonZero(); while (it.hasNext()) { int index1 = it.next().index(); Iterator<Vector.Element> it2 = input.second().iterateNonZero(); while (it2.hasNext()) { int index2 = it2.next().index(); emitter.emit(Pair.of(index1, index2)); } } } @Override public float scaleFactor() { float size = est.getScaleFactor("S3").sizeFactor; return size; } @Override public float scaleFactorByRecord() { float recs = est.getScaleFactor("S3").recsFactor; return recs; } }, Writables.tableOf(Writables.ints(), Writables.ints())).groupByKey(est.getClusterSize()); /* * S4 */ PTable<Integer, Vector> coOccurenceVector = coOccurencePairs .parallelDo(new MapFn<Pair<Integer, Iterable<Integer>>, Pair<Integer, Vector>>() { @Override public Pair<Integer, Vector> map(Pair<Integer, Iterable<Integer>> input) { Vector cooccurrenceRow = new RandomAccessSparseVector(Integer.MAX_VALUE, 100); for (int itemIndex2 : input.second()) { cooccurrenceRow.set(itemIndex2, cooccurrenceRow.get(itemIndex2) + 1.0); } return Pair.of(input.first(), cooccurrenceRow); } @Override public float scaleFactor() { return est.getScaleFactor("S4").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S4").recsFactor; } }, Writables.tableOf(Writables.ints(), Writables.vectors())); coOccurenceVector = profiler.profile("S3-S4", pipeline, coOccurenceVector, ProfileConverter.int_vector(), Writables.tableOf(Writables.ints(), Writables.vectors())); /* * S5 Wrapping co-occurrence columns */ PTable<Integer, VectorOrPref> wrappedCooccurrence = coOccurenceVector .parallelDo(new MapFn<Pair<Integer, Vector>, Pair<Integer, VectorOrPref>>() { @Override public Pair<Integer, VectorOrPref> map(Pair<Integer, Vector> input) { return Pair.of(input.first(), new VectorOrPref(input.second())); } @Override public float scaleFactor() { return est.getScaleFactor("S5").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S5").recsFactor; } }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs())); wrappedCooccurrence = profiler.profile("S5", pipeline, wrappedCooccurrence, ProfileConverter.int_vopv(), Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs())); /* * S6 Splitting user vectors */ PTable<Integer, VectorOrPref> userVectorSplit = filteredUserVector .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, VectorOrPref>>() { @Override public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, VectorOrPref>> emitter) { long userID = input.first(); Vector userVector = input.second(); Iterator<Vector.Element> it = userVector.iterateNonZero(); while (it.hasNext()) { Vector.Element e = it.next(); int itemIndex = e.index(); float preferenceValue = (float) e.get(); emitter.emit(Pair.of(itemIndex, new VectorOrPref(userID, preferenceValue))); } } @Override public float scaleFactor() { return est.getScaleFactor("S6").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S6").recsFactor; } }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs())); userVectorSplit = profiler.profile("S6", pipeline, 
userVectorSplit, ProfileConverter.int_vopp(), Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs())); /* * S7 Combine VectorOrPrefs */ PTable<Integer, VectorAndPrefs> combinedVectorOrPref = wrappedCooccurrence.union(userVectorSplit) .groupByKey(est.getClusterSize()) .parallelDo(new DoFn<Pair<Integer, Iterable<VectorOrPref>>, Pair<Integer, VectorAndPrefs>>() { @Override public void process(Pair<Integer, Iterable<VectorOrPref>> input, Emitter<Pair<Integer, VectorAndPrefs>> emitter) { Vector vector = null; List<Long> userIDs = Lists.newArrayList(); List<Float> values = Lists.newArrayList(); for (VectorOrPref vop : input.second()) { if (vector == null) { vector = vop.getVector(); } long userID = vop.getUserID(); if (userID != Long.MIN_VALUE) { userIDs.add(vop.getUserID()); } float value = vop.getValue(); if (!Float.isNaN(value)) { values.add(vop.getValue()); } } emitter.emit(Pair.of(input.first(), new VectorAndPrefs(vector, userIDs, values))); } @Override public float scaleFactor() { return est.getScaleFactor("S7").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S7").recsFactor; } }, Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs())); combinedVectorOrPref = profiler.profile("S5+S6-S7", pipeline, combinedVectorOrPref, ProfileConverter.int_vap(), Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs())); /* * S8 Computing partial recommendation vectors */ PTable<Long, Vector> partialMultiply = combinedVectorOrPref .parallelDo(new DoFn<Pair<Integer, VectorAndPrefs>, Pair<Long, Vector>>() { @Override public void process(Pair<Integer, VectorAndPrefs> input, Emitter<Pair<Long, Vector>> emitter) { Vector cooccurrenceColumn = input.second().getVector(); List<Long> userIDs = input.second().getUserIDs(); List<Float> prefValues = input.second().getValues(); for (int i = 0; i < userIDs.size(); i++) { long userID = userIDs.get(i); if (userID != Long.MIN_VALUE) { float prefValue = prefValues.get(i); Vector partialProduct = cooccurrenceColumn.times(prefValue); emitter.emit(Pair.of(userID, partialProduct)); } } } @Override public float scaleFactor() { return est.getScaleFactor("S8").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S8").recsFactor; } }, Writables.tableOf(Writables.longs(), Writables.vectors())).groupByKey(est.getClusterSize()) .combineValues(new CombineFn<Long, Vector>() { @Override public void process(Pair<Long, Iterable<Vector>> input, Emitter<Pair<Long, Vector>> emitter) { Vector partial = null; for (Vector vector : input.second()) { partial = partial == null ? 
vector : partial.plus(vector); } emitter.emit(Pair.of(input.first(), partial)); } @Override public float scaleFactor() { return est.getScaleFactor("combine").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("combine").recsFactor; } }); partialMultiply = profiler.profile("S8-combine", pipeline, partialMultiply, ProfileConverter.long_vector(), Writables.tableOf(Writables.longs(), Writables.vectors())); /* * S9 Producing recommendations from vectors */ PTable<Long, RecommendedItems> recommendedItems = partialMultiply .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, RecommendedItems>>() { @Override public void process(Pair<Long, Vector> input, Emitter<Pair<Long, RecommendedItems>> emitter) { Queue<RecommendedItem> topItems = new PriorityQueue<RecommendedItem>(11, Collections.reverseOrder(BY_PREFERENCE_VALUE)); Iterator<Vector.Element> recommendationVectorIterator = input.second().iterateNonZero(); while (recommendationVectorIterator.hasNext()) { Vector.Element element = recommendationVectorIterator.next(); int index = element.index(); float value = (float) element.get(); if (topItems.size() < top) { topItems.add(new GenericRecommendedItem(index, value)); } else if (value > topItems.peek().getValue()) { topItems.add(new GenericRecommendedItem(index, value)); topItems.poll(); } } List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size()); recommendations.addAll(topItems); Collections.sort(recommendations, BY_PREFERENCE_VALUE); emitter.emit(Pair.of(input.first(), new RecommendedItems(recommendations))); } @Override public float scaleFactor() { return est.getScaleFactor("S9").sizeFactor; } @Override public float scaleFactorByRecord() { return est.getScaleFactor("S9").recsFactor; } }, Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems())); recommendedItems = profiler.profile("S9", pipeline, recommendedItems, ProfileConverter.long_ri(), Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems())); /* * Profiling */ if (profiler.isProfiling()) { profiler.writeResultToFile(profileFilePath); profiler.cleanup(pipeline.getConfiguration()); return 0; } /* * asText */ pipeline.writeTextFile(recommendedItems, args[1]); PipelineResult result = pipeline.done(); return result.succeeded() ? 0 : 1; }
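In stages S7 and S8, records from the two unioned tables are told apart by a sentinel: a VectorOrPref that wraps a co-occurrence vector reports Long.MIN_VALUE as its user id, so the check userID != Long.MIN_VALUE keeps only real preferences. A sketch of that convention with a hypothetical stand-in for VectorOrPref:

import java.util.ArrayList;
import java.util.List;

public class SentinelUnion {
    // Hypothetical stand-in: either a vector or a (user, pref) pair, never both.
    record VectorOrPref(double[] vector, long userID, float value) {
        static VectorOrPref ofVector(double[] v) {
            return new VectorOrPref(v, Long.MIN_VALUE, Float.NaN); // sentinel user id
        }
        static VectorOrPref ofPref(long userID, float value) {
            return new VectorOrPref(null, userID, value);
        }
    }

    public static void main(String[] args) {
        List<VectorOrPref> mixed = List.of(
                VectorOrPref.ofVector(new double[] { 1, 0, 2 }),
                VectorOrPref.ofPref(101L, 1.0f),
                VectorOrPref.ofPref(102L, 3.0f));
        double[] vector = null;
        List<Long> userIDs = new ArrayList<>();
        for (VectorOrPref vop : mixed) {
            if (vop.vector() != null) {
                vector = vop.vector();
            }
            if (vop.userID() != Long.MIN_VALUE) { // sentinel check, as in stage S7
                userIDs.add(vop.userID());
            }
        }
        System.out.println("vector seen: " + (vector != null) + ", users: " + userIDs);
    }
}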
From source file:com.marianhello.cordova.bgloc.LocationUpdateService.java
/**
 * Returns the most accurate and timely previously detected location.
 * The minimum acceptable distance is derived from the stationary radius,
 * and the minimum acceptable time from the configured location timeout.
 *
 * @return The most accurate and / or timely previously detected location,
 *         or null if no provider has a qualifying last known location.
 */
public Location getLastBestLocation() {
    int minDistance = (int) stationaryRadius;
    long minTime = System.currentTimeMillis() - (locationTimeout * 1000);
    Log.i(TAG, "- fetching last best location " + minDistance + "," + minTime);
    Location bestResult = null;
    float bestAccuracy = Float.MAX_VALUE;
    long bestTime = Long.MIN_VALUE;
    // Iterate through all the providers on the system, keeping
    // note of the most accurate result within the acceptable time limit.
    List<String> matchingProviders = locationManager.getAllProviders();
    for (String provider : matchingProviders) {
        Log.d(TAG, "- provider: " + provider);
        Location location = locationManager.getLastKnownLocation(provider);
        if (location != null) {
            Log.d(TAG, "  location: " + location.getLatitude() + "," + location.getLongitude() + ","
                    + location.getAccuracy() + "," + location.getSpeed() + "m/s");
            float accuracy = location.getAccuracy();
            long time = location.getTime();
            Log.d(TAG, "time>minTime: " + (time > minTime) + ", accuracy<bestAccuracy: "
                    + (accuracy < bestAccuracy));
            if (time > minTime && accuracy < bestAccuracy) {
                bestResult = location;
                bestAccuracy = accuracy;
                bestTime = time;
            }
        }
    }
    return bestResult;
}
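bestAccuracy and bestTime are seeded with Float.MAX_VALUE and Long.MIN_VALUE so that the first fix passing the time filter always wins. The selection logic in isolation, with a hypothetical Fix record standing in for android.location.Location:

import java.util.List;

public class BestFixSelection {
    record Fix(long time, float accuracy) { } // hypothetical stand-in for Location

    public static void main(String[] args) {
        List<Fix> fixes = List.of(new Fix(1_000L, 50f), new Fix(2_000L, 12f), new Fix(500L, 5f));
        long minTime = 900L;                  // reject anything older than this
        Fix best = null;
        float bestAccuracy = Float.MAX_VALUE; // any real accuracy beats the seed
        long bestTime = Long.MIN_VALUE;       // any real timestamp beats the seed
        for (Fix fix : fixes) {
            // Mirrors the original: only recency gates, accuracy drives the comparison.
            if (fix.time() > minTime && fix.accuracy() < bestAccuracy) {
                best = fix;
                bestAccuracy = fix.accuracy();
                bestTime = fix.time();
            }
        }
        System.out.println(best); // Fix[time=2000, accuracy=12.0]
    }
}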
From source file:gobblin.compaction.mapreduce.MRCompactorJobRunner.java
/**
 * For regular compactions, the compaction timestamp is the time the compaction job starts.
 *
 * If this is a recompaction from output paths, the compaction timestamp will remain the same as the previously
 * persisted compaction time. This is because such a recompaction doesn't consume input data, so next time,
 * whether a file in the input folder is considered a late file should still be based on the previous compaction
 * timestamp.
 */
private DateTime getCompactionTimestamp() throws IOException {
    DateTimeZone timeZone = DateTimeZone.forID(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE,
            MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
    if (!this.recompactFromDestPaths) {
        return new DateTime(timeZone);
    }
    Set<Path> inputPaths = getInputPaths();
    long maxTimestamp = Long.MIN_VALUE;
    for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, inputPaths)) {
        maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
    }
    return maxTimestamp == Long.MIN_VALUE ? new DateTime(timeZone) : new DateTime(maxTimestamp, timeZone);
}
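The final ternary treats a surviving Long.MIN_VALUE as "no input files" and falls back to the current time. The same behavior can also be written without a sentinel; a minimal alternative using OptionalLong, assuming the same fallback semantics:

import java.util.List;
import java.util.OptionalLong;

public class MaxModTime {
    static long latestModTime(List<Long> modificationTimes) {
        // max() returns an empty OptionalLong for no input, replacing the sentinel check.
        OptionalLong max = modificationTimes.stream().mapToLong(Long::longValue).max();
        return max.orElseGet(System::currentTimeMillis);
    }

    public static void main(String[] args) {
        System.out.println(latestModTime(List.of(1_700_000_000_000L, 1_700_000_500_000L)));
        System.out.println(latestModTime(List.of())); // empty input falls back to "now"
    }
}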
From source file:com.clustercontrol.maintenance.dialog.HinemosPropertyDialog.java
/**
 * Validates the dialog input and registers or modifies the Hinemos property.
 *
 * @return true on success, false otherwise
 *
 * @see com.clustercontrol.dialog.CommonDialog#action()
 */
@Override
protected boolean action() {
    boolean result = false;
    if (HinemosPropertyInfo == null) {
        this.HinemosPropertyInfo = new HinemosPropertyInfo();
    }
    HinemosPropertyInfo.setKey(m_key.getText());
    HinemosPropertyInfo.setValueType(valueType);
    if (valueType == HinemosPropertyTypeConstant.TYPE_STRING) {
        HinemosPropertyInfo.setValueString(m_value.getText());
    } else if (valueType == HinemosPropertyTypeConstant.TYPE_NUMERIC) {
        if (m_value.getText() == null || m_value.getText().trim().length() == 0) {
            HinemosPropertyInfo.setValueNumeric(null);
        } else {
            try {
                HinemosPropertyInfo.setValueNumeric(Long.parseLong(m_value.getText().trim()));
            } catch (NumberFormatException e) {
                m_log.info("action() setValueNumeric, " + e.getMessage());
                Object[] args = { Messages.getString("hinemos.property.value"), Long.MIN_VALUE,
                        Long.MAX_VALUE };
                MessageDialog.openError(null, Messages.getString("failed"),
                        Messages.getString("message.common.4", args));
                return false;
            }
        }
    } else {
        HinemosPropertyInfo.setValueBoolean(Boolean.parseBoolean(m_blCombo.getText()));
    }
    HinemosPropertyInfo.setDescription(m_textDescription.getText());
    HinemosPropertyInfo.setOwnerRoleId(RoleIdConstant.ADMINISTRATORS);
    if (this.mode == PropertyDefineConstant.MODE_ADD) {
        // Register the new property with the selected manager.
        String managerName = this.m_managerComposite.getText();
        String[] args = { HinemosPropertyInfo.getKey(), managerName };
        try {
            HinemosPropertyEndpointWrapper wrapper = HinemosPropertyEndpointWrapper.getWrapper(managerName);
            wrapper.addHinemosProperty(HinemosPropertyInfo);
            MessageDialog.openInformation(null, Messages.getString("successful"),
                    Messages.getString("message.hinemos.property.2", args));
            result = true;
        } catch (HinemosPropertyDuplicate_Exception e) {
            // A property with the same key already exists.
            MessageDialog.openInformation(null, Messages.getString("message"),
                    Messages.getString("message.hinemos.property.10", args));
        } catch (Exception e) {
            String errMessage = "";
            if (e instanceof InvalidRole_Exception) {
                MessageDialog.openInformation(null, Messages.getString("message"),
                        Messages.getString("message.accesscontrol.16"));
            } else {
                errMessage = ", " + HinemosMessage.replace(e.getMessage());
            }
            MessageDialog.openError(null, Messages.getString("failed"),
                    Messages.getString("message.hinemos.property.3", args) + errMessage);
        }
    } else if (this.mode == PropertyDefineConstant.MODE_MODIFY) {
        // Modify the existing property.
        result = new ModifyHinemosProperty().modify(m_managerComposite.getText(), HinemosPropertyInfo);
    }
    return result;
}
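The NumberFormatException branch reports the legal range [Long.MIN_VALUE, Long.MAX_VALUE] back to the user; Long.parseLong rejects both malformed text and values whose magnitude falls outside that range. A standalone illustration of which inputs fail:

public class ParseLongRange {
    static void tryParse(String s) {
        try {
            System.out.println(s + " -> " + Long.parseLong(s.trim()));
        } catch (NumberFormatException e) {
            // Mirror the dialog: tell the user the acceptable range.
            System.out.println(s + " -> invalid; must be between "
                    + Long.MIN_VALUE + " and " + Long.MAX_VALUE);
        }
    }

    public static void main(String[] args) {
        tryParse("-9223372036854775808"); // exactly MIN_VALUE: parses
        tryParse("-9223372036854775809"); // one below MIN_VALUE: NumberFormatException
        tryParse("12abc");                // malformed: NumberFormatException
    }
}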
From source file:com.korrelate.pig.hbase.HBaseStorage.java
/**
 * Constructor. Construct an HBase Table LoadFunc and StoreFunc to load or store.
 * @param columnList
 * @param optString Loader options. Known options:<ul>
 * <li>-loadKey=(true|false) Load the row key as the first column
 * <li>-gt=minKeyVal
 * <li>-lt=maxKeyVal
 * <li>-gte=minKeyVal
 * <li>-lte=maxKeyVal
 * <li>-limit=numRowsPerRegion max number of rows to retrieve per region
 * <li>-delim=char delimiter to use when parsing column names (default is space or comma)
 * <li>-ignoreWhitespace=(true|false) ignore spaces when parsing column names (default true)
 * <li>-caching=numRows number of rows to cache (faster scans, more memory)
 * <li>-noWAL=(true|false) Sets the write ahead to false for faster loading
 * <li>-minTimestamp= Scan's timestamp for min timeRange
 * <li>-maxTimestamp= Scan's timestamp for max timeRange
 * <li>-timestamp= Scan's specified timestamp
 * <li>-caster=(HBaseBinaryConverter|Utf8StorageConverter) Utf8StorageConverter is the default
 * <li>-timestampVersionFieldIndex=fieldIndex the index of the input field to use as the HBase row version
 * (default -1, which means the current timestamp is used). To be used with extreme caution, since this
 * could result in data loss (see http://hbase.apache.org/book.html#perf.hbase.client.putwal).
 * </ul>
 * @throws ParseException
 * @throws IOException
 */
public HBaseStorage(String columnList, String optString) throws ParseException, IOException {
    populateValidOptions();
    String[] optsArr = optString.split(" ");
    try {
        configuredOptions_ = parser_.parse(validOptions_, optsArr);
    } catch (ParseException e) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(
                "[-loadKey] [-gt] [-gte] [-lt] [-lte] [-columnPrefix] [-caching] [-caster] [-noWAL] [-limit] "
                        + "[-delim] [-ignoreWhitespace] [-minTimestamp] [-maxTimestamp] [-timestamp]",
                validOptions_);
        throw e;
    }
    loadRowKey_ = configuredOptions_.hasOption("loadKey");
    delimiter_ = ",";
    if (configuredOptions_.getOptionValue("delim") != null) {
        delimiter_ = configuredOptions_.getOptionValue("delim");
    }
    ignoreWhitespace_ = true;
    if (configuredOptions_.hasOption("ignoreWhitespace")) {
        String value = configuredOptions_.getOptionValue("ignoreWhitespace");
        if (!"true".equalsIgnoreCase(value)) {
            ignoreWhitespace_ = false;
        }
    }
    columnInfo_ = parseColumnList(columnList, delimiter_, ignoreWhitespace_);
    String defaultCaster = UDFContext.getUDFContext().getClientSystemProps().getProperty(CASTER_PROPERTY,
            STRING_CASTER);
    String casterOption = configuredOptions_.getOptionValue("caster", defaultCaster);
    if (STRING_CASTER.equalsIgnoreCase(casterOption)) {
        caster_ = new Utf8StorageConverter();
    } else if (BYTE_CASTER.equalsIgnoreCase(casterOption)) {
        caster_ = new HBaseBinaryConverter();
    } else {
        try {
            caster_ = (LoadCaster) PigContext.instantiateFuncFromSpec(casterOption);
        } catch (ClassCastException e) {
            LOG.error("Configured caster does not implement LoadCaster interface.");
            throw new IOException(e);
        } catch (RuntimeException e) {
            LOG.error("Configured caster class not found.", e);
            throw new IOException(e);
        }
    }
    LOG.debug("Using caster " + caster_.getClass());
    caching_ = Integer.valueOf(configuredOptions_.getOptionValue("caching", "100"));
    limit_ = Long.valueOf(configuredOptions_.getOptionValue("limit", "-1"));
    noWAL_ = configuredOptions_.hasOption("noWAL");
    if (configuredOptions_.hasOption("minTimestamp")) {
        minTimestamp_ = Long.parseLong(configuredOptions_.getOptionValue("minTimestamp"));
    } else {
        minTimestamp_ = Long.MIN_VALUE;
    }
    if (configuredOptions_.hasOption("maxTimestamp")) {
        maxTimestamp_ = Long.parseLong(configuredOptions_.getOptionValue("maxTimestamp"));
    } else {
        maxTimestamp_ = Long.MAX_VALUE;
    }
    if (configuredOptions_.hasOption("timestamp")) {
        timestamp_ = Long.parseLong(configuredOptions_.getOptionValue("timestamp"));
    } else {
        timestamp_ = 0;
    }
    if (configuredOptions_.hasOption("timestampVersionFieldIndex")) {
        timestampVersionFieldIndex_ = Integer
                .parseInt(configuredOptions_.getOptionValue("timestampVersionFieldIndex"));
    } else {
        timestampVersionFieldIndex_ = -1;
    }
    initScan();
}
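When -minTimestamp and -maxTimestamp are not given, the defaults Long.MIN_VALUE and Long.MAX_VALUE leave the scan's time range effectively unbounded. A minimal sketch of the same defaulting with commons-cli (DefaultParser is my choice here; HBaseStorage's parser_ field is configured elsewhere):

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;

public class TimeRangeDefaults {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("minTimestamp", true, "scan time range lower bound");
        options.addOption("maxTimestamp", true, "scan time range upper bound");
        CommandLine cli = new DefaultParser().parse(options, args);

        // Absent options widen the range to "everything", as HBaseStorage does.
        long minTimestamp = cli.hasOption("minTimestamp")
                ? Long.parseLong(cli.getOptionValue("minTimestamp")) : Long.MIN_VALUE;
        long maxTimestamp = cli.hasOption("maxTimestamp")
                ? Long.parseLong(cli.getOptionValue("maxTimestamp")) : Long.MAX_VALUE;
        System.out.println("time range: [" + minTimestamp + ", " + maxTimestamp + ")");
    }
}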