List of usage examples for java.util Comparator compare
int compare(T o1, T o2);
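Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of implementing and calling compare: a negative result means the first argument sorts first, zero means equal ordering, positive means it sorts last.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class CompareDemo {
    public static void main(String[] args) {
        // Order strings by length; break ties alphabetically.
        Comparator<String> byLength = new Comparator<String>() {
            @Override
            public int compare(String o1, String o2) {
                int c = Integer.compare(o1.length(), o2.length());
                return c != 0 ? c : o1.compareTo(o2);
            }
        };

        List<String> words = new ArrayList<>(Arrays.asList("pear", "fig", "banana"));
        words.sort(byLength);
        System.out.println(words);                             // [fig, pear, banana]
        System.out.println(byLength.compare("fig", "banana")); // negative: "fig" sorts before "banana"
    }
}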
From source file: org.briljantframework.array.Arrays.java

/**
 * Sort the array according to the given comparator.
 *
 * @param array the array
 * @param comparator the comparator
 * @param <T> the element type
 * @return a new array
 */
public static <T> Array<T> sort(Array<T> array, Comparator<T> comparator) {
    return sort(array, (a, i, j) -> comparator.compare(a.get(i), a.get(j)));
}
From source file: org.briljantframework.array.Arrays.java

/**
 * Sort each vector along the specified dimension according to the given comparator.
 *
 * @param dim the dimension
 * @param array the array
 * @param comparator the comparator
 * @param <T> the element type
 * @return a new array
 */
public static <T> Array<T> sort(int dim, Array<T> array, Comparator<T> comparator) {
    return sort(dim, array, (a, i, j) -> comparator.compare(a.get(i), a.get(j)));
}
From source file: org.softwareforge.struts2.breadcrumb.BreadCrumbInterceptor.java

private void doIntercept(ActionInvocation invocation, BreadCrumb annotation) {
    UtilTimerStack.push(TIMER_KEY + "doIntercept");
    if (annotation != null) {
        Crumb current = makeCrumb(invocation, annotation.value());

        // get the bread crumbs trail
        BreadCrumbTrail trail = getBreadCrumbTrail(invocation);

        // retrieve default configuration
        RewindMode mode = defaultRewindMode;
        Comparator<Crumb> comparator = defaultComparator;
        int maxCrumbs = trail.maxCrumbs;

        // override defaults (if required)
        if (annotation.rewind() != RewindMode.DEFAULT)
            mode = annotation.rewind();
        if (annotation.comparator() != BreadCrumb.NULL.class) {
            comparator = plugin.lookupComparatorByClass(annotation.comparator());
        }

        /*
         * Retrieve stored crumbs and synchronize on them.
         *
         * The synchronized region is needed to prevent
         * ConcurrentModificationException(s) for requests (operating on the
         * same session) that want to modify the bread crumbs trail.
         */
        Stack<Crumb> crumbs = trail.getCrumbs();
        synchronized (crumbs) {
            LOG.trace("acquired lock on crumbs " + crumbs);
            Crumb last = (crumbs.size() == 0) ? null : crumbs.lastElement();

            // compare current and last crumbs
            if (comparator.compare(current, last) != 0) {
                int dupIdx = trail.indexOf(current, comparator);
                if (mode == RewindMode.AUTO && dupIdx != -1) {
                    trail.rewindAt(dupIdx - 1);
                }
                crumbs.push(current);
                if (crumbs.size() > maxCrumbs)
                    crumbs.remove(0);
            } else {
                if (crumbs.size() > 0) {
                    crumbs.remove(crumbs.size() - 1);
                    crumbs.push(current);
                }
            }
            LOG.trace("releasing lock on crumbs");
        } // synchronized
    }
    UtilTimerStack.pop(TIMER_KEY + "doIntercept");
}
From source file: org.apache.solr.SolrTestCaseJ4.java

public static Comparator<Doc> createComparator(final List<Comparator<Doc>> comparators) {
    return (o1, o2) -> {
        int c = 0;
        for (Comparator<Doc> comparator : comparators) {
            c = comparator.compare(o1, o2);
            if (c != 0)
                return c;
        }
        return o1.order - o2.order;
    };
}
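The chaining above (try each comparator in turn, then fall back to insertion order) is the same pattern that java.util.Comparator offers through comparing/thenComparing since Java 8. A rough sketch with a stand-in Item class (not Solr's Doc):

import java.util.Comparator;

public class ChainedComparatorDemo {
    // Stand-in for a document with a sortable field and an insertion order.
    static class Item {
        final String field;
        final int order;
        Item(String field, int order) { this.field = field; this.order = order; }
    }

    public static void main(String[] args) {
        // Compare by field first; on a tie, fall back to insertion order,
        // mirroring the o1.order - o2.order tiebreak above.
        Comparator<Item> chained = Comparator
                .comparing((Item i) -> i.field)
                .thenComparingInt(i -> i.order);

        System.out.println(chained.compare(new Item("a", 1), new Item("a", 2))); // negative: tie broken by order
        System.out.println(chained.compare(new Item("b", 1), new Item("a", 2))); // positive: "b" sorts after "a"
    }
}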
From source file: jetbrains.buildServer.clouds.azure.arm.connector.AzureApiConnectorImpl.java

/**
 * Gets a list of VM sizes.
 *
 * @return list of sizes.
 */
@NotNull
@Override
public Promise<List<String>, Throwable, Void> getVmSizesAsync() {
    final DeferredObject<List<String>, Throwable, Void> deferred = new DeferredObject<>();
    try {
        PagedList<VirtualMachineSize> vmSizes = myAzure.withSubscription(mySubscriptionId).virtualMachines()
                .sizes().listByRegion(myLocation);
        LOG.debug("Received list of vm sizes in location " + myLocation);

        final Comparator<String> comparator = new AlphaNumericStringComparator();
        Collections.sort(vmSizes, new Comparator<VirtualMachineSize>() {
            @Override
            public int compare(VirtualMachineSize o1, VirtualMachineSize o2) {
                final String size1 = o1.name();
                final String size2 = o2.name();
                return comparator.compare(size1, size2);
            }
        });

        final List<String> sizes = new ArrayList<>(vmSizes.size());
        for (VirtualMachineSize vmSize : vmSizes) {
            sizes.add(vmSize.name());
        }
        deferred.resolve(sizes);
    } catch (Throwable t) {
        final String message = String.format("Failed to get list of vm sizes in location %s: %s",
                myLocation, t.getMessage());
        LOG.debug(message, t);
        final CloudException exception = new CloudException(message, t);
        deferred.reject(exception);
    }
    return deferred.promise();
}
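Since the sort key here is just o.name(), the anonymous Comparator<VirtualMachineSize> could also be written with Comparator.comparing plus a key comparator. A rough sketch using plain String[] entries as stand-ins for the Azure VirtualMachineSize type, and String.CASE_INSENSITIVE_ORDER standing in for the TeamCity-specific AlphaNumericStringComparator:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class KeyExtractorDemo {
    public static void main(String[] args) {
        // Stand-in for AlphaNumericStringComparator (not the real class).
        Comparator<String> nameOrder = String.CASE_INSENSITIVE_ORDER;

        // Each entry stands in for a VirtualMachineSize: {name, description}.
        List<String[]> vmSizes = new ArrayList<>();
        vmSizes.add(new String[] { "Standard_D2", "2 cores" });
        vmSizes.add(new String[] { "Standard_A1", "1 core" });

        // comparing extracts the sort key (the name) and delegates to nameOrder,
        // replacing the anonymous Comparator in the example above.
        vmSizes.sort(Comparator.comparing((String[] s) -> s[0], nameOrder));
        System.out.println(vmSizes.get(0)[0]); // Standard_A1
    }
}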
From source file: com.linkedin.helix.store.file.FilePropertyStore.java

@Override
public boolean compareAndSet(String key, T expected, T update, Comparator<T> comparator, boolean createIfAbsent) {
    String path = getPath(key);
    File file = new File(path);
    RandomAccessFile raFile = null;
    FileLock fLock = null;
    try {
        _readWriteLock.writeLock().lock();

        if (createIfAbsent) {
            file.createNewFile();
        }

        raFile = new RandomAccessFile(file, "rw");
        FileChannel fChannel = raFile.getChannel();
        fLock = fChannel.lock();

        T current = getProperty(key);
        if (comparator.compare(current, expected) == 0) {
            setProperty(key, update);
            return true;
        }
        return false;
    } catch (FileNotFoundException e) {
        logger.error("fail to compareAndSet. path:" + path, e);
        return false;
    } catch (Exception e) {
        logger.error("fail to compareAndSet. path:" + path, e);
        return false;
    } finally {
        _readWriteLock.writeLock().unlock();

        try {
            if (fLock != null && fLock.isValid()) {
                fLock.release();
            }
            if (raFile != null) {
                raFile.close();
            }
        } catch (IOException e) {
            logger.error("fail to close file. path:" + path, e);
        }
    }
}
From source file: org.apache.hadoop.hbase.regionserver.wal.TestHLog.java

/**
 * Tests the log comparator. Ensures that we are not mixing meta logs with non-meta logs
 * (an exception is thrown if we do). Comparison is based on the timestamp present in the wal name.
 * @throws Exception
 */
@Test
public void testHLogComparator() throws Exception {
    HLog hlog1 = null;
    HLog hlogMeta = null;
    try {
        hlog1 = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf);
        LOG.debug("Log obtained is: " + hlog1);
        Comparator<Path> comp = ((FSHLog) hlog1).LOG_NAME_COMPARATOR;
        Path p1 = ((FSHLog) hlog1).computeFilename(11);
        Path p2 = ((FSHLog) hlog1).computeFilename(12);

        // comparing a path with itself returns 0
        assertTrue(comp.compare(p1, p1) == 0);
        // comparing paths with different filenums
        assertTrue(comp.compare(p1, p2) < 0);

        hlogMeta = HLogFactory.createMetaHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf, null, null);
        Comparator<Path> compMeta = ((FSHLog) hlogMeta).LOG_NAME_COMPARATOR;

        Path p1WithMeta = ((FSHLog) hlogMeta).computeFilename(11);
        Path p2WithMeta = ((FSHLog) hlogMeta).computeFilename(12);
        assertTrue(compMeta.compare(p1WithMeta, p1WithMeta) == 0);
        assertTrue(compMeta.compare(p1WithMeta, p2WithMeta) < 0);

        // mixing meta and non-meta logs gives an error
        boolean ex = false;
        try {
            comp.compare(p1WithMeta, p2);
        } catch (Exception e) {
            ex = true;
        }
        assertTrue("Comparator doesn't complain while checking meta log files", ex);

        boolean exMeta = false;
        try {
            compMeta.compare(p1WithMeta, p2);
        } catch (Exception e) {
            exMeta = true;
        }
        assertTrue("Meta comparator doesn't complain while checking log files", exMeta);
    } finally {
        if (hlog1 != null) hlog1.close();
        if (hlogMeta != null) hlogMeta.close();
    }
}
From source file: dk.statsbiblioteket.util.LineReader.java

/**
 * Find the start position of a line matching the given query.
 * A binary search is used, which requires the user of the LineReader to
 * maintain a specific structure and a matching comparator.
 *
 * The expected structure is UTF-8 with newline {@code "\n"} as the
 * line delimiter. As the newline byte {@code 0x0A} is never part of a
 * valid multi-byte UTF-8 character, this should pose no problems.
 *
 * Searching for an empty line is not supported. Escaping of line breaks is
 * the responsibility of the user.
 *
 * Recommendation: Call {@link #setBufferSize(int)} with an amount
 * corresponding to the line length. Keep in mind that binary searching
 * often results in a lot of lookups around the same position at the end
 * of the search, so choosing the average length of a single line as the
 * buffer size is probably too small. If the lines are short (< 20 chars),
 * use a value such as 400. If the lines are long (~100 chars), go for
 * 1000 or 2000. If the lines are very long (1000+), consider 4000 or 8000.
 * These are soft guidelines, as the best values also depend on the
 * characteristics of the underlying storage: SSDs will normally benefit the
 * most from relatively small values, while conventional hard disks are
 * better off with larger values, as they minimize seeks.
 *
 * @param comparator used for the binary search. If the comparator is null, the default String.compareTo is used.
 *                   The comparator will be used as compare(query, line).
 * @param query      the element to look for. If comparator is null, this should be a full line.
 * @return the index of the query or {@code -(insertion point)-1} if it could not be found.
 * @throws IOException if reads of the underlying file failed.
 */
public long binaryLineSearch(Comparator<String> comparator, String query) throws IOException {
    long low = 0;
    long high = length() - 1;
    while (low <= high) {
        long mid = (low + high) >>> 1;
        seek(mid);
        if (mid != 0) {
            // skip forward to the start of the next line
            //noinspection StatementWithEmptyBody
            while (!eof() && readByte() != '\n') ;
        }
        if (eof()) {
            high = mid - 1;
            continue;
        }

        // Remember the line start position to return if we have a match
        long lineStart = getPosition();
        String line = readLine();
        int cmp = comparator == null ? query.compareTo(line) : comparator.compare(query, line);

        // Halve or return
        if (cmp < 0) {
            high = mid - 1;
        } else if (cmp > 0) {
            low = mid + 1;
        } else {
            return lineStart;
        }
    }
    return -(low + 1); // TODO: Should this be based on lineStart?
}
From source file: io.druid.query.aggregation.hyperloglog.HyperLogLogCollectorTest.java

@Test
public void testCompare2() throws Exception {
    Random rand = new Random(0);
    HyperUniquesAggregatorFactory factory = new HyperUniquesAggregatorFactory("foo", "bar");
    Comparator comparator = factory.getComparator();

    for (int i = 1; i < 1000; ++i) {
        HyperLogLogCollector collector1 = HyperLogLogCollector.makeLatestCollector();
        int j = rand.nextInt(50);
        for (int l = 0; l < j; ++l) {
            collector1.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        HyperLogLogCollector collector2 = HyperLogLogCollector.makeLatestCollector();
        int k = j + 1 + rand.nextInt(5);
        for (int l = 0; l < k; ++l) {
            collector2.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        Assert.assertEquals(Double.compare(collector1.estimateCardinality(), collector2.estimateCardinality()),
                comparator.compare(collector1, collector2));
    }

    for (int i = 1; i < 100; ++i) {
        HyperLogLogCollector collector1 = HyperLogLogCollector.makeLatestCollector();
        int j = rand.nextInt(500);
        for (int l = 0; l < j; ++l) {
            collector1.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        HyperLogLogCollector collector2 = HyperLogLogCollector.makeLatestCollector();
        int k = j + 2 + rand.nextInt(5);
        for (int l = 0; l < k; ++l) {
            collector2.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        Assert.assertEquals(Double.compare(collector1.estimateCardinality(), collector2.estimateCardinality()),
                comparator.compare(collector1, collector2));
    }

    for (int i = 1; i < 10; ++i) {
        HyperLogLogCollector collector1 = HyperLogLogCollector.makeLatestCollector();
        int j = rand.nextInt(100000);
        for (int l = 0; l < j; ++l) {
            collector1.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        HyperLogLogCollector collector2 = HyperLogLogCollector.makeLatestCollector();
        int k = j + 20000 + rand.nextInt(100000);
        for (int l = 0; l < k; ++l) {
            collector2.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        Assert.assertEquals(Double.compare(collector1.estimateCardinality(), collector2.estimateCardinality()),
                comparator.compare(collector1, collector2));
    }
}
From source file: eu.stratosphere.pact.runtime.sort.ExternalSortITCase.java

@Test
public void testInMemorySort() throws Exception {
    // comparator
    final Comparator<TestData.Key> keyComparator = new TestData.KeyComparator();

    final TestData.Generator generator = new TestData.Generator(SEED, KEY_MAX, VALUE_LENGTH, KeyMode.RANDOM,
            ValueMode.CONSTANT, VAL);
    final MutableObjectIterator<Record> source = new TestData.GeneratorIterator(generator, NUM_PAIRS);

    // merge iterator
    LOG.debug("Initializing sortmerger...");
    Sorter<Record> merger = new UnilateralSortMerger<Record>(this.memoryManager, this.ioManager, source,
            this.parentTask, this.pactRecordSerializer, this.pactRecordComparator, 64 * 1024 * 1024, 2, 0.9f);

    // emit data
    LOG.debug("Reading and sorting data...");

    // check order
    MutableObjectIterator<Record> iterator = merger.getIterator();

    LOG.debug("Checking results...");
    int pairsEmitted = 1;

    Record rec1 = new Record();
    Record rec2 = new Record();

    Assert.assertTrue((rec1 = iterator.next(rec1)) != null);
    while ((rec2 = iterator.next(rec2)) != null) {
        final Key k1 = rec1.getField(0, TestData.Key.class);
        final Key k2 = rec2.getField(0, TestData.Key.class);
        pairsEmitted++;

        Assert.assertTrue(keyComparator.compare(k1, k2) <= 0);

        Record tmp = rec1;
        rec1 = rec2;
        k1.setKey(k2.getKey());
        rec2 = tmp;
    }
    Assert.assertTrue(NUM_PAIRS == pairsEmitted);

    merger.close();
}